from Utilities import folder_processor
from Utilities.Crawler import basic_crawler_functions

# Path to crawled data
CRAWLED_DATA_PATH = "CrawledData/"


def crawl(config):
    """
    Crawls the source and downloads new data for the dataset into path_for_files.

    To keep the project structure, url, regex, and dataset_name are read from config.
    Already implemented helpers from Utilities/Crawler/basic_crawler_functions.py are used.

    Args:
        config: loaded configuration file of the dataset
    """
    dataset_name = config["dataset-name"]
    url = config["url"]
    regex = config["regex"]
    path_for_files = CRAWLED_DATA_PATH + dataset_name + '/'

    # First level: collect links on the start page and keep only the OD_ZCU folders
    first_level_links = basic_crawler_functions.get_all_links(url)
    filtered_first_level_links = basic_crawler_functions.filter_links(first_level_links, "^OD_ZCU")
    absolute_first_level_links = basic_crawler_functions.create_absolute_links(filtered_first_level_links, url)

    files = []

    # Second level: inside every first-level folder, collect file links matching the dataset regex
    for link in absolute_first_level_links:
        second_level_links = basic_crawler_functions.get_all_links(link)
        filtered_second_level_links = basic_crawler_functions.filter_links(second_level_links, regex)
        absolute_second_level_links = basic_crawler_functions.create_absolute_links(filtered_second_level_links, link)

        for file_link in absolute_second_level_links:
            files.append(file_link)

    # Skip files that were already downloaded in previous runs
    files = basic_crawler_functions.remove_downloaded_links(files, dataset_name)

    for file in files:
        basic_crawler_functions.download_file_from_url(file, dataset_name)

    folder_processor.unzip_all_csv_zip_files_in_folder(path_for_files)