import os

CRAWLED_DATA_PATH = "../CrawledData/"
PROCESSED_DATA_PATH = "../ProcessedData/"
CRAWLER_LOGS_PATH = "../CrawlerLogs/"
CRAWLER_PROGRAM_PATH = "../DatasetCrawler"
PROCESSOR_PROGRAM_PATH = "../DatasetProcessing"
CONFIG_FILES_PATH = "../DatasetConfigs"
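
# Note, not in the original script: these relative paths resolve against the
# current working directory, so the script assumes it is run from the folder
# that contains it. Anchoring them to the script's own location would make
# that explicit, e.g.:
#
#   BASE_DIR = os.path.dirname(os.path.abspath(__file__))
#   CRAWLED_DATA_PATH = os.path.join(BASE_DIR, "..", "CrawledData")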


def create_default_config_file(dataset_name):
    """Create a default YAML config file for the given dataset."""
    with open(CONFIG_FILES_PATH + "/" + dataset_name + ".yaml", "w") as file:
        file.write("# name of the dataset, under which it is shown in the application\n")
        file.write("dataset-name: " + dataset_name + "\n")
        file.write("# positions of the individual devices present in the dataset\n")
        file.write("devices:\n")
        file.write("  - example1:\n")
        file.write("      x: 12.3\n")
        file.write("      y: 32.1\n")
        file.write("\n")
        file.write("  - example2:\n")
        file.write("      x: 32.1\n")
        file.write("      y: 12.3\n")
        file.write("\n")
        file.write("# root folder that contains links to the dataset\n")
        file.write("url: INSERT URL HERE/\n")
        file.write("# optional parameter specifying the name pattern of the datasets to be downloaded\n")
        file.write("regex: INSERT REGEX HERE\n")
        file.write("# optional parameter specifying how often to check for new datasets; "
                   "if empty, a default value is used (days)\n")
        file.write("update-period: INSERT VALUE HERE\n")


def create_default_processor(dataset_name):
    """Create a stub processor module for the given dataset."""
    with open(PROCESSOR_PROGRAM_PATH + "/" + dataset_name + "Processor.py", "w") as file:
        file.write("def process_file(filename):\n")
        file.write("    print(\"You must implement the process_file method first!\")\n")


def create_default_crawler(dataset_name):
    """Create a stub crawler module for the given dataset."""
    with open(CRAWLER_PROGRAM_PATH + "/" + dataset_name + "Crawler.py", "w") as file:
        file.write("def crawl(config):\n")
        file.write("    print(\"You must implement the crawl method first!\")\n")


def create_ignore_file(path, text):
    """Create an ignore.txt file in the given folder, optionally with content."""
    with open(path + "/ignore.txt", "w") as file:
        if text is not None:
            file.write(text + "\n")


def prepare_dataset_structure(dataset_name):
    """Create the folder structure and default files for a new dataset."""
    # create folder for crawled data
    try:
        path = CRAWLED_DATA_PATH + dataset_name
        os.mkdir(path)
        create_ignore_file(path, "ignore.txt")
    except OSError as e:
        print(e)
        print("Creation of the directory %s failed" % path)

    # create folder for processed data
    try:
        path = PROCESSED_DATA_PATH + dataset_name
        os.mkdir(path)
        create_ignore_file(path, "ignore.txt")
    except OSError:
        print("Creation of the directory %s failed" % path)

    # create folder for crawler logs
    try:
        path = CRAWLER_LOGS_PATH + dataset_name
        os.mkdir(path)
        create_ignore_file(path, None)
    except OSError:
        print("Creation of the directory %s failed" % path)

    create_default_crawler(dataset_name)
    create_default_processor(dataset_name)
    create_default_config_file(dataset_name)
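    # For dataset_name == "JIS", for example, the calls above are expected to
    # produce (relative to the script's working directory):
    #
    #   ../CrawledData/JIS/ignore.txt
    #   ../ProcessedData/JIS/ignore.txt
    #   ../CrawlerLogs/JIS/ignore.txt
    #   ../DatasetCrawler/JISCrawler.py
    #   ../DatasetProcessing/JISProcessor.py
    #   ../DatasetConfigs/JIS.yaml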


if __name__ == "__main__":
    prepare_dataset_structure("JIS")