diff --git a/Elastic/addIndex.py b/Elastic/addIndex.py index b6a10dc..20be014 100644 --- a/Elastic/addIndex.py +++ b/Elastic/addIndex.py @@ -11,7 +11,7 @@ docType = "doc" # by default we connect to localhost:9200 -es = Elasticsearch(['https://5e9acbee.ngrok.io/']) +es = Elasticsearch(['http://node1.research.tib.eu:9200/']) path_to_data = '/app/' # path_to_data = '../' diff --git a/main.py b/main.py index cfc76fb..d626a3f 100644 --- a/main.py +++ b/main.py @@ -18,7 +18,6 @@ nlp = spacy.load('en_core_web_sm') -#wikidataSPARQL="https://17d140f2.ngrok.io/sparql" wikidataSPARQL="http://node3.research.tib.eu:4010/sparql" stopWordsList=wiki_stopwords.getStopWords() @@ -470,9 +469,8 @@ def split_base_on_s(combinations): result.append(comb) return result -def process_word_E_long(question): +def process_word_E_long(question, k=1): global count - k=1 entities=[] @@ -518,11 +516,10 @@ def process_word_E_long(question): results.append(entity) return [[entity[1],entity[4]] for entity in results] -def process_word_E(question): +def process_word_E(question,k=1): #print(question) startTime=time.time() global count - k=1 entities=[] question=question.replace("?","") @@ -546,7 +543,7 @@ def process_word_E(question): return [[entity[1],entity[2]] for entity in entities] def process_text_E_R(question,k=1): - raw=evaluate([question]) + raw=evaluate([question],k) #time=raw[1] #print(raw) question=question.replace("?","") @@ -592,7 +589,7 @@ def split_bas_on_comparison(combinations): return new_comb,compare_found -def evaluate(raw): +def evaluate(raw,k): evaluation=False startTime=time.time() @@ -611,7 +608,6 @@ def evaluate(raw): r_entity=0 p_relation=0 r_relation=0 - k=1 correct=True questionRelationsNumber=0 entities=[] @@ -830,7 +826,7 @@ def datasets_evaluate(dataset_file): if __name__ == '__main__': #datasets_evaluate() - process_text_E_R('What is the operating income for Qantas?') + process_text_E_R('What is the operating income for Qantas?', 5)