commit 47ab74a287 (parent 15b5aa2c60)
commit message: "01/04/22 - 10h30"
changed: Ela_Spacy.py (25 changed lines)
@@ -15,6 +15,7 @@ from unidecode import unidecode
 import inspect
 import sys, os
 from autocorrect import Speller
+from datetime import datetime

 CONNECTION_STRING = "mongodb://localhost/cherifdb"

@@ -36,6 +37,9 @@ lsc.update_stopWords(stopWords)
 lsc.update_token_fr_pontuation(token_fr_pontuation)
 spell_fr = Speller(lang='fr')

+CONNECTION_STRING = "mongodb://localhost/cherifdb"
+client = MongoClient(CONNECTION_STRING)
+dbname = client['cherifdb']

 #print("token_fr_pontuation")
 #print(token_fr_pontuation)
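Review note: after this hunk, CONNECTION_STRING is defined twice in the module (it already appears as context near line 19 in the first hunk), and the MongoClient is created at import time. A minimal consolidation sketch, assuming nothing else depends on the duplicate definition:

    from pymongo import MongoClient

    # Define the connection string once, near the top of the module,
    # and reuse the single client everywhere.
    CONNECTION_STRING = "mongodb://localhost/cherifdb"

    client = MongoClient(CONNECTION_STRING)
    dbname = client['cherifdb']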
@@ -85,7 +89,20 @@ sa correction orthographique ne français
 def correct_fr_word(word):

     try:
+        mydata = {}
+
+        print(" Fonction : correct_fr_word : '"+word+"' =======> "+spell_fr(word))
+        mydata['mot'] = str(word)
+        mydata['mot_corrected'] = str(spell_fr(word))
+        mydata['date_update'] = str(datetime.now())
+        mydata['treated'] = 0
+        coll_name = dbname['correction_ortho']
+        ret_val_tmp = coll_name.insert_one(mydata)
+
+        if (ret_val_tmp is False):
+            mycommon.myprint(str(inspect.stack()[0][3]) + " - Impossbile d'inserer le mot "+str(word)+" dans correction_ortho ")
+
         return spell_fr(word)

     except Exception as e:
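Review note: PyMongo's insert_one() returns an InsertOneResult and raises an exception on failure; it never returns False, so the `ret_val_tmp is False` branch added here can never fire. A minimal sketch of a check that can actually trigger, assuming the same collection as in the hunk:

    from pymongo import MongoClient

    client = MongoClient("mongodb://localhost/cherifdb")
    coll_name = client['cherifdb']['correction_ortho']

    # insert_one() returns an InsertOneResult; test .acknowledged, not False.
    ret_val_tmp = coll_name.insert_one({'mot': 'exemple', 'treated': 0})
    if not ret_val_tmp.acknowledged:
        print("insert into correction_ortho was not acknowledged")
    else:
        print("inserted _id:", ret_val_tmp.inserted_id)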
@@ -216,7 +233,11 @@ def Ela_stemmize(tab_tokens):
     print(" VERIFICATION SI LE MOT EST FR : " + str(tab_tokens))
     for mot in tab_tokens:
         if( mycommon.check_word_in_fr_dict(str(mot)) ):
-            tab_ret_val.append(stemmer.stem(mot))
+
+            if( type(mot) is str ):
+                tab_ret_val.append(stemmer.stem(mot))
+            else:
+                tab_ret_val.append(stemmer.stem(mot.text))
         else:
             tab_ret_val.append(mot)

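Review note: the new branch lets tab_tokens hold either plain strings or token objects that expose .text (spaCy tokens do). A standalone sketch of the same dispatch; the FrenchStemmer below is an assumption, since the diff does not show which stemmer the module builds:

    import spacy
    from nltk.stem.snowball import FrenchStemmer  # assumption: actual stemmer not shown in the diff

    stemmer = FrenchStemmer()
    nlp = spacy.blank("fr")

    def stem_one(mot):
        # Accept a plain str or a spaCy Token, which exposes its raw text as .text
        texte = mot if isinstance(mot, str) else mot.text
        return stemmer.stem(texte)

    print([stem_one(tok) for tok in nlp("recherche des mots")])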
@@ -359,7 +380,7 @@ def Ela_list_to_mongo(tab_tokens, traning_id, source_field):
     final_lists = []
     for tmp in unique_list:
         moyenne = round(int(occurrences[str(tmp)]) / size_tab, 2)
-        final_lists.append([str(tmp), str(occurrences[str(tmp)]), str(moyenne), str(traning_id), str(source_field)])
+        final_lists.append([str(tmp), int(str(occurrences[str(tmp)])), str(moyenne), str(traning_id), str(source_field)])

     print(final_lists)
     data = pd.DataFrame(final_lists, columns=('mots', "occurence", 'moyenne', 'id_formation', 'source_field'))
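Review note: storing the occurrence count as int instead of str makes the resulting DataFrame column numeric rather than object, which matters for sorting and arithmetic downstream. A minimal illustration of the dtype difference:

    import pandas as pd

    rows_str = [["recherch", "4"], ["mot", "2"]]   # old behaviour: counts stored as str
    rows_int = [["recherch", 4], ["mot", 2]]       # new behaviour: counts stored as int

    print(pd.DataFrame(rows_str, columns=("mots", "occurence")).dtypes["occurence"])  # object
    print(pd.DataFrame(rows_int, columns=("mots", "occurence")).dtypes["occurence"])  # int64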
[second changed file in this commit: a CSV export of the tokens table; filename not shown in this view]

@@ -1,3 +1,3 @@
 ,index,mots,occurence,moyenne,id_formation,source_field
-0,0,recherch,4,0.67,AUTO_CH03,default
-1,1,mot,2,0.33,AUTO_CH03,default
+0,0,mot,2,0.33,AUTO_CH03,default
+1,1,recherch,4,0.67,AUTO_CH03,default
[back in Ela_Spacy.py]

@@ -470,7 +470,7 @@ def ela_recherche_tokens(sentence):

     for result in collection.aggregate(pipe2):
         tab_training_id.append(str(result["_id"]))
-        print(result)
+        #print(result)

     print("resultat tab_training_id = "+str(tab_training_id))

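Review note: this hunk only silences a per-document debug print inside the aggregation loop. For context, a self-contained sketch of the surrounding pattern; pipe2 is not shown in the diff, so the pipeline and collection name below are purely hypothetical:

    from pymongo import MongoClient

    client = MongoClient("mongodb://localhost/cherifdb")
    collection = client['cherifdb']['elaindex']  # hypothetical collection name

    # Hypothetical stand-in for pipe2: group matching rows by training id.
    pipe2 = [
        {"$match": {"mots": {"$in": ["recherch", "mot"]}}},
        {"$group": {"_id": "$id_formation"}},
    ]

    tab_training_id = [str(result["_id"]) for result in collection.aggregate(pipe2)]
    print("resultat tab_training_id = " + str(tab_training_id))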
[third changed file in this commit: a plain-text dump of the same table; filename not shown in this view]

@@ -1,3 +1,3 @@
-  mots occurence moyenne id_formation source_field
-0  recherch  4  0.67  AUTO_CH03  default
-1  mot  2  0.33  AUTO_CH03  default
+  mots occurence moyenne id_formation source_field
+0  mot  2  0.33  AUTO_CH03  default
+1  recherch  4  0.67  AUTO_CH03  default