From 4480b8b45b1d39b70d713c6c9121d67749af565f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ch=C3=A9rifBALDE?= Date: Sat, 23 Apr 2022 13:34:21 +0200 Subject: [PATCH] 23/04/22 - 13h30 --- Ela_Spacy.py | 2 +- data_indexees.csv | 39 +++++++++++++++++- ela_index_bdd_classes.py | 66 +++++++++++++++++-------------- ela_output_test_file_pandas_2.txt | 38 +++++++++++++++++- prj_common.py | 11 +++--- wrapper.py | 45 ++++++++++++++++++--- 6 files changed, 155 insertions(+), 46 deletions(-) diff --git a/Ela_Spacy.py b/Ela_Spacy.py index 49bbad6..c90f67b 100644 --- a/Ela_Spacy.py +++ b/Ela_Spacy.py @@ -214,7 +214,7 @@ def Ela_remove_pronoun(tab_tokens): mytok = nlp(str(token).lower()) for token2 in mytok: if token2.pos_ != 'DET' and token2.pos_ != 'CCONJ' and token2.pos_ != 'ADP': - mywords.append(mytok) + mywords.append(str(mytok)) return mywords ''' diff --git a/data_indexees.csv b/data_indexees.csv index 7e8ebb4..0a29f71 100644 --- a/data_indexees.csv +++ b/data_indexees.csv @@ -1,2 +1,39 @@ ,index,mots,occurence,moyenne,id_formation,source_field -0,0,formation4,0,0.0,formation4,default +0,0," +",8,0.15,mysy_ytubes_04,description +1,1,depart,1,0.02,mysy_ytubes_04,description +2,2,resultat,1,0.02,mysy_ytubes_04,description +3,3,dcg,1,0.02,mysy_ytubes_04,description +4,4,bts,1,0.02,mysy_ytubes_04,description +5,5," + +",1,0.02,mysy_ytubes_04,description +6,6,gratuit,1,0.02,mysy_ytubes_04,description +7,7,marg,1,0.02,mysy_ytubes_04,description +8,8,analys,1,0.02,mysy_ytubes_04,description +9,9,notion,1,0.02,mysy_ytubes_04,description +10,10,paris,1,0.02,mysy_ytubes_04,description +11,11,villetaneuse,1,0.02,mysy_ytubes_04,description +12,12,iut,1,0.02,mysy_ytubes_04,description +13,13,zambotto,1,0.02,mysy_ytubes_04,description +14,14,cout,3,0.06,mysy_ytubes_04,description +15,15,cour,3,0.06,mysy_ytubes_04,description +16,16,different,1,0.02,mysy_ytubes_04,description +17,17,licenc,1,0.02,mysy_ytubes_04,description +18,18,calcul,1,0.02,mysy_ytubes_04,description +19,19,professeur,1,0.02,mysy_ytubes_04,description +20,20,corinne,1,0.02,mysy_ytubes_04,description +21,21,lign,1,0.02,mysy_ytubes_04,description +22,22,universit,1,0.02,mysy_ytubes_04,description +23,23,charg,1,0.02,mysy_ytubes_04,description +24,24,nord,1,0.02,mysy_ytubes_04,description +25,25,comptabilit,4,0.08,mysy_ytubes_04,description +26,26,sorbonne,1,0.02,mysy_ytubes_04,description +27,27,general,1,0.02,mysy_ytubes_04,description +28,28,debut,1,0.02,mysy_ytubes_04,description +29,29,stmg,1,0.02,mysy_ytubes_04,description +30,30,niveau,1,0.02,mysy_ytubes_04,description +31,31,prix,1,0.02,mysy_ytubes_04,description +32,32,incorporees,1,0.02,mysy_ytubes_04,description +33,33,gea,2,0.04,mysy_ytubes_04,description +34,34,gestion,3,0.06,mysy_ytubes_04,description diff --git a/ela_index_bdd_classes.py b/ela_index_bdd_classes.py index be1129f..0976ebe 100644 --- a/ela_index_bdd_classes.py +++ b/ela_index_bdd_classes.py @@ -491,44 +491,50 @@ Cela revient a chercher "sentence" dans la table "ela_index", where "source_fiel Elle retourne l'id_externe de la formation ''' def ela_recherche_tokens_source_field(sentence, source_fied=""): + try: + if( len(str(source_fied)) == 0): + mycommon.myprint(str(inspect.stack()[0][3])+" - source_fied est vide") + return False - if( len(str(source_fied)) == 0): - mycommon.myprint(str(inspect.stack()[0][3])+" - source_fied est vide") - return False + # Verification que source_field correspond a la bonne colonne + field_list = ['default', 'title', 'description','objectif',] + if source_fied not in field_list: + 
mycommon.myprint(str(inspect.stack()[0][3])+" - Le champ '" + source_fied + "' n'existe pas, requete annulée") + return False - # Verification que source_field correspond a la bonne colonne - field_list = ['default', 'title', 'description','objectif'] - if source_fied not in field_list: - mycommon.myprint(str(inspect.stack()[0][3])+" - Le champ '" + source_fied + "' n'existe pas, requete annulée") - return False + print(" ici: sentence = "+sentence+", -- source_fied ="+source_fied) + tab_training_id = [] + tab_tokens = ls.Ela_Tokenize(sentence) + tab_tokens2 = ls.Ela_remove_stop_words(tab_tokens) + tab_tokens3 = ls.Ela_remove_pronoun(tab_tokens2) + tab_tokens4 = ls.Ela_stemmize(tab_tokens3) + tab_tokens4.sort() - tab_training_id = [] - tab_tokens = ls.Ela_Tokenize(sentence) - tab_tokens2 = ls.Ela_remove_stop_words(tab_tokens) - tab_tokens3 = ls.Ela_remove_pronoun(tab_tokens2) - tab_tokens4 = ls.Ela_stemmize(tab_tokens3) - tab_tokens4.sort() + ''' + tab_tokens4 contient la liste des mots à rechercher. + En suite pour chaque mot de "tab_tokens4" on regarde dans la table "elaindex" + a quoi cela pourrait correspondre. + ''' - ''' - tab_tokens4 contient la liste des mots à rechercher. - En suite pour chaque mot de "tab_tokens4" on regarde dans la table "elaindex" - a quoi cela pourrait correspondre. - ''' - - collection = db["elaindex"] + collection = db["elaindex"] - for token in tab_tokens4: - print(" #### Token rechercher dans l'index est : "+str(token)) - for doc in collection.find({"mots":token, "source_field":source_fied}): - #print(" Trouvé dans le cours N° "+str(doc["id_formation"])) - if str(doc["id_formation"]) not in tab_training_id : - tab_training_id.append(str(doc["id_formation"])) + for token in tab_tokens4: + print(" #### Token rechercher dans l'index est : '"+str(token)+"' et le source_field = '"+str(source_fied)+"' ") + for doc in collection.find({"mots":token, "source_field":source_fied}): + #print(" Trouvé dans le cours N° "+str(doc["id_formation"])) + if str(doc["id_formation"]) not in tab_training_id : + tab_training_id.append(str(doc["id_formation"])) - #print(" Pour la phrase "+str(sentence)+", voici les formations correspondantes") - #print(tab_training_id) + #print(" Pour la phrase "+str(sentence)+", voici les formations correspondantes") + #print(tab_training_id) - return tab_training_id + return tab_training_id + + except Exception as e: + exc_type, exc_obj, exc_tb = sys.exc_info() + mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno)) + return False, "Traitement impossible " '''' Cette fonction indexe les articles et avis. 
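Note on the hunk above: the rewritten ela_recherche_tokens_source_field validates source_fied against the known columns, tokenizes the sentence, strips stop words and pronouns, stems, and then resolves each remaining token through the "elaindex" collection, keeping each id_formation only once. A minimal standalone sketch of that lookup-and-dedupe step follows; the connection string and database name are placeholders (the real code reuses the module-level db handle), only the collection layout is taken from the patch.

from pymongo import MongoClient

def find_trainings_for_tokens(db, tokens, source_field):
    # One elaindex document per (stemmed word, training, source column).
    collection = db["elaindex"]
    training_ids = []
    for token in tokens:
        for doc in collection.find({"mots": token, "source_field": source_field}):
            # Keep each training reference only once.
            if str(doc["id_formation"]) not in training_ids:
                training_ids.append(str(doc["id_formation"]))
    return training_ids

# Example against the sample index rows in data_indexees.csv
# (connection details below are placeholders):
db = MongoClient("mongodb://localhost:27017")["some_database"]
print(find_trainings_for_tokens(db, ["comptabilit", "gestion"], "description"))
# -> ['mysy_ytubes_04']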
diff --git a/ela_output_test_file_pandas_2.txt b/ela_output_test_file_pandas_2.txt index 1316fd7..404b8d6 100644 --- a/ela_output_test_file_pandas_2.txt +++ b/ela_output_test_file_pandas_2.txt @@ -1,2 +1,36 @@ - mots occurence moyenne id_formation source_field -0 formation4 0 0.0 formation4 default \ No newline at end of file + mots occurence moyenne id_formation source_field +0 \n 8 0.15 mysy_ytubes_04 description +1 depart 1 0.02 mysy_ytubes_04 description +2 resultat 1 0.02 mysy_ytubes_04 description +3 dcg 1 0.02 mysy_ytubes_04 description +4 bts 1 0.02 mysy_ytubes_04 description +5 \n\n 1 0.02 mysy_ytubes_04 description +6 gratuit 1 0.02 mysy_ytubes_04 description +7 marg 1 0.02 mysy_ytubes_04 description +8 analys 1 0.02 mysy_ytubes_04 description +9 notion 1 0.02 mysy_ytubes_04 description +10 paris 1 0.02 mysy_ytubes_04 description +11 villetaneuse 1 0.02 mysy_ytubes_04 description +12 iut 1 0.02 mysy_ytubes_04 description +13 zambotto 1 0.02 mysy_ytubes_04 description +14 cout 3 0.06 mysy_ytubes_04 description +15 cour 3 0.06 mysy_ytubes_04 description +16 different 1 0.02 mysy_ytubes_04 description +17 licenc 1 0.02 mysy_ytubes_04 description +18 calcul 1 0.02 mysy_ytubes_04 description +19 professeur 1 0.02 mysy_ytubes_04 description +20 corinne 1 0.02 mysy_ytubes_04 description +21 lign 1 0.02 mysy_ytubes_04 description +22 universit 1 0.02 mysy_ytubes_04 description +23 charg 1 0.02 mysy_ytubes_04 description +24 nord 1 0.02 mysy_ytubes_04 description +25 comptabilit 4 0.08 mysy_ytubes_04 description +26 sorbonne 1 0.02 mysy_ytubes_04 description +27 general 1 0.02 mysy_ytubes_04 description +28 debut 1 0.02 mysy_ytubes_04 description +29 stmg 1 0.02 mysy_ytubes_04 description +30 niveau 1 0.02 mysy_ytubes_04 description +31 prix 1 0.02 mysy_ytubes_04 description +32 incorporees 1 0.02 mysy_ytubes_04 description +33 gea 2 0.04 mysy_ytubes_04 description +34 gestion 3 0.06 mysy_ytubes_04 description \ No newline at end of file diff --git a/prj_common.py b/prj_common.py index 6d622ad..6e19242 100644 --- a/prj_common.py +++ b/prj_common.py @@ -197,7 +197,11 @@ def get_user_email_from_recid(recid = ""): coll_token = dbname['user_account'] tmp_val = coll_token.find({'recid': str(recid), 'active': '1'}) - user_email = tmp_val[0]['email'] + if( tmp_val and tmp_val[0] and tmp_val[0]['email']): + user_email = tmp_val[0]['email'] + else: + return False, "Impossible de supprimer le compte utilisateur" + return user_email except Exception as e: @@ -205,11 +209,6 @@ def get_user_email_from_recid(recid = ""): myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno)) return False, "Impossible de supprimer le compte utilisateur" - - - - - ''' recuperation du recid du partner ''' diff --git a/wrapper.py b/wrapper.py index 0d59aa4..a75d97d 100644 --- a/wrapper.py +++ b/wrapper.py @@ -217,6 +217,15 @@ def get_all_class(diction): ''' Cette fonction recherche les formations correspondant à un text +IMPORTANT : Dans la recherche par tips, on fait du && et non du ou. +exemple : +pour la recherche "description:"fichier" title:"Niveau 1"" +le système va chercher toutes les formation qui on : +- "description:"fichier" ET ET ET ET ET +- title:"Niveau 1" + +En gros, on fait une intersection entre les 2 listes. 
+ ''' def recherche_text_simple(diction): @@ -375,6 +384,7 @@ def recherche_text_simple(diction): tips = re.findall(regexp, search_text, re.MULTILINE) nb_tips = len(tips) + final_message3 = {} if( nb_tips > 0 ): mycommon.myprint(" Une recherche par tips a été identifiée") @@ -385,26 +395,35 @@ def recherche_text_simple(diction): final_message = [] insertObject = [] + is_first_tip = True for val in tips : val2 = val.replace('"', '') new_diction['search_text'] = val2 print(" On va recherche : #####"+str(new_diction['search_text'] )) retval, message = recherche_tips_ret_ref(new_diction) - #print(" pour la recherche de : "+str(new_diction)+" -- Voici le resultat "+str(message)) + print(" pour la recherche de : "+str(new_diction)+" -- Voici le resultat "+str(message)) if (retval == True ): final_retval = True + #print(" contact de message "+str(message)+ " # final_message "+str(final_message)) + + if( is_first_tip is True) : + final_message3 = message + else: + final_message3 = [x for x in message if x in final_message3] + + print(" final_message3 = "+str(final_message3)) final_message.append(message) + is_first_tip = False - + final_message = final_message3 #print(" liste defitive des ref "+str(final_message)) final_message2 = [] for t in final_message: if( t is not False): - for y in t: - #print( " unique = "+str(y)) - final_message2.append(y) + #print( " unique = "+str(t)) + final_message2.append(t) coll_name = dbname['myclass'] @@ -845,6 +864,8 @@ def recherche_tips_ret_ref(diction): mycommon.myprint(str(inspect.stack()[0][3])+" - le mot '"+val1+" ne correspond aucun tips, sorry") return False, " le mot '"+val1+" ne correspond aucun tips, sorry" + if (str(my_tips) == "desc"): + my_tips = 'description' mycommon.myprint(" Le tips recherché est "+my_tips+". Sa distance de Levenshtein ="+str(min)) @@ -857,6 +878,9 @@ def recherche_tips_ret_ref(diction): tab_training = [] tab_training = ela_index.ela_recherche_tokens_source_field(str(chaine[1]).lower(), str(my_tips) ) + if( tab_training is False): + return False, tab_training + #mycommon.myprint(" pour phrase : #" + str(chaine[1]).lower() + "# , Pour le tips #"+ str(my_tips)+"#, voici la liste des formations") #mycommon.myprint(tab_training) @@ -978,8 +1002,14 @@ def recherche_tips(diction): mycommon.myprint(str(inspect.stack()[0][3])+" - le mot '"+val1+" ne correspond aucun tips, sorry") return False, " le mot '"+val1+" ne correspond aucun tips, sorry" + ''' + Remplacement de l'abreviation 'desc' par description ''' - mycommon.myprint(" Le tips recherché est "+my_tips+". Sa distance de Levenshtein ="+str(min)) + if( str(my_tips) == "desc"): + my_tips = 'description' + + + mycommon.myprint(" Le tips recherché est '"+my_tips+"'. Sa distance de Levenshtein ="+str(min)) tab_ret = {} tab_ret['tips'] = str(my_tips) @@ -990,6 +1020,9 @@ def recherche_tips(diction): tab_training = [] tab_training = ela_index.ela_recherche_tokens_source_field(str(chaine[1]).lower(), str(my_tips) ) + if (tab_training is False): + return False, tab_training + mycommon.myprint(" pour phrase : #" + str(chaine[1]).lower() + "# , Pour le tips #"+ str(my_tips)+"#, voici la liste des formations") mycommon.myprint(tab_training)
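Note on the recherche_text_simple hunk: the per-tip results are now combined with an AND, as the new docstring explains; the first tip seeds final_message3 and every later tip only keeps the references already collected. A small self-contained sketch of that combination step, with made-up reference values:

# Sketch of the AND-combination used in recherche_text_simple: the first tip's
# hits seed the result, each following tip keeps only references already seen.
def intersect_tip_results(per_tip_results):
    combined = []
    is_first_tip = True
    for hits in per_tip_results:
        if is_first_tip:
            combined = list(hits)
            is_first_tip = False
        else:
            combined = [ref for ref in hits if ref in combined]
    return combined

# description:"fichier"  ->  ["ref_01", "ref_02", "ref_03"]   (illustrative values)
# title:"Niveau 1"       ->  ["ref_02", "ref_04"]
print(intersect_tip_results([["ref_01", "ref_02", "ref_03"],
                             ["ref_02", "ref_04"]]))   # ['ref_02']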
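Note on the tips resolution: recherche_tips_ret_ref and recherche_tips still pick the closest known tip by Levenshtein distance, and the patch now expands the 'desc' shorthand to 'description' before querying the index. A standalone sketch of that normalisation, assuming the candidate tips are the indexed columns plus the 'desc' shorthand; the distance helper below is only a stand-in for whatever the project already uses in mycommon.

# Pick the closest known tip by Levenshtein distance, then expand "desc".
def levenshtein(a, b):
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        curr = [i]
        for j, cb in enumerate(b, 1):
            curr.append(min(prev[j] + 1,                 # deletion
                            curr[j - 1] + 1,             # insertion
                            prev[j - 1] + (ca != cb)))   # substitution
        prev = curr
    return prev[-1]

def normalize_tip(word, known_tips=("default", "title", "description", "objectif", "desc")):
    closest = min(known_tips, key=lambda tip: levenshtein(word, tip))
    return "description" if closest == "desc" else closest

print(normalize_tip("descripton"))   # -> description
print(normalize_tip("desc"))         # -> description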
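Note on the regenerated fixtures: data_indexees.csv and ela_output_test_file_pandas_2.txt show what the indexer now writes for one description, one row per stemmed word with its occurence and a moyenne that is consistent with occurence divided by the total token count (52 tokens in this sample, e.g. 8/52 ≈ 0.15 and 4/52 ≈ 0.08). A sketch, under that assumption, of how such a frame could be built with pandas:

import pandas as pd

def build_index_frame(tokens, id_formation, source_field):
    # One row per distinct stemmed word; moyenne is treated here as the word's
    # relative frequency in the token list (assumption, see note above).
    counts = pd.Series(tokens).value_counts()
    return pd.DataFrame({
        "mots": counts.index,
        "occurence": counts.values,
        "moyenne": (counts.values / len(tokens)).round(2),
        "id_formation": id_formation,
        "source_field": source_field,
    }).reset_index(drop=True)

# Short illustrative token list, so the moyenne values come out larger than
# in the 52-token fixture above.
tokens = ["comptabilit"] * 4 + ["gestion"] * 3 + ["cout"] * 3 + ["gea"] * 2 + ["prix"]
print(build_index_frame(tokens, "mysy_ytubes_04", "description"))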