20/05/22 - 13h30
parent 581ffd0a37
commit 8cc08d92b5
@@ -375,7 +375,7 @@ Aussi, on fait le choix de liberé de ne pas stémiser certain mots'''
 def Ela_stemmize_search(tab_tokens):
     try:
         tab_ret_val = []
-        print(" VERIFICATION SI LE MOT DOIT ETRE STEMISE: " + str(tab_tokens))
+        #print(" VERIFICATION SI LE MOT DOIT ETRE STEMISE: " + str(tab_tokens))
         for mot in tab_tokens:
             if (mycommon.Word_Not_Stemmize(str(mot))):
                 tab_ret_val.append(unidecode(mot))
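
Throughout this commit, debug traces are silenced by hand-commenting each print. As a possible alternative (not what the commit does), a flag-gated helper keeps the call sites intact; a minimal sketch, assuming a hypothetical module-level DEBUG flag that is not part of this codebase:

    # Sketch: gate debug output behind one flag instead of editing every call site.
    # DEBUG and debug_print are hypothetical names, not from the MySy code.
    DEBUG = False

    def debug_print(*args):
        # Printing only happens when the flag is on; flipping DEBUG back to True
        # restores all traces at once, with no diff touching the call sites.
        if DEBUG:
            print(*args)

    debug_print(" VERIFICATION SI LE MOT DOIT ETRE STEMISE: ", ["formation", "excel"])
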
@@ -383,7 +383,7 @@ def Ela_stemmize_search(tab_tokens):
                 '''
                 On fait la correction orthographe avant
                 '''
-                print("AVANT CORRECTION ORTHOGRAHIQUE ="+str(mot)+" ==> APRES = "+unidecode(str(MYSY_GV.spell.correction(mot))))
+                #print("AVANT CORRECTION ORTHOGRAHIQUE ="+str(mot)+" ==> APRES = "+unidecode(str(MYSY_GV.spell.correction(mot))))
                 corrected_str = str(MYSY_GV.spell.correction(mot))

@@ -405,7 +405,7 @@ def Ela_stemmize_search(tab_tokens):
                 tab_ret_val.append( unidecode (str(MYSY_GV.stemmer.stem(corrected_str))))
         '''

-        print(" STMISATION TAB = "+str(tab_ret_val))
+        #print(" STMISATION TAB = "+str(tab_ret_val))

         return True, tab_ret_val

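The three hunks above sit in Ela_stemmize_search, which keeps words from the word_not_stem collection as-is, spell-corrects the rest via MYSY_GV.spell, stems the corrected form with MYSY_GV.stemmer, and accent-folds everything with unidecode. A condensed sketch of that flow, with a plain set standing in for the word_not_stem lookup, and nltk plus pyspellchecker assumed as the underlying libraries (the diff does not name them):

    # Sketch of the search-side normalisation: protected words skip stemming,
    # everything else is spell-corrected first, then stemmed.
    from unidecode import unidecode
    from nltk.stem.snowball import SnowballStemmer   # pip install nltk
    from spellchecker import SpellChecker            # pip install pyspellchecker

    stemmer = SnowballStemmer("french")
    spell = SpellChecker(language="fr")
    NO_STEM_WORDS = {"excel", "python"}  # stand-in for the word_not_stem collection

    def stemmize_search(tab_tokens):
        try:
            tab_ret_val = []
            for mot in tab_tokens:
                if mot in NO_STEM_WORDS:
                    # protected word: no stemming, only accent folding
                    tab_ret_val.append(unidecode(mot))
                else:
                    # spell-correct first, then stem the corrected form
                    corrected = spell.correction(mot) or mot
                    tab_ret_val.append(unidecode(stemmer.stem(corrected)))
            return True, tab_ret_val
        except Exception:
            return False, []
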
@@ -89,7 +89,7 @@ def get_all_articles_avis(diction):
             insertObject.append(JSONEncoder().encode(user))
             val_tmp = val_tmp + 1

-        print(" getObject = ", str(insertObject))
+        #print(" getObject = ", str(insertObject))
         return True, insertObject

     except Exception as e:
@@ -481,7 +481,7 @@ def recherche_articles_avis(diction):
             str(inspect.stack()[0][3]) + " - Impossible de recuperer le token de l'utilisateur")
         return False, " Impossible de desactiver l'objectif utilisateur"

-    mycommon.myprint(" On recehrche la phrase +'" + search_text + "' + user_recid = " + user_recid)
+    #mycommon.myprint(" On recehrche la phrase +'" + search_text + "' + user_recid = " + user_recid)
     # Enregistrerment de la recherche
     retval, message, store_recherche_Id = store_recherche_article_avis(diction, user_recid)
     if (retval is False):
@@ -489,8 +489,11 @@ def recherche_articles_avis(diction):

     tab_training = []
     tab_training = ela_index.ela_recherche_article_avis_tokens(search_text)
-    mycommon.myprint(" pour phrase : #" + search_text + "#, voici la liste des formations")
-    mycommon.myprint(tab_training)
+    '''
+    Pour analyser la recherche, decommenter les 2 lignes ci-dessous
+    '''
+    #mycommon.myprint(" pour phrase : #" + search_text + "#, voici la liste des formations")
+    #mycommon.myprint(tab_training)

     coll_name = dbname['articles_avis']

@@ -631,7 +634,7 @@ def store_recherche_article_avis(diction, user_recid=""):
     coll_name = dbname['user_recherche']

     # Si le champ "id" est renseigné, il s'agit d'une mis jour
-    mycommon.myprint(str(inspect.stack()[0][3]) + " on va stocker "+str(mydata)+" id = "+str(mydata_id))
+    #mycommon.myprint(str(inspect.stack()[0][3]) + " on va stocker "+str(mydata)+" id = "+str(mydata_id))

     if( len(str(mydata_id)) > 0 ):
         ret_val = coll_name.find_one_and_update({'_id': ObjectId(str(mydata_id)), 'valide': '1'},
@@ -652,7 +655,7 @@ def store_recherche_article_avis(diction, user_recid=""):

         if ret_val and ret_val.inserted_id:
             nb_doc = ret_val.inserted_id
-            mycommon.myprint(str(inspect.stack()[0][3]) + " La recherche a été bien enregistrée. Id = '" + str(nb_doc)+"' ")
+            #mycommon.myprint(str(inspect.stack()[0][3]) + " La recherche a été bien enregistrée. Id = '" + str(nb_doc)+"' ")
             return True, "La recherche a bien été mise à jour", nb_doc
         else:
             mycommon.myprint(
@@ -727,7 +730,7 @@ def get_article_avis_alaune(diction):
             user = x
             insertObject.append(JSONEncoder().encode(user))

-        print(" getObject = ", str(insertObject))
+        #print(" getObject = ", str(insertObject))
         return True, insertObject

@@ -513,13 +513,13 @@ def ela_recherche_tokens(sentence):
     De manière plus globale, il faudrait exclure certains mot de la racinisation.

     '''
-    print(" VERIF : "+str(tab_tokens3))
+    #print(" VERIF : "+str(tab_tokens3))

     status, tab_tokens4 = ls.Ela_stemmize_search(tab_tokens3)
     if( status is False):
         return False

-    print(" VERIF APRES STEMISATION : " + str(tab_tokens4))
+    #print(" VERIF APRES STEMISATION : " + str(tab_tokens4))

     tab_tokens4.sort()

@@ -532,14 +532,14 @@ def ela_recherche_tokens(sentence):
     # collection
     collection = MYSY_GV.dbname["elaindex"]

-    print(" mot recherché tab_tokens4 (initialement) = "+str(tab_tokens4))
+    #print(" mot recherché tab_tokens4 (initialement) = "+str(tab_tokens4))

     status, new_tab_tokens4 = mysy_recherche_levenshtein(tab_tokens4)
     if( status is False):
         mycommon.myprint(str(inspect.stack()[0][3]) + " Recherche : mysy_recherche_levenshtein return FALSE ")
         return []

-    print(" ### les nouveaux mots recherchés new_tab_tokens4 = " + str(new_tab_tokens4))
+    #print(" ### les nouveaux mots recherchés new_tab_tokens4 = " + str(new_tab_tokens4))

     pipe2 = [{ '$match': { 'mots': { '$in': new_tab_tokens4 } } },
              { '$group': {
@@ -555,7 +555,7 @@ def ela_recherche_tokens(sentence):
         tab_training_id.append(str(result["_id"]))
         #print(result)

-    print("resultat tab_training_id = "+str(tab_training_id))
+    #print("resultat tab_training_id = "+str(tab_training_id))

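pipe2 above matches index entries whose mots field is in the searched token list, then feeds a $group stage whose body this hunk truncates. A sketch of a pipeline in that spirit, grouping on id_formation and summing occurence (both field names appear elsewhere in the diff; the exact grouping key and score are assumptions, not the confirmed original):

    # Sketch: keep index entries hit by the search tokens, tally a score per
    # course, best matches first. Connection details are assumed.
    from pymongo import MongoClient

    collection = MongoClient()["mysy"]["elaindex"]
    new_tab_tokens4 = ["formation", "excel"]

    pipe2 = [
        {"$match": {"mots": {"$in": new_tab_tokens4}}},
        {"$group": {"_id": "$id_formation", "score": {"$sum": "$occurence"}}},
        {"$sort": {"score": -1}},
    ]
    tab_training_id = [str(r["_id"]) for r in collection.aggregate(pipe2)]
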
@@ -590,22 +590,23 @@ def mysy_recherche_levenshtein(tab_mot):
         Si le mot appartient à la collection "word_not_stem", alors il faut faire un recherche strict
         pas de recherche elastique à faire.
         '''
-        print("### + #### '" + str(mot) + "' mycommon.Word_Not_Stemmize(str(mot) = '" +
+        '''print("### + #### '" + str(mot) + "' mycommon.Word_Not_Stemmize(str(mot) = '" +
               str(mycommon.Word_Not_Stemmize(str(mot))))
+        '''
         if (mycommon.Word_Not_Stemmize(str(mot)) is True):
             # "recherche strict"
             if (str(mot) not in training_mots):
-                print(" Le mot " + str(mot) + " est dans la table 'word_not_stem', donc pas de recherche elastique")
+                #print(" Le mot " + str(mot) + " est dans la table 'word_not_stem', donc pas de recherche elastique")
                 training_mots.append(str(mot))

         else:

             if( len( mot ) > 2):
                 mot_first_caract = mot[0:3]
-                print(" Traitement de " + str(mot) + " = 3 first => " + str(mot_first_caract))
+                #print(" Traitement de " + str(mot) + " = 3 first => " + str(mot_first_caract))

                 v = {'$regex': "^" + mot_first_caract}
-                print("v = " + str(v))
+                #print("v = " + str(v))

                 for x in coll_name.find({'mots': v}):
                     cmp_levenshtein = int(mycommon.levenshtein(mot, str(x['mots'])))
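The hunk above shows the shape of mysy_recherche_levenshtein: protected words pass through verbatim, other tokens are prefix-filtered with a ^-anchored regex on their first three characters, and the surviving index entries are screened by Levenshtein distance. A condensed, self-contained sketch; the distance cut-off and the levenshtein helper are assumptions, since the real code delegates both to mycommon:

    # Sketch of the fuzzy expansion: cheap regex prefix pre-filter, then an
    # edit-distance screen on the candidates.
    def levenshtein(a, b):
        # classic dynamic-programming edit distance
        prev = list(range(len(b) + 1))
        for i, ca in enumerate(a, 1):
            cur = [i]
            for j, cb in enumerate(b, 1):
                cur.append(min(prev[j] + 1, cur[j - 1] + 1, prev[j - 1] + (ca != cb)))
            prev = cur
        return prev[-1]

    MAX_DIST = 3  # assumed cut-off; the diff only shows a "> 3" debug branch

    def expand_tokens(tab_mot, coll_name, no_stem_words):
        training_mots = []
        for mot in tab_mot:
            if mot in no_stem_words:
                if mot not in training_mots:
                    training_mots.append(mot)       # strict match, no fuzzing
            elif len(mot) > 2:
                v = {"$regex": "^" + mot[0:3]}      # prefix pre-filter
                for x in coll_name.find({"mots": v}):
                    candidate = str(x["mots"])
                    if levenshtein(mot, candidate) <= MAX_DIST:
                        if candidate not in training_mots:
                            training_mots.append(candidate)
        return True, training_mots
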
@@ -679,13 +680,13 @@ def ela_recherche_tokens_source_field(sentence, source_fied=""):
     De manière plus globale, il faudrait exclure certains mot de la racinisation.

     '''
-    print(" VERIF : " + str(tab_tokens3))
+    #print(" VERIF : " + str(tab_tokens3))

     status, tab_tokens4 = ls.Ela_stemmize_search(tab_tokens3)
     if (status is False):
         return False

-    print(" VERIF APRES STEMISATION : " + str(tab_tokens4))
+    #print(" VERIF APRES STEMISATION : " + str(tab_tokens4))

     tab_tokens4.sort()

@@ -702,10 +703,10 @@ def ela_recherche_tokens_source_field(sentence, source_fied=""):
         mycommon.myprint(str(inspect.stack()[0][3]) + " Recherche : mysy_recherche_levenshtein return FALSE ")
         return []

-    print(" ### les nouveaux mots recherchés new_tab_tokens4 = " + str(new_tab_tokens4))
+    #print(" ### les nouveaux mots recherchés new_tab_tokens4 = " + str(new_tab_tokens4))

     for token in new_tab_tokens4:
-        print(" #### Token rechercher dans l'index est : '"+str(token)+"' et le source_field = '"+str(source_fied)+"' ")
+        #print(" #### Token rechercher dans l'index est : '"+str(token)+"' et le source_field = '"+str(source_fied)+"' ")
         for doc in collection.find({"mots":token, "source_field":source_fied}):
             #print(" Trouvé dans le cours N° "+str(doc["id_formation"]))
             if str(doc["id_formation"]) not in tab_training_id :
@@ -826,7 +827,7 @@ def ela_recherche_article_avis_tokens(sentence):
     De manière plus globale, il faudrait exclure certains mot de la racinisation.

     '''
-    print(" VERIF : " + str(tab_tokens3))
+    #print(" VERIF : " + str(tab_tokens3))
     tab_corrected_word = []
     for mot in tab_tokens3:
         mycommon.recherche_check_word_in_fr_dict(str(mot))
@@ -853,7 +854,7 @@ def ela_recherche_article_avis_tokens(sentence):
     # collection
     collection = MYSY_GV.dbname["elaindex_article_avis"]

-    print(" mot recherché tab_tokens4 = " + str(tab_tokens4))
+    #print(" mot recherché tab_tokens4 = " + str(tab_tokens4))

     pipe2 = [{'$match': {'mots': {'$in': tab_tokens4}}},
              {'$group': {
@@ -869,7 +870,7 @@ def ela_recherche_article_avis_tokens(sentence):
         tab_training_id.append(str(result["_id"]))
         # print(result)

-    print("resultat tab_training_id = " + str(tab_training_id))
+    #print("resultat tab_training_id = " + str(tab_training_id))

     return tab_training_id

main.py (2 changed lines)
@@ -408,7 +408,7 @@ sans utilisation de critère ni de tag
 def recherche_text_simple():
     # On recupere le corps (payload) de la requete
     payload = request.form.to_dict()
-    print(" ### payload = ", payload)
+    #print(" ### payload = ", payload)
     status, result = wp.recherche_text_simple(payload)
     return jsonify(status=status, message=result)

@@ -86,13 +86,13 @@ def mysy_recherche_levenshtein(tab_mot):
         Si le mot appartient à la collection "word_not_stem", alors il faut faire un recherche strict
         pas de recherche elastique à faire.
         '''
-        print("### + #### '"+str(mot)+"' mycommon.Word_Not_Stemmize(str(mot) = '"+
+        '''print("### + #### '"+str(mot)+"' mycommon.Word_Not_Stemmize(str(mot) = '"+
               mycommon.Word_Not_Stemmize(str(mot)))

+        '''
         if (mycommon.Word_Not_Stemmize(str(mot)) is False):
             #"recherche strict"
             if ( str(mot) not in training_mots):
-                print(" Le mot "+str(mot)+" est dans la table 'word_not_stem', donc pas de recherche elastique")
+                #print(" Le mot "+str(mot)+" est dans la table 'word_not_stem', donc pas de recherche elastique")
                 training_mots.append(str(mot))

         else:
@@ -103,10 +103,10 @@ def mysy_recherche_levenshtein(tab_mot):
             '''
             if( len( mot ) > 2):
                 mot_first_caract = mot[0:3]
-                print(" Traitement de " + str(mot) + " = 3 first => " + str(mot_first_caract))
+                #print(" Traitement de " + str(mot) + " = 3 first => " + str(mot_first_caract))

                 v = {'$regex': "^" + mot_first_caract}
-                print("v = " + str(v))
+                #print("v = " + str(v))

                 for x in coll_name.find({'mots': v}):
                     cmp_levenshtein = int(mycommon.levenshtein(mot, str(x['mots'])))
@@ -124,10 +124,10 @@ def mysy_recherche_levenshtein(tab_mot):
                         if (str(x['mots']) not in training_mots):
                             training_mots.append(str(x['mots']))

-                    elif (cmp_levenshtein > 3):
+                    '''elif (cmp_levenshtein > 3):
                         print(" KOO00 cmp_levenshtein : " + mot + " CMP " + str(x['mots']) + " = " + str(cmp_levenshtein) +
                               " ==> id_formation" + str(x['id_formation']) + " ==> occurrence = " + str(x['occurence']))

+                    '''

     return True, training_mots

wrapper.py (116 changed lines)
@@ -38,12 +38,16 @@ def get_recherche_gle_class(sentence):
     if not sentence:
         return False

-    mycommon.myprint(" On recehrche la phrase +'"+sentence+"'")
+    #mycommon.myprint(" On recehrche la phrase +'"+sentence+"'")

     tab_training = []
     tab_training = ela_index.ela_recherche_tokens(sentence)
-    mycommon.myprint(" pour phrase : #"+sentence+"#, voici la liste des formations")
-    mycommon.myprint(tab_training)

+    '''
+    pour analyser la recherche, decommenter les 2 lignes ci-dessous
+    '''
+    #mycommon.myprint(" pour phrase : #"+sentence+"#, voici la liste des formations")
+    #mycommon.myprint(tab_training)

     coll_name = MYSY_GV.dbname['myclass']

@@ -272,72 +276,6 @@ def recherche_text_simple(diction):
             token = diction['token']
             new_diction['token'] = diction['token']

-    certif_crit={}
-    if ("certif" in diction.keys()):
-        if diction['certif']:
-            critere_date['certif'] = diction['certif']
-            if (str(diction['certif']) != "NA"):
-                certif_crit['certif'] = diction['certif']
-                new_diction['certif'] = diction['certif']
-
-    price_crit = {}
-    json = {}
-    if ("price" in diction.keys()):
-        if diction['price']:
-            if( str( diction['price']) != "NA"):
-                prices = str(diction['price']).split(":")
-                if len(prices) > 2 :
-                    mycommon.myprint(str(inspect.stack()[0][3]) + " - Le filtre 'price' a plus 2 informations")
-                    return False, " Le filtre prix est incorrect"
-                price_crit = {'price': {'$gte': mycommon.tryInt(str(prices[0])), '$lte': mycommon.tryInt(str(prices[1]))}}
-
-    support_crit = {}
-    if ("support" in diction.keys()):
-        if diction['support']:
-            critere_date['support'] = diction['support']
-            if (str(diction['support']) != "NA"):
-                support_crit['support'] = diction['support']
-                new_diction['support'] = diction['support']
-
-    type_crit = {}
-    if ("type" in diction.keys()):
-        if diction['type']:
-            critere_date['type'] = diction['type']
-            if (str(diction['type']) != "NA"):
-                type_crit['type'] = diction['type']
-                new_diction['type'] = diction['type']
-
-    lang_crit = {}
-    if ("lang" in diction.keys()):
-        if diction['lang']:
-            critere_date['lang'] = diction['lang']
-            if (str(diction['lang']) != "NA"):
-                lang_crit['lang'] = diction['lang']
-                new_diction['lang'] = diction['lang']
-
-    distance_crit = {}
-    if ("distance" in diction.keys()):
-        if diction['distance']:
-            critere_date['distance'] = diction['distance']
-            if (str(diction['distance']) != "0"):
-                distance_crit['distance'] = diction['distance']
-                new_diction['distance'] = diction['distance']
-
-    duration_crit = {}
-    if ("duration" in diction.keys()):
-        if diction['duration']:
-            if (str(diction['duration']) != "0"):
-                duration_crit['duration'] = diction['duration']
-                new_diction['duration'] = mycommon.tryFloat(diction['duration'])
-                duration_crit = {
-                    'duree_formation': {'$lte': mycommon.tryInt( str(diction['duration']) )}}

     '''
     /!\ Important : si le token est vide, alors c'est une recherche faite en mode non-connecté.
     on doit l'accepter.
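The block removed above compiled each optional search filter into a small MongoDB criterion; price, for instance, arrived as a "min:max" string and became a $gte/$lte range. A sketch of that parsing step, with try_int as a local stand-in for mycommon.tryInt:

    # Sketch of the removed price-filter parsing: "100:500" becomes a Mongo
    # range criterion; "NA" or an empty value means no filtering at all.
    def try_int(v):
        try:
            return int(str(v).strip())
        except (TypeError, ValueError):
            return 0

    def build_price_crit(price):
        if not price or str(price) == "NA":
            return {}                       # absent filter: empty criterion
        prices = str(price).split(":")
        if len(prices) != 2:                # original only rejected > 2 parts
            raise ValueError("Le filtre prix est incorrect")
        return {"price": {"$gte": try_int(prices[0]), "$lte": try_int(prices[1])}}

    print(build_price_crit("100:500"))
    # {'price': {'$gte': 100, '$lte': 500}}
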
@@ -363,7 +301,7 @@ def recherche_text_simple(diction):
         return False, " Impossible de desactiver l'objectif utilisateur"

-    mycommon.myprint(" On recehrche la phrase +'" + search_text + "' + user_recid = "+user_recid)
+    #mycommon.myprint(" On recehrche la phrase +'" + search_text + "' + user_recid = "+user_recid)

     # Enregistrerment de la recherche
     retval, message, store_recherche_Id = store_recherche(diction, user_recid)
@@ -380,8 +318,8 @@ def recherche_text_simple(diction):

     cleaned_search_text = mycommon.Parse_Clean_Search_Text(search_text)

-    print(" NOT CLEANED search_text = " + str(search_text))
-    print(" CLEANED search_text = "+str(cleaned_search_text))
+    #print(" NOT CLEANED search_text = " + str(search_text))
+    #print(" CLEANED search_text = "+str(cleaned_search_text))

@@ -521,8 +459,12 @@ def recherche_text_simple(diction):

     tab_training = []
     tab_training = ela_index.ela_recherche_tokens(search_text)
-    mycommon.myprint(" pour phrase : #" + search_text + "#, voici la liste des formations")
-    mycommon.myprint(tab_training)

+    '''
+    pour analyser la recherche, decommenter les 2 lignes ci-dessous
+    '''
+    #mycommon.myprint(" pour phrase : #" + search_text + "#, voici la liste des formations")
+    #mycommon.myprint(tab_training)

     coll_name = MYSY_GV.dbname['myclass']
@@ -533,10 +475,10 @@ def recherche_text_simple(diction):
     insertObject = []

     for val in tab_training:
-        for x in coll_name.find(({"$and": [ {"external_code": str(val) },
-                                            certif_crit,lang_crit, type_crit,price_crit,
-                                            support_crit, duration_crit ]}
-                                  ), {"_id": 0, "indexed": 0, "indexed_desc": 0, "indexed_obj": 0, "indexed_title": 0, "valide": 0, "locked": 0, "partner_owner_recid": 0, }):
+        for x in coll_name.find( {"external_code": str(val) },
+                                 {"_id": 0, "indexed": 0, "indexed_desc": 0,
+                                  "indexed_obj": 0, "indexed_title": 0, "valide": 0,
+                                  "locked": 0, "partner_owner_recid": 0, }):
             nb_result = nb_result + 1
             my_recid = {}
             my_recid['user_rec_id'] = str(user_recid)
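The old query above fed every criterion dict to $and even when empty (an empty {} clause is legal but pure noise), while the new query drops the filters entirely. If the filters return, one middle ground is to assemble $and only from non-empty criteria; a sketch, reusing the criterion names from the removed code:

    # Sketch: combine only the non-empty criteria into one query, so an unset
    # filter contributes nothing to the $and list.
    def build_query(external_code, *criteria):
        clauses = [{"external_code": str(external_code)}]
        clauses += [c for c in criteria if c]   # skip empty {} filters
        return clauses[0] if len(clauses) == 1 else {"$and": clauses}

    certif_crit, price_crit = {}, {"price": {"$gte": 100, "$lte": 500}}
    print(build_query("MYSY_01", certif_crit, price_crit))
    # {'$and': [{'external_code': 'MYSY_01'}, {'price': {'$gte': 100, '$lte': 500}}]}
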
@@ -569,7 +511,10 @@ def recherche_text_simple(diction):
     '''
     /!\ Important : Recuperation des elements de la recherche etendue
     c'est a dire l'utilisation d'API externe
+
+    /!\ update du 20/05/22 : Cette approche relentie bcp le systeme avec l'appel externe.
+    donc on annule
     '''

     # aller chercher la recherche etendue et la rajouter ici.
     ext_status, external_code_prefixe = mycommon.Get_Extended_Result(search_text)
@@ -598,7 +543,7 @@ def recherche_text_simple(diction):
                 else:
                     print(str(x['url'])+" existe deja, pas d'ajout à faire ")

     '''
     #print(insertObject)

     ''' en cas de resultat vide, enregsitrement de la requete de recherche avec les filtres associé'''
@@ -764,7 +709,7 @@ def store_recherche(diction, user_recid=""):
     coll_name = MYSY_GV.dbname['user_recherche']

     # Si le champ "id" est renseigné, il s'agit d'une mis jour
-    mycommon.myprint(str(inspect.stack()[0][3]) + " on va stocker "+str(mydata)+" id = "+str(mydata_id))
+    #mycommon.myprint(str(inspect.stack()[0][3]) + " on va stocker "+str(mydata)+" id = "+str(mydata_id))

     if( len(str(mydata_id)) > 0 ):
         ret_val = coll_name.find_one_and_update({'_id': ObjectId(str(mydata_id)), 'valide': '1'},
@@ -785,7 +730,7 @@ def store_recherche(diction, user_recid=""):

         if ret_val and ret_val.inserted_id:
             nb_doc = ret_val.inserted_id
-            mycommon.myprint(str(inspect.stack()[0][3]) + " La recherche a été bien enregistrée. Id = '" + str(nb_doc)+"' ")
+            #mycommon.myprint(str(inspect.stack()[0][3]) + " La recherche a été bien enregistrée. Id = '" + str(nb_doc)+"' ")
             return True, "La recherche a bien été mise à jour", nb_doc
         else:
             mycommon.myprint(
@@ -1060,8 +1005,11 @@ def recherche_tips(diction):
     if (tab_training is False):
         return False, tab_training

-    mycommon.myprint(" pour phrase : #" + str(chaine[1]).lower() + "# , Pour le tips #"+ str(my_tips)+"#, voici la liste des formations")
-    mycommon.myprint(tab_training)
+    '''
+    Pour analyser les recherches, decommenter les 2 lignes ci-dessous
+    '''
+    #mycommon.myprint(" pour phrase : #" + str(chaine[1]).lower() + "# , Pour le tips #"+ str(my_tips)+"#, voici la liste des formations")
+    #mycommon.myprint(tab_training)

     coll_name = MYSY_GV.dbname['myclass']