2 # -*- coding: utf-8 -*-
3 #Author: Pierre Ratinaud
4 #Copyright (c) 2008-2012 Pierre Ratinaud
9 from ConfigParser import ConfigParser
10 from subprocess import Popen, call, PIPE
22 from shutil import copyfile
25 #from dialog import BugDialog
28 log = logging.getLogger('iramuteq')
# Similarity / association indices available for the similarity analysis.
# Keep the order: it is the order presented in the GUI and passed to R.
indices_simi = [u'cooccurrence',
                'pourcentage de cooccurrence',
                u'Russel',
                u'Jaccard',
                'Kulczynski1',
                'Kulczynski2',
                'Mountford',
                'Fager',
                'simple matching',
                'Hamman',
                'Faith',
                'Tanimoto',
                'Dice',
                'Phi',
                'Stiles',
                'Michael',
                'Mozley',
                'Yule',
                'Yule2',
                'Ochiai',
                'Simpson',
                'Braun-Blanquet',
                'Chi-squared',
                'Phi-squared',
                'Tschuprow',
                'Cramer',
                'Pearson',
                'binomial']
35 def open_folder(folder):
# Open `folder` in the platform's file browser.
# NOTE(review): lines 37-38 (the body of the win32 branch, presumably
# os.startfile) are missing from this paste -- confirm against the full file.
36 if sys.platform == "win32":
39 opener ="open" if sys.platform == "darwin" else "xdg-open"
40 #call([opener, folder])
# NOTE(review): shell=True with an interpolated path breaks on paths that
# contain spaces or quotes and is injection-prone; the commented-out
# list form above would be safer.
41 call([u"%s %s &" % (opener, folder)], shell=True)
43 def normpath_win32(path) :
# Normalize a Windows path: collapse runs of doubled backslashes into
# single ones, then restore the leading '\\' that a UNC path
# (\\server\share) requires.
# NOTE(review): the early return for non-win32 platforms (line 45) and the
# re-prefixing plus final return (lines 49-51) are missing from this paste.
44 if not sys.platform == 'win32' :
46 while '\\\\' in path :
47 path = path.replace('\\\\', '\\')
# after collapsing, a UNC path has been reduced to one leading '\';
# presumably the missing lines put the second '\' back -- confirm.
48 if path.startswith('\\') and not path.startswith('\\\\') :
53 def __init__(self, path = None, encoding = 'utf8'):
56 self.encoding = encoding
58 def __getitem__(self, key):
61 def read(self, path = None):
64 with codecs.open(path, 'r', self.encoding) as f :
66 tgen = [line.split('\t') for line in tgen.splitlines()]
67 tgen = dict([[line[0], line[1:]] for line in tgen])
71 def write(self, path = None):
74 with open(path, 'w') as f :
75 f.write('\n'.join(['\t'.join([val] + self.tgen[val]) for val in self.tgen]).encode(self.encoding))
77 def writetable(self, pathout, tgens, totocc):
# Write a cross table (tgen rows x etoile columns) of occurrence counts,
# plus a final totals row, as tab-separated text encoded with
# self.encoding.  Backquotes (`x`) are the Python 2 shorthand for repr(x).
78 etoiles = totocc.keys()
80 with open(pathout, 'w') as f :
# header row: 'tgens' followed by one column per etoile
81 line = '\t'.join([u'tgens'] + etoiles) + '\n'
82 f.write(line.encode(self.encoding))
# NOTE(review): the loop header binding `t` (line 83) is missing here.
84 line = '\t'.join([t] + [`tgens[t][et]` for et in etoiles]) + '\n'
85 f.write(line.encode(self.encoding))
# choose a totals-row label that does not collide with an existing tgen
# name; missing lines 86-87 presumably initialise totname and i.
88 while totname + `i` in tgens :
90 totname = totname + `i`
91 line = '\t'.join([totname] + [`totocc[et]` for et in etoiles]) + '\n'
92 f.write(line.encode(self.encoding))
95 def __init__(self, filein, syscoding = 'utf8') :
97 self.syscoding = syscoding
99 self.openedcorpus = {}
100 self.openedmatrix = {}
108 d = shelve.open(self.filein)
109 self.history = d.get('history', [])
110 self.matrix = d.get('matrix', [])
111 self.ordercorpus = dict([[corpus['uuid'], i] for i, corpus in enumerate(self.history)])
112 self.corpus = dict([[corpus['uuid'], corpus] for corpus in self.history])
113 self.analyses = dict([[analyse['uuid'], analyse] for corpus in self.history for analyse in corpus.get('analyses', [])])
114 self.matrixanalyse = dict([[mat['uuid'], mat] for mat in self.matrix])
115 self.ordermatrix = dict([[matrix['uuid'], i] for i, matrix in enumerate(self.matrix)])
118 d['history'] = self.history
119 d['matrix'] = self.matrix
120 with open('/home/pierre/hystory.json', 'w') as f :
121 f.write(json.dumps(d, indent=4, default=str))
124 d = shelve.open(self.filein)
125 d['history'] = self.history
126 d['matrix'] = self.matrix
129 def add(self, analyse) :
130 log.info('add to history %s' % analyse.get('corpus_name', 'pas un corpus'))
131 tosave = {'uuid' : analyse['uuid'], 'ira': analyse['ira'], 'type' : analyse['type']}
132 if tosave['uuid'] in self.corpus :
133 log.info('problem : this uuid is already in history : %s' % tosave['uuid'])
135 if analyse.get('corpus', False) :
136 if analyse['uuid'] in self.analyses :
138 tosave['corpus'] = analyse['corpus']
139 tosave['name'] = analyse['name']
140 acorpus_uuid = analyse['corpus']
141 if acorpus_uuid in self.corpus :
142 if 'analyses' in self.history[self.ordercorpus[acorpus_uuid]] :
143 self.history[self.ordercorpus[acorpus_uuid]]['analyses'].append(tosave)
145 self.history[self.ordercorpus[acorpus_uuid]]['analyses'] = [tosave]
147 self.orph.append(tosave)
149 tosave['corpus_name'] = analyse['corpus_name']
150 #self.ordercorpus[tosave['uuid']] = len(history)
151 #self.corpus[tosave['uuid']] = analyse
152 self.history.append(tosave)
156 def addMatrix(self, analyse) :
158 #tosave['matrix_name'] = analyse['matrix_name']
159 tosave['analyses'] = []
160 self.matrix.append(tosave)
164 def addMatrixAnalyse(self, analyse) :
# Register an analysis attached to a matrix: build a small descriptor and
# append it to the parent matrix's 'analyses' list, located through
# self.ordermatrix.
# NOTE(review): lines 169-171 (presumably the persistence/save step) are
# missing from this paste.
165 tosave = {'uuid' : analyse['uuid'], 'ira': analyse['ira'], 'type' : analyse['type'], 'matrix' : analyse['matrix']}
166 tosave['name'] = analyse['name']
# silently dropped when the matrix uuid is unknown
167 if tosave['matrix'] in self.ordermatrix :
168 self.matrix[self.ordermatrix[tosave['matrix']]]['analyses'].append(tosave)
172 def addmultiple(self, analyses) :
173 log.info('add multiple')
174 for analyse in analyses :
175 tosave = {'uuid' : analyse['uuid'], 'ira': analyse['ira'], 'type' : analyse['type']}
176 corpus = analyse['corpus']
177 tosave['corpus'] = corpus
178 tosave['name'] = analyse['name']
179 if corpus in self.corpus :
180 if 'analyses' in self.history[self.ordercorpus[corpus]] :
181 self.history[self.ordercorpus[corpus]]['analyses'].append(tosave)
183 self.history[self.ordercorpus[corpus]]['analyses'] = [tosave]
187 def delete(self, analyse, corpus = False) :
188 log.info('delete %s' % analyse.get('name', 'noname'))
190 self.history.pop(self.ordercorpus[analyse['uuid']])
191 if analyse['uuid'] in self.openedcorpus :
192 del self.openedcorpus[analyse['uuid']]
193 log.info('delete corpus : %s' % analyse['uuid'])
194 elif analyse['uuid'] in self.analyses :
195 todel = [i for i, ana in enumerate(self.corpus[analyse['corpus']]['analyses']) if ana['uuid'] == analyse['uuid']][0]
196 self.history[self.ordercorpus[analyse['corpus']]]['analyses'].pop(todel)
197 elif analyse['uuid'] in self.matrixanalyse :
198 self.matrix = [mat for mat in self.matrix if mat['uuid'] != analyse['uuid']]
199 elif analyse.get('matrix', False) in self.matrixanalyse :
200 analyses = self.matrix[self.ordermatrix[analyse['matrix']]]['analyses']
201 topop = [i for i, val in enumerate(analyses) if analyse['uuid'] == val['uuid']][0]
203 self.matrix[self.ordermatrix[analyse['matrix']]]['analyses'] = analyses
207 def addtab(self, analyse) :
208 self.opened[analyse['uuid']] = analyse
210 def rmtab(self, analyse) :
211 del self.opened[analyse['uuid']]
213 def update(self, analyse) :
214 if 'matrix_name' in analyse :
215 self.matrixanalyse[analyse['uuid']].update(analyse)
216 elif 'corpus_name' in analyse :
217 self.corpus[analyse['uuid']].update(analyse)
218 elif 'corpus' in analyse :
219 self.analyses[analyse['uuid']].update(analyse)
221 toupdate = [an for an in self.matrixanalyse[analyse['matrix']]['analyses'] if an['uuid'] == analyse['uuid']]
222 toupdate[0].update(analyse)
227 corpustodel = [corpus for corpus in self.history if not os.path.exists(corpus['ira'])]
229 for corpus in corpustodel :
230 print 'cleaning :', corpus['corpus_name']
231 self.delete(corpus, corpus = True)
232 anatodel = [analyse for corpus in self.history for analyse in corpus.get('analyses', []) if not os.path.exists(analyse.get('ira', '/'))]
233 for analyse in anatodel :
234 print 'cleaning :', analyse['name']
249 for corpus in self.history :
250 analysenb += len(corpus.get('analyses', []))
251 analyses = corpus.get('analyses', [])
252 for analyse in analyses :
253 if os.path.exists(analyse['ira']) :
254 ana = DoConf(analyse['ira']).getoptions()
256 time = ana['time'].split()
257 ha += int(time[0].replace('h','')) * 3600
258 ma += int(time[1].replace('m','')) * 60
259 sa += int(time[2].replace('s',''))
260 if os.path.exists(corpus['ira']) :
261 param = DoConf(corpus['ira']).getoptions()
262 time = param.get('time','0h 0m 0s')
264 hours += int(time[0].replace('h','')) * 3600
265 minutes += int(time[1].replace('m','')) * 60
266 secondes += int(time[2].replace('s',''))
267 if param.get('originalpath', False) :
268 if param['originalpath'] in corpusnb :
269 corpusnb[param['originalpath']] += 1
270 tokens += int(param['occurrences'])
272 corpusnb[param['originalpath']] = 1
277 if corpus['ira'] in todel :
281 print u'Nbr total de corpus : %s' % len(self.history)
282 corpus_nb = len(corpusnb) + len(todel)
283 print u'Nbr de corpus différents : %s' % corpus_nb
284 lentodel = len(todel)
285 print u'Nbr de corpus à supprimer : %s' % lentodel
286 print u'Nbr de sous corpus : %s' % subnb
287 print u"Nbr total d'occurrences : %s" % tokens
288 print u'Moyenne occurrences par corpus : %f' % (tokens/corpus_nb)
289 print '---------------------'
290 print u"Nbr total d'analyses : %s" % analysenb
291 print u'Temps total indexation : %f h' % ((hours+minutes+secondes) / 3600)
292 print u'Temps total analyses : %f h' % ((ha+ma+sa) / 3600)
295 return str(self.history)
298 def __init__(self, configfile=None, diff = None, parametres = None) :
299 self.configfile = configfile
300 self.conf = ConfigParser()
302 if configfile is not None :
303 configfile = normpath_win32(configfile)
304 self.conf.readfp(codecs.open(configfile, 'r', 'utf8'))
306 if parametres is not None :
307 self.doparametres(parametres)
309 def doparametres(self, parametres) :
312 def getsections(self) :
313 return self.conf.sections()
315 def getoptions(self, section = None, diff = None):
# Read one config section into a dict, coercing string values: digit
# strings become int, 'True'/'False' become bool, '(...)' and '[...]'
# literals go through ast.literal_eval, anything else stays a raw string.
# NOTE(review): initialisation of `parametres` and the section-default
# guard (lines 316-317), the plain-string else marker (line 330) and the
# return (lines 334-335) are missing from this paste.
318 section = self.conf.sections()[0]
319 for option in self.conf.options(section) :
320 if self.conf.get(section, option).isdigit() :
321 parametres[option] = int(self.conf.get(section, option))
322 elif self.conf.get(section, option) == 'False' :
323 parametres[option] = False
324 elif self.conf.get(section, option) == 'True' :
325 parametres[option] = True
326 elif self.conf.get(section, option).startswith('(') and self.conf.get(section, option).endswith(')') :
327 parametres[option] = ast.literal_eval(self.conf.get(section, option))
328 elif self.conf.get(section, option).startswith('[') and self.conf.get(section, option).endswith(']') :
329 parametres[option] = ast.literal_eval(self.conf.get(section, option))
331 parametres[option] = self.conf.get(section, option)
# record the section name as the analysis type when the section did not
# declare one explicitly
332 if 'type' not in parametres :
333 parametres['type'] = section
336 def makeoptions(self, sections, parametres, outfile = None) :
338 for i, section in enumerate(sections) :
339 txt += '[%s]\n' % section
340 if not self.conf.has_section(section) :
341 self.conf.add_section(section)
342 for option in parametres[i] :
343 if isinstance(parametres[i][option], int) :
344 self.conf.set(section, option, `parametres[i][option]`)
345 txt += '%s = %i\n' % (option, parametres[i][option])
346 elif isinstance(parametres[i][option], basestring) :
347 self.conf.set(section, option, parametres[i][option].encode('utf8'))
348 txt += '%s = %s\n' % (option, parametres[i][option])
349 elif isinstance(parametres[i][option], wx.Colour) :
350 self.conf.set(section, option, str(parametres[i][option]))
351 txt += '%s = %s\n' % (option, str(parametres[i][option]))
352 elif option == 'analyses' :
355 self.conf.set(section, option, `parametres[i][option]`)
356 txt += '%s = %s\n' % (option, `parametres[i][option]`)
358 outfile = self.configfile
359 outfile = normpath_win32(outfile)
360 with open(outfile, 'w') as f :
361 f.write(txt.encode('utf8'))
364 def totext(self, parametres) :
367 for val in parametres :
368 if isinstance(parametres[val], int) :
369 txt.append(' \t\t: '.join([val, `parametres[val]`]))
370 elif isinstance(parametres[val], basestring) :
371 txt.append(' \t\t: '.join([val, parametres[val]]))
372 elif val in ['listet', 'stars'] :
375 txt.append(' \t\t: '.join([val, `parametres[val]`]))
376 return '\n'.join(txt)
def write_tab(tab, fileout) :
    """Write `tab` (a sequence of rows) to `fileout` as a ';'-separated CSV.

    Non-numeric cells are quoted (csv.QUOTE_NONNUMERIC).  Fix: the original
    opened the file inline (`csv.writer(open(fileout, 'wb'), ...)`) and never
    closed it, relying on garbage collection to flush the buffer; the handle
    is now closed deterministically with a context manager.
    """
    # 'wb' kept as-is: this code base targets Python 2, where the csv module
    # expects a binary-mode file.
    with open(fileout, 'wb') as f :
        writer = csv.writer(f, delimiter=';', quoting = csv.QUOTE_NONNUMERIC)
        writer.writerows(tab)
383 class BugDialog(wx.Dialog):
384 def __init__(self, *args, **kwds):
385 # begin wxGlade: MyDialog.__init__
386 kwds["style"] = wx.DEFAULT_DIALOG_STYLE | wx.STAY_ON_TOP
387 kwds["size"] = wx.Size(500, 200)
388 wx.Dialog.__init__(self, *args, **kwds)
389 self.SetTitle(kwds['title'])
390 self.text_ctrl_1 = wx.TextCtrl(self, -1, "", style=wx.TE_MULTILINE)
391 self.text_ctrl_1.SetBackgroundColour('#DDE8EB')
392 self.button_1 = wx.Button(self, wx.ID_OK, "")
394 self.__set_properties()
398 def __set_properties(self):
399 # begin wxGlade: MyDialog.__set_properties
400 self.SetMinSize(wx.Size(500, 200))
401 self.text_ctrl_1.SetMinSize(wx.Size(500, 200))
405 def __do_layout(self):
406 # begin wxGlade: MyDialog.__do_layout
407 sizer_1 = wx.BoxSizer(wx.VERTICAL)
408 sizer_1.Add(self.text_ctrl_1, 1, wx.EXPAND, 0)
409 sizer_1.Add(self.button_1, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
410 self.SetSizer(sizer_1)
415 def CreateIraFile(DictPathOut, clusternb, corpname='corpus_name', section = 'analyse'):
# Create/update the .ira analysis description file: record the analysis
# date, the number of clusters and the corpus name under `section`.
416 AnalyseConf = ConfigParser()
417 AnalyseConf.read(DictPathOut['ira'])
# add_section raises DuplicateSectionError when `section` already exists in
# the file just read -- NOTE(review): confirm callers always use a fresh one.
418 AnalyseConf.add_section(section)
419 date = datetime.datetime.now().ctime()
420 AnalyseConf.set(section, 'date', str(date))
421 AnalyseConf.set(section, 'clusternb', clusternb)
422 AnalyseConf.set(section, 'corpus_name', corpname)
424 fileout = open(DictPathOut['ira'], 'w')
425 AnalyseConf.write(fileout)
# NOTE(review): fileout.close() is presumably on the missing next lines
# (426-427); otherwise this handle leaks.
428 def sortedby(list, direct, *indices):
# NOTE(review): the parameter `list` shadows the builtin.
431 sortedby: sort a list of lists (e.g. a table) by one or more indices
432 (columns of the table) and return the sorted list
435 for list = [[2,3],[1,2],[3,1]]:
436 sortedby(list,1) will return [[3, 1], [1, 2], [2, 3]],
437 sortedby(list,0) will return [[1, 2], [2, 3], [3, 1]]
# decorate-sort-undecorate: prefix each row with the values of the chosen
# columns, sort on that prefix, then strip it off again.
# NOTE(review): lines 442-445 -- the third argument of the outer map and
# presumably the `direct`-controlled ascending branch -- are missing from
# this paste, so only the descending sort is visible here.
440 nlist = map(lambda x, indices=indices:
441 map(lambda i, x=x: x[i], indices) + [x],
446 nlist.sort(reverse=True)
# the original row was appended last in the decorated tuple
447 return map(lambda l: l[-1], nlist)
449 def add_type(line, dictlem):
# Append the lemma's grammatical type to a profile row; column 4 holds the
# form/lemma key looked up in dictlem.
# NOTE(review): the else branch and the return statement (lines 452-454)
# are missing from this paste.
450 if line[4] in dictlem:
451 line.append(dictlem[line[4]])
456 def treat_line_alceste(i, line) :
457 if line[0] == '*' or line[0] == '*****' :
462 elif float(line[5].replace(',', '.')) < 0.0001:
464 elif float(line[5].replace(',', '.')) > 0.05:
465 line[5] = 'NS (%s)' % str(float(line[5].replace(',', '.')))[0:7]
467 line[5] = str(float(line[5].replace(',', '.')))[0:7]
468 return [i, int(line[0]), int(line[1]), float(line[2]), float(line[3]), line[6], line[4], line[5]]
470 def ReadProfileAsDico(File, Alceste=False, encoding = sys.getdefaultencoding()):
472 print 'lecture des profiles'
473 FileReader = codecs.open(File, 'r', encoding)
474 Filecontent = FileReader.readlines()
478 #rows = [row.replace('\n', '').replace("'", '').replace('\"', '').replace(',', '.').replace('\r','').split(';') for row in Filecontent]
479 rows = [row.replace('\n', '').replace("'", '').replace('\"', '').replace('\r','').split(';') for row in Filecontent]
481 ClusterNb = rows[0][2]
483 clusters = [row[2] for row in rows if row[0] == u'**']
484 valclusters = [row[1:4] for row in rows if row[0] == u'****']
485 lp = [i for i, line in enumerate(rows) if line[0] == u'****']
486 prof = [rows[lp[i] + 1:lp[i+1] - 1] for i in range(0, len(lp)-1)] + [rows[lp[-1] + 1:len(rows)]]
488 prof = [[add_type(row, dictlem) for row in pr] for pr in prof]
489 prof = [[treat_line_alceste(i,line) for i, line in enumerate(pr)] for pr in prof]
491 prof = [[line + [''] for line in pr] for pr in prof]
492 prof = [[treat_line_alceste(i,line) for i, line in enumerate(pr)] for pr in prof]
493 for i, cluster in enumerate(clusters):
494 DictProfile[cluster] = [valclusters[i]] + prof[i]
497 def GetTxtProfile(dictprofile, cluster_size) :
# Render every cluster profile as aligned plain text, one section per
# class, separated by blank lines.
# NOTE(review): the initialisation of `proflist` (line 498) is missing
# from this paste.
499 for classe in range(0, len(dictprofile)) :
# dictprofile is keyed by the 1-based class number as a *string*
500 prof = dictprofile[str(classe + 1)]
501 clinfo = cluster_size[classe]
# header line "classe N - x uce sur y - z%" followed by fixed-width rows;
# rows that do not have exactly 8 columns are skipped
502 proflist.append('\n'.join([' '.join(['classe %i' % (classe + 1), '-', '%s uce sur %s - %s%%' % (clinfo[0], clinfo[1], clinfo[2])]), '\n'.join(['%5s|%5s|%6s|%6s|%8s|%8s|%20s\t%10s' % tuple([str(val) for val in line]) for line in prof if len(line)==8])]))
503 return '\n\n'.join(proflist)
505 def formatExceptionInfo(maxTBlevel=5):
# Return (exception class name, first exception argument or "<no args>",
# formatted traceback lines) for the exception currently being handled.
# Must be called from inside an except block for sys.exc_info() to be set.
506 cla, exc, trbk = sys.exc_info()
508 excName = cla.__name__
# NOTE(review): lines 509-513 (the try/except guarding the args access)
# are missing from this paste; line 514 is presumably its fallback.
512 excArgs = exc.args[0]
514 excArgs = "<no args>"
# cap the traceback at maxTBlevel frames
515 excTb = traceback.format_tb(trbk, maxTBlevel)
516 return (excName, excArgs, excTb)
519 # function contributed by students from the IUT
520 def decoupercharact(chaine, longueur, longueurOptimale, separateurs = None) :
# NOTE(review): many lines (521, 525, 529, 533, 535, 538-541, 543-544,
# 548, 553, 555-561, 563-564, 566-569) are missing from this paste.
522 Walk from the last character back to the beginning of the string.
523 If a '$' is found, we are done.
524 Otherwise look for the best candidate, i.e. the highest weight/distance ratio.
526 separateurs = [[u'.', 60.0], [u'?', 60.0], [u'!', 60.0], [u'£$£', 60], [u':', 50.0], [u';', 40.0], [u',', 10.0], [u' ', 0.1]]
527 trouve = False # whether a good separator has been found
528 iDecoupe = 0 # index of the character where the cut must happen
530 # clip the string so we work on at most `longueur` characters
531 longueur = min(longueur, len(chaine) - 1)
532 chaineTravail = chaine[:longueur + 1]
534 meilleur = ['', 0, 0] # kind, weight and position of the best separator
536 # first check whether a '$' marker is present
537 indice = chaineTravail.find(u'$')
542 # if nothing was found, look for the best separator
545 caractere = chaineTravail[nbCar]
546 distance = abs(longueurOptimale - nbCar) + 1
547 meilleureDistance = abs(longueurOptimale - meilleur[2]) + 1
549 # check whether the current character is a punctuation mark
550 for s in separateurs:
551 if caractere == s[0]:
552 # it is a punctuation mark
554 if s[1] / distance > float(meilleur[1]) / meilleureDistance:
562 # and stop the search
565 # move on to the preceding character
570 fin = chaine[iDecoupe + 1:]
571 retour = chaineTravail[:iDecoupe]
572 return len(retour) > 0, retour.split(), fin
573 # nothing was found
574 return False, chaine.split(), ''
577 exceptions = {'paragrapheOT' : u"Un problème de formatage (présence d'un marqueur de paragraphe (-*) en dehors d'un texte) est survenu à la ligne ",
578 'EmptyText' : u"Texte vide (probablement un problème de formatage du corpus). Le problème est apparu à la ligne ",
579 'CorpusEncoding' : u"Problème d'encodage.",
580 'TextBeforeTextMark' : u"Problème de formatage : du texte avant le premier marqueur de texte (****). Le problème est survenu à la ligne ",
581 'MissingAnalyse' : u'Aucun fichier à cet emplacement :\n',
584 def BugReport(parent, error = None):
585 for ch in parent.GetChildren():
586 if "<class 'wx._windows.ProgressDialog'>" == str(type(ch)):
588 excName, exc, excTb = formatExceptionInfo()
589 if excName == 'Exception' :
591 if len(exc.split()) == 2 :
592 mss, linenb = exc.split()
593 if mss in exceptions :
594 txt = exceptions[mss] + linenb
598 if exc in exceptions :
599 txt = exceptions[exc]
602 title = "Information"
604 txt = u' !== BUG ==! \n'
605 txt += u'*************************************\n'
606 txt += '\n'.join(excTb).replace(' ', ' ')
607 txt += excName + '\n'
611 dial = BugDialog(parent, **{'title' : title})
612 if 'Rerror' in dir(parent) :
616 dial.text_ctrl_1.write(txt)
617 dial.CenterOnParent()
621 def PlaySound(parent):
622 if parent.pref.getboolean('iramuteq', 'sound') :
624 if "gtk2" in wx.PlatformInfo:
625 error = Popen(['aplay','-q',os.path.join(parent.AppliPath,'son_fin.wav')])
627 sound = wx.Sound(os.path.join(parent.AppliPath, 'son_fin.wav'))
628 sound.Play(wx.SOUND_SYNC)
def ReadDicoAsDico(dicopath):
    """Parse a tab-separated dictionary file into a mapping.

    Each non-empty line of the UTF-8 file at `dicopath` is split on tabs;
    the first cell becomes the key and the remaining cells the value list.
    If a key occurs several times, the last line wins.
    """
    with codecs.open(dicopath, 'r', 'UTF8') as fin:
        raw_lines = fin.readlines()
    entries = {}
    for raw in raw_lines:
        if raw == u'':
            continue
        # strip the trailing newline, drop stray quotes, then split columns
        cells = raw.rstrip('\n\r').replace(u'\n', '').replace('"', '').split('\t')
        entries[cells[0]] = cells[1:]
    return entries
638 def ReadLexique(parent, lang = 'french', filein = None):
641 parent.lexique = ReadDicoAsDico(parent.DictPath.get(lang, 'french'))
643 parent.lexique = ReadDicoAsDico(filein)
648 parent.lexique = ReadDicoAsDico(filein)
650 def ReadList(filein, encoding = sys.getdefaultencoding(), sep = ';'):
652 with codecs.open(filein, 'r', encoding) as f :
654 content = [line.replace('\n', '').replace('\r','').replace('\"', '').replace(',', '.').split(sep) for line in content.splitlines()]
655 #file = codecs.open(filein, 'r', encoding)
656 #content = file.readlines()
658 first = content.pop(0)
659 #first = first.replace('\n', '').replace('\r','').replace('\"', '').split(sep)
663 #line = line.replace('\n', '').replace('\r','').replace('\"', '').replace(',', '.')
664 #line = line.split(';')
673 don = float('%.5f' % float(val))
679 def exec_RCMD(rpath, command) :
# Run "R CMD INSTALL <command>" with the R binary at `rpath` (installs an
# R package) and capture the exit status.
# NOTE(review): the return statement (line 683, presumably `return error`)
# is missing from this paste.
680 log.info('R CMD INSTALL %s' % command)
# double the backslashes so the Windows path survives argument passing
681 rpath = rpath.replace('\\','\\\\')
682 error = call(["%s" % rpath, 'CMD', 'INSTALL', "%s" % command])
685 def exec_rcode(rpath, rcode, wait = True, graph = False):
686 log.info("R Script : %s" % rcode)
688 if sys.platform == 'darwin' :
690 macversion = platform.mac_ver()[0].split('.')
691 if int(macversion[1]) < 5 :
698 rpath = rpath.replace('\\','\\\\')
699 env = os.environ.copy()
700 if sys.platform == 'darwin' and 'LC_ALL' not in env:
701 env['LC_ALL'] = 'en_US.UTF-8'
704 if sys.platform == 'win32':
705 error = call(["%s" % rpath, "--vanilla","--slave","-f", "%s" % rcode])
707 error = call([rpath, '--slave', "--vanilla", "-f %s" % rcode, "--encoding=UTF-8"], env = env)
710 if sys.platform == 'win32':
711 pid = Popen(["%s" % rpath, '--vanilla','--slave','-f', "%s" % rcode])
713 pid = Popen([rpath, '--slave', "--vanilla", "-f %s" % rcode, "--encoding=UTF-8"], stderr = PIPE, env = env)
717 if sys.platform == 'win32':
718 error = call(["%s" % rpath, '--vanilla','--slave','-f', "%s" % rcode])
719 elif sys.platform == 'darwin' and needX11:
720 os.environ['DISPLAY'] = ':0.0'
721 error = call([rpath, '--vanilla','--slave',"-f %s" % rcode, "--encoding=UTF-8"], env = env)
723 error = call([rpath, '--vanilla','--slave',"-f %s" % rcode, "--encoding=UTF-8"], env = env)
726 if sys.platform == 'win32':
727 pid = Popen(["%s" % rpath, '--vanilla','--slave','-f', "%s" % rcode])
728 elif sys.platform == 'darwin' and needX11:
729 os.environ['DISPLAY'] = ':0.0'
730 pid = Popen([rpath, '--vanilla','--slave',"-f %s" % rcode, "--encoding=UTF-8"], stderr = PIPE, env = env)
732 pid = Popen([rpath, '--vanilla','--slave',"-f %s" % rcode, "--encoding=UTF-8"], stderr = PIPE, env = env)
735 def check_Rresult(parent, pid) :
736 if isinstance(pid, Popen) :
737 if pid.returncode != 0 :
738 error = pid.communicate()
739 error = [str(error[0]), error[1]]
740 if error[1] is None :
742 parent.Rerror = '\n'.join([str(pid.returncode), '\n'.join(error)])
744 raise Exception('\n'.join([u'Erreur R', '\n'.join(error[1:])]))
753 raise Exception(u'Erreur R')
761 def launchcommand(mycommand):
764 def print_liste(filename,liste):
765 with open(filename,'w') as f :
767 f.write(';'.join(graph).encode(sys.getdefaultencoding(), errors='replace')+'\n')
769 def read_list_file(filename, encoding = sys.getdefaultencoding()):
# Read a ';'-separated list file into a list of rows, skipping lines that
# are empty or whitespace-only.
# NOTE(review): the return statement (lines 773-774, presumably
# `return ncontent`) is missing from this paste.
770 with codecs.open(filename,'rU', encoding) as f :
771 content=f.readlines()
772 ncontent=[line.replace('\n','').split(';') for line in content if line.strip() != '']
775 def progressbar(self, maxi) :
776 ira = wx.GetApp().GetTopWindow()
782 prog = wx.ProgressDialog("Traitements",
783 "Veuillez patienter...",
786 style=wx.PD_APP_MODAL | wx.PD_AUTO_HIDE | wx.PD_ELAPSED_TIME | wx.PD_CAN_ABORT
788 prog.SetSize((400,150))
789 #prog.SetIcon(ira._icon)
792 def treat_var_mod(variables) :
794 variables = list(set(variables))
795 varmod = [variable.split('_') for variable in variables]
796 vars = list(set([var[0] for var in varmod if len(var) >=2]))
798 mods = ['_'.join(v) for v in varmod if v[0] == var]
801 # for variable in variables :
802 # if u'_' in variable :
803 # forme = variable.split(u'_')
806 # if not var in var_mod :
807 # var_mod[var] = [variable]
809 # if not mod in var_mod[var] :
810 # var_mod[var].append(variable)
813 def doconcorde(corpus, uces, mots, uci = False, et = False) :
815 ucestxt1 = [row for row in corpus.getconcorde(uces)]
817 ucestxt1 = [row for row in corpus.getuciconcorde(uces)]
818 ucestxt1 = dict(ucestxt1)
822 listmot = [corpus.getlems()[lem].formes for lem in mots]
823 listmot = [corpus.getforme(fid).forme for lem in listmot for fid in lem]
826 mothtml = ['<font color=red><b>%s</b></font>' % mot for mot in listmot]
827 dmots = dict(zip(listmot, mothtml))
829 ucetxt = ucestxt1[uce].split()
830 ucetxt = ' '.join([dmots.get(mot, mot) for mot in ucetxt])
832 uciid = corpus.getucefromid(uce).uci
833 ucis_txt.append('<p><b>' + ' '.join(corpus.ucis[corpus.getucefromid(uce).uci].etoiles) + '<a href="%i_%i"> *%i_%i</a></b></p>' % (uciid, uce, uciid, uce))
835 ucis_txt.append('<p><b>' + ' '.join(corpus.ucis[uce].etoiles) + '</b></p>')
836 ucestxt.append(ucetxt)
837 return ucis_txt, ucestxt
840 def getallstcarac(corpus, analyse) :
841 pathout = PathOut(analyse['ira'])
842 profils = ReadProfileAsDico(pathout['PROFILE_OUT'], Alceste, self.encoding)
845 def read_chd(filein, fileout):
846 with open(filein, 'r') as f :
848 #content = [line[3:].replace('"',"").replace(' ','') for line in content.splitlines()]
849 content = [line.split('\t') for line in content.splitlines()]
850 chd = {'name':1, 'children':[]}
852 for i, line in enumerate(content) :
854 chd['children'] = [{'name': line[1],'size' : content[i+1][0]}, {'name':line[2], 'size': content[i+1][1]}]
855 mere[line[1]] = chd['children'][0]
856 mere[line[2]] = chd['children'][1]
858 if 'children' in mere[line[0]]:
859 mere[line[0]]['children'].append({'name': line[1],'size' : content[i+1][0]})
860 mere[line[1]] = mere[line[0]]['children'][-1]
861 mere[line[0]]['children'].append({'name': line[2],'size' : content[i+1][1]})
862 mere[line[2]] = mere[line[0]]['children'][-1]
864 mere[line[0]]['children'] = [{'name': line[1],'size' : content[i+1][0]}, {'name':line[2], 'size': content[i+1][1]}]
865 mere[line[1]] = mere[line[0]]['children'][-2]
866 mere[line[2]] = mere[line[0]]['children'][-1]
867 with open(fileout, 'w') as f :
868 f.write(json.dumps(chd))
871 translation_languages = {"Afrikaans":"af", "Albanian":"sq", "Amharic":"am", "Arabic":"ar", "Armenian":"hy", "Azeerbaijani":"az", "Basque":"eu", "Belarusian":"be", "Bengali":"bn", "Bosnian":"bs", "Bulgarian":"bg", "Catalan":"ca", "Cebuano":"ceb", "Chichewa":"ny", "Chinese (Simplified)":"zh-CN", "Chinese (Traditional)":"zh-TW", "Corsican":"co", "Croatian":"hr", "Czech":"cs", "Danish":"da", "Dutch":"nl", "English":"en", "Esperanto":"eo", "Estonian":"et", "Filipino":"tl", "Finnish":"fi", "French":"fr", "Frisian":"fy", "Galician":"gl", "Georgian":"ka", "German":"de", "Greek":"el", "Gujarati":"gu", "Haitian Creole":"ht", "Hausa":"ha", "Hawaiian":"haw", "Hebrew":"iw", "Hindi":"hi", "Hmong":"hmn ", "Hungarian":"hu", "Icelandic":"is", "Igbo":"ig", "Indonesian":"id", "Irish":"ga", "Italian":"it", "Japanese":"ja", "Javanese":"jw", "Kannada":"kn", "Kazakh":"kk", "Khmer":"km", "Korean":"ko", "Kurdish":"ku", "Kyrgyz":"ky", "Lao":"lo", "Latin":"la", "Latvian":"lv", "Lithuanian":"lt", "Luxembourgish":"lb", "Macedonian":"mk", "Malagasy":"mg", "Malay":"ms", "Malayalam":"ml", "Maltese":"mt", "Maori":"mi", "Marathi":"mr", "Mongolian":"mn", "Burmese":"my", "Nepali":"ne", "Norwegian":"no", "Pashto":"ps", "Persian":"fa", "Polish":"pl", "Portuguese":"pt", "Punjabi":"ma", "Romanian":"ro", "Russian":"ru", "Samoan":"sm", "Scots Gaelic":"gd", "Serbian":"sr", "Sesotho":"st", "Shona":"sn", "Sindhi":"sd", "Sinhala":"si", "Slovak":"sk", "Slovenian":"sl", "Somali":"so", "Spanish":"es", "Sundanese":"su", "Swahili":"sw", "Swedish":"sv", "Tajik":"tg", "Tamil":"ta", "Telugu":"te", "Thai":"th", "Turkish":"tr", "Ukrainian":"uk", "Urdu":"ur", "Uzbek":"uz", "Vietnamese":"vi", "Welsh":"cy", "Xhosa":"xh", "Yiddish":"yi", "Yoruba":"yo", "Zulu":"zu", }
874 def gettranslation(words, lf, lt) :
# Translate `words` from language code `lf` to `lt` through the public
# Google Translate endpoint, returning one translated token per line with
# spaces and separators re-normalised to '_'.
# NOTE(review): most of the User-Agent header dict (lines 875-876,
# 878-884, 886, 888) is missing from this paste.
877 agent = {'User-Agent':
885 .NET CLR 3.0.04506.30\
887 base_link = "https://translate.googleapis.com/translate_a/single?client=gtx&sl=%s&tl=%s&dt=t&q=%s"
# words are joined by newline so the service translates them as one request
889 totrans = urllib2.quote('\n'.join(words).encode('utf8'))
890 link = base_link % (lf, lt, totrans)
891 request = urllib2.Request(link, headers=agent)
892 raw_data = urllib2.urlopen(request).read()
893 data = json.loads(raw_data)
# BUG(review): the decode keyword is `errors=`, not `error=` -- as written
# this raises TypeError when reached; also json.loads already returns
# unicode in Python 2, so the decode step itself is suspect.  Needs a fix
# verified against live responses.
894 return [line[0].decode('utf8', error='replace').replace(u"'", u'_').replace(u' | ', u'|').replace(u' ', u'_').replace(u'-',u'_').replace(u'\n','') for line in data[0]]
896 def makenprof(prof, trans, deb=0) :
899 nprof.append(prof[0])
900 for i, val in enumerate(trans) :
901 line = prof[deb+i+1][:]
906 def treatempty(val) :
907 if val.strip() == '' :
912 def translateprofile(corpus, dictprofile, lf='it', lt='fr', maxword = 20) :
915 for i in range(len(dictprofile)) :
916 prof = dictprofile[`i+1`]
918 lenact = prof.index([u'*****', u'*', u'*', u'*', u'*', u'*', '', ''])
922 lenact = prof.index([u'*', u'*', u'*', u'*', u'*', u'*', '', ''])
928 lensup += prof.index([u'*', u'*', u'*', u'*', u'*', u'*', '', ''])
929 lensup = lensup - lenact
931 lensup += len(prof) - lenact
933 if lenact > maxword :
937 actori = [line[6] for line in prof[1:nlenact]]
938 act = [val.replace(u'_', u' ') for val in actori]
939 act = gettranslation(act, lf, lt)
940 for j, val in enumerate(actori) :
941 if act[j] not in lems :
944 while act[j] in lems :
945 act[j] = act[j] + u"+"
947 nprof[`i+1`] = makenprof(prof, act)
950 if lensup > maxword :
954 supori = [line[6] for line in prof[(1+lenact):(lenact+nlensup)]]
955 sup = [val.replace(u'_', u' ') for val in supori]
956 sup = [treatempty(val) for val in sup]
957 sup = gettranslation(sup, lf, lt)
958 for j, val in enumerate(supori) :
959 if sup[j] not in lems :
962 while sup[j] in lems :
963 sup[j] = sup[j] + u"+"
965 nprof[`i+1`].append([u'*****', u'*', u'*', u'*', u'*', u'*', '', ''])
966 nprof[`i+1`] += makenprof(prof, sup, deb=lenact)
969 lenet = prof.index([u'*', u'*', u'*', u'*', u'*', u'*', '', ''])
970 nprof[`i+1`].append([u'*', u'*', u'*', u'*', u'*', u'*', '', ''])
971 nprof[`i+1`] += prof[(lenet+1):]
976 def write_translation_profile(prof, lems, language, dictpathout) :
977 if os.path.exists(dictpathout['translations.txt']) :
978 with codecs.open(dictpathout['translations.txt'], 'r', 'utf8') as f :
980 translist = [line.split('\t') for line in translist.splitlines()]
984 toprint.append(['','','','','',''])
985 toprint.append([u'***', u'nb classes', `len(prof)`, u'***', '', ''])
986 for i in range(len(prof)) :
987 toprint.append([u'**', u'classe', `i+1`, u'**', '', ''])
988 toprint.append([u'****'] + prof[`i+1`][0] + [u'****'])
989 rest = [[`line[1]`, `line[2]`, `line[3]`, `line[4]`, line[6], line[7].replace('< 0,0001', '0.00009').replace('NS (','').replace(')','')] for line in prof[`i+1`][1:]]
990 for i, line in enumerate(prof[`i+1`][1:]) :
992 rest[i] = [u'*', u'*', u'*', u'*', u'*', u'*']
993 elif line[0] == u'*****' :
994 rest[i] = [u'*****',u'*',u'*', u'*', u'*', u'*']
996 with open(dictpathout['translation_profile_%s.csv' % language], 'w') as f :
997 f.write('\n'.join([';'.join(line) for line in toprint]).encode('utf8'))
998 with open(dictpathout['translation_words_%s.csv' % language], 'w') as f :
999 f.write('\n'.join(['\t'.join([val, lems[val]]) for val in lems]).encode('utf8'))
1000 if 'translation_profile_%s.csv' % language not in [val[0] for val in translist] :
1001 translist.append(['translation_profile_%s.csv' % language, 'translation_words_%s.csv' % language])
1002 with open(dictpathout['translations.txt'], 'w') as f :
1003 f.write('\n'.join(['\t'.join(line) for line in translist]).encode('utf8'))
1005 def makesentidict(infile, language) :
1006 #'/home/pierre/workspace/iramuteq/dev/langues/NRC/NRC-Emotion-Lexicon.csv'
1007 with codecs.open(infile,'r', 'utf8') as f :
1009 content = [line.split('\t') for line in content.splitlines()]
1010 titles = content.pop(0)
1011 senti = ['Positive', 'Negative', 'Anger', 'Anticipation', 'Disgust', 'Fear', 'Joy', 'Sadness', 'Surprise', 'Trust']
1014 sentid[sent] = titles.index(sent)
1015 frtitle = [val for val in titles if '(fr)' in val]
1016 frid = titles.index(frtitle[0])
1017 sentidict = [[line[frid].lower(), [line[sentid[sent]] for sent in senti]] for line in content]
1018 pos = ['positive'] + [line[0] for line in sentidict if line[1][0] == '1']
1019 neg = ['negative'] + [line[0] for line in sentidict if line[1][1] == '1']
1020 anger = ['anger'] + [line[0] for line in sentidict if line[1][2] == '1']
1021 anticipation = ['anticipation'] + [line[0] for line in sentidict if line[1][3] == '1']
1022 disgust = ['disgust'] + [line[0] for line in sentidict if line[1][4] == '1']
1023 fear = ['fear'] + [line[0] for line in sentidict if line[1][5] == '1']
1024 joy = ['joy'] + [line[0] for line in sentidict if line[1][6] == '1']
1025 sadness = ['sadness'] + [line[0] for line in sentidict if line[1][7] == '1']
1026 surprise = ['surprise'] + [line[0] for line in sentidict if line[1][8] == '1']
1027 trust = ['trust'] + [line[0] for line in sentidict if line[1][9] == '1']
1028 with open('/tmp/tgenemo.csv', 'w') as f :
1029 for val in [pos, neg, anger, anticipation, disgust, fear, joy, sadness, surprise, trust] :
1030 f.write('\t'.join(val).encode('utf8') + '\n')
1032 def countsentfromprof(prof, encoding, sentidict) :
1033 with codecs.open(prof, 'r', encoding) as f :
1035 content = [line.split(';') for line in content.splitlines()]
1037 content = [[line[0], [int(val) for val in line[1:]]] for line in content]
1039 content = dict(content)
1042 def iratolexico(infile, outfile, encoding) :
1043 with codecs.open(infile, 'r', encoding) as f :
1045 if line.startswith(u'**** ') :