# -*- coding: utf-8 -*-
#Author: Pierre Ratinaud
-#Copyright (c) 2008-2020 Pierre Ratinaud
-#modification pour python 3 : Laurent Mérat, 6x7 - mai 2020
+#Copyright (c) 2008-2011 Pierre Ratinaud
#License: GNU/GPL
#------------------------------------
if (!is.null(graph.simi$com)) {
com <- graph.simi$com
colm <- rainbow(length(com))
- if (vertex.size != 0 || graph.simi$halo) {
+ if (sum(vertex.size) != 0 || graph.simi$halo) {
vertex.label.color <- 'black'
vertex.col <- colm[membership(com)]
} else {
tab <- read.csv2("%s", header=TRUE, sep=';', row.names=1)
""" % (ffr(self.pathout['tableafcm.csv']))
txt += """
+ cs <- colSums(tab)
+ if (min(cs) == 0) {
+ print('empty columns !!')
+ vide <- which(cs==0)
+ print(vide)
+ tab <- tab[,-vide]
+ }
+ #print('#### RcppIramuteq for C++ Labbe ####')
+ #library(RcppIramuteq)
+ #dist.mat <- labbe(as.matrix(tab))
+ #rownames(dist.mat) <- colnames(tab)
dist.mat <- dist.labbe(tab)
dist.mat <- as.dist(dist.mat, upper=F, diag=F)
write.table(as.matrix(dist.mat), "%s", sep='\t')
from uuid import uuid4
import datetime
from copy import copy
+#------test spacy------------
+#import spacy
+#nlp = spacy.load("fr_core_news_lg")
#------------------------------------
# import des fichiers du projet
def read_corpus(self) :
log.info('read corpus')
- self.parametres['syscoding'] = sys.getdefaultencoding()
+ self.parametres['syscoding'] = 'utf8'
if self.conncorpus is None :
self.conn_all()
res = self.ccorpus.execute('SELECT * FROM etoiles;')
self.make_iduces()
actuci = ''
actpara = False
- with open(outf,'w') as f :
+ with open(outf,'w', encoding='utf8') as f :
for uce in res :
if self.iduces[uce[0]].uci == actuci and self.iduces[uce[0]].para == actpara :
f.write(uce[1] + '\n')
longueur_max = max([len(val) for val in metas])
first = ['column_%i' % i for i in range(longueur_max)]
metas.insert(0, first)
- with open(outf, 'w') as f :
+ with open(outf, 'w', encoding='utf8') as f :
f.write('\n'.join(['\t'.join(line) for line in metas]))
def export_corpus_classes(self, outf, alc = True, lem = False, uci = False) :
self.make_iduces()
else :
res = self.getallucis()
- with open(outf, 'w') as f :
+ with open(outf, 'w', encoding='utf8') as f :
for uce in res :
guce = uce[1]
if not uci :
self.make_iduces()
else :
res = self.getuciconcorde(sts)
- with open(outf, 'w') as f :
+ with open(outf, 'w', encoding='utf8') as f :
for uce in res :
guce = uce[1]
if not uci :
outf = os.path.join(rep, outf)
if lem :
guce = ' '.join([self.formes[forme].lem for forme in guce.split()])
- with open(outf, 'w') as f :
+ with open(outf, 'w', encoding='utf8') as f :
f.write(guce) #.encode('cp1252', errors = 'replace'))
def export_tropes(self, fileout, classe, lem = False, uci = False) :
self.make_iduces()
else :
res = self.getuciconcorde(sts)
- with open(fileout, 'w') as f :
+ with open(fileout, 'w', encoding='utf8') as f :
for uce in res :
guce = uce[1]
if lem :
else :
tab = [[lem] + [len(set(self.getlemuces(lem)).intersection(classe)) for classe in ucecl] for lem in actives]
tab = [[line[0]] + [repr(val) for val in line[1:]] for line in tab if sum(line[1:]) >= 3]
- with open(fileout, 'w') as f :
+ with open(fileout, 'w', encoding='utf8') as f :
f.write('\n'.join([';'.join(line) for line in tab]))
def make_etoiles(self) :
else :
etoileuces = self.getetoileucis()
etoileuces = dict([[et, etoileuces[et]] for et in etoileuces if len(etoileuces[et]) > 1])
- with open(fileout, 'w') as f :
+ with open(fileout, 'w', encoding='utf8') as f :
f.write('\n'.join([';'.join([et] + [repr(len(set(etoileuces[et]).intersection(classe))) for classe in ucecl]) for et in etoileuces])) #.encode(self.parametres['syscoding'])
#etoiles = self.make_etoiles()
#with open(fileout, 'w') as f :
ucecl[uce] = 0
color = ['black'] + colors[len(self.lc) - 1]
txt = '''<html>
- <meta http-equiv="content-Type" content="text/html; charset=%s" />
+ <meta http-equiv="content-Type" content="text/html; charset=utf8" />
<body>
- ''' % sys.getdefaultencoding()
+ '''
if not uci :
res = self.getalluces()
self.make_iduces()
for taille_segment in range(lenmin,lenmax) :
d =self.count_from_list_cl([' '.join(uce[i:i+taille_segment]) for i in range(len(uce)-(taille_segment - 1))], d, b, len(self.lc))
result = [[seg] + [str(val) for val in d[seg]] for seg in d if sum(d[seg]) >= effmin]
- with open(fileout, 'w') as f :
+ with open(fileout, 'w', encoding='utf8') as f :
f.write('\n'.join([';'.join(line) for line in result]))
def make_proftype(self, outf) :
res[gram][i] += sum([lemuceeff[uce] for uce in concern])
res = [[gram] + [repr(val) for val in res[gram]] for gram in res]
res.sort()
- with open(outf, 'w') as f :
+ with open(outf, 'w', encoding='utf8') as f :
f.write('\n'.join([';'.join(line) for line in res]))
def make_ucecl_from_R(self, filein) :
log.info('%f' % (time() - t1))
if outf is not None :
toprint = '\n'.join([';'.join([repr(i), repr(occurrences[i]), repr(formescl[i]), repr(hapaxcl[i]), repr(lenclasses[i]), repr(float(hapaxcl[i])/float(formescl[i]))]) for i in occurrences])
- with open(outf, 'w') as f :
+ with open(outf, 'w', encoding='utf8') as f :
f.write(toprint)
else :
return [[repr(occurrences[i]), repr(formescl[i]), repr(hapaxcl[i]), repr(lenclasses[i]), repr(float(hapaxcl[i])/float(formescl[i]))] for i in occurrences]
txt += """
</body></html>
"""
- with open('/tmp/testhapxuce.html','w') as f :
+ with open('/tmp/testhapxuce.html','w', encoding='utf8') as f :
f.write(txt)
def export_dictionary(self, fileout, syscoding) :
listformes = [[self.formes[forme].freq, forme, self.formes[forme].lem, self.formes[forme].gram] for forme in self.formes]
listformes.sort(reverse = True)
listformes = [forme[1:] + [repr(forme[0])] for forme in listformes]
- with open(fileout, 'w') as f :
+ with open(fileout, 'w', encoding='utf8') as f :
f.write('\n'.join(['\t'.join(forme) for forme in listformes]))
def export_lems(self, fileout, syscoding) :
self.make_idformes()
listlem = [[lem, '\t'.join(['\t'.join([self.idformes[forme].forme, repr(self.lems[lem].formes[forme])]) for forme in self.lems[lem].formes])] for lem in self.lems]
listlem.sort()
- with open(fileout, 'w') as f :
+ with open(fileout, 'w', encoding='utf8') as f :
f.write('\n'.join(['\t'.join(lem) for lem in listlem]))
class BuildFromAlceste(BuildCorpus) :
def read_corpus(self, infile) :
+
if self.dlg is not None :
self.dlg.Pulse('textes : 0 - segments : 0')
self.limitshow = 0
if self.testuci(line) :
iduci += 1
if txt != [] :
+ #doc = nlp(' '.join(txt))
+ #print([[word, word.pos_, word.lemma_] for word in doc])
iduce, idpara = self.treattxt(txt, iduce, idpara, iduci - 1)
txt = []
self.corpus.ucis.append(Uci(iduci, line))
# Persist the tgen mapping to a tab-separated text file: one line per key,
# the key followed by its associated values. Falls back to self.path when
# no explicit path is given.
# NOTE(review): the '-'/'+' pair below is diff residue — the '+' line is the
# python-3 replacement adding an explicit utf8 encoding; only one of the two
# open() calls belongs in the final source.
def write(self, path = None):
if path is None :
path = self.path
- with open(path, 'w') as f :
+ with open(path, 'w', encoding='utf8') as f :
f.write('\n'.join(['\t'.join([val] + self.tgen[val]) for val in self.tgen]))
def writetable(self, pathout, tgens, totocc):
etoiles = list(totocc.keys())
etoiles.sort()
- with open(pathout, 'w') as f :
+ with open(pathout, 'w', encoding='utf8') as f :
line = '\t'.join(['tgens'] + etoiles) + '\n'
f.write(line)
for t in tgens :
self.read()
# Load the analysis history from disk and rebuild the in-memory lookup
# tables: uuid -> position maps for corpora and matrices, and uuid -> object
# maps for analyses and matrix analyses.
# NOTE(review): diff residue — storage was migrated from shelve to JSON;
# '-' lines are the old shelve calls, '+' lines the JSON replacement.
# Assumes self.filein is a JSON file holding optional 'history' and 'matrix'
# lists — TODO confirm against the matching write() method.
def read(self) :
- d = shelve.open(self.filein)
+ with open(self.filein, 'r') as fjson :
+ d = json.load(fjson)
+# d = shelve.open(self.filein, protocol=1)
self.history = d.get('history', [])
self.matrix = d.get('matrix', [])
self.ordercorpus = dict([[corpus['uuid'], i] for i, corpus in enumerate(self.history)])
self.analyses = dict([[analyse['uuid'], analyse] for corpus in self.history for analyse in corpus.get('analyses', [])])
self.matrixanalyse = dict([[mat['uuid'], mat] for mat in self.matrix])
self.ordermatrix = dict([[matrix['uuid'], i] for i, matrix in enumerate(self.matrix)])
- d.close()
+# d.close()
# Serialize the current history ('history' and 'matrix' lists) to
# self.filein as pretty-printed JSON.
# NOTE(review): diff residue — '-' lines are the old shelve-based storage,
# '+' lines the JSON replacement. `default=str` silently stringifies any
# value json cannot serialize natively; verify nothing important (dates,
# custom objects) loses type information on the read() round-trip.
def write(self) :
- d = shelve.open(self.filein)
+ d = {}
d['history'] = self.history
d['matrix'] = self.matrix
- d.close()
+ with open(self.filein, 'w') as f :
+ f.write(json.dumps(d, indent=4, default=str))
+ #d = shelve.open(self.filein, protocol=1)
+ #d.close()
def add(self, analyse) :
log.info('add to history %s' % analyse.get('corpus_name', 'pas un corpus'))
if outfile is None :
outfile = self.configfile
outfile = normpath_win32(outfile)
- with open(outfile, 'w') as f :
+ with open(outfile, 'w', encoding="utf-8") as f :
f.write(txt)
#self.conf.write(f)
# Write a table (iterable of rows) to `fileout` as a semicolon-delimited CSV,
# quoting every non-numeric field.
# NOTE(review): the file handle passed to csv.writer is never closed
# explicitly — relies on interpreter cleanup; consider a `with` block.
# The '+' line is the python-3 replacement adding newline='' (required for
# the csv module) and an explicit utf8 encoding.
def write_tab(tab, fileout) :
- csvWriter = csv.writer(open(fileout, 'w'), delimiter=';', quoting = csv.QUOTE_NONNUMERIC)
+ csvWriter = csv.writer(open(fileout, 'w', newline='', encoding='utf8'), delimiter=';', quoting = csv.QUOTE_NONNUMERIC)
csvWriter.writerows(tab)
class BugDialog(wx.Dialog):
AnalyseConf.set(section, 'clusternb', clusternb)
AnalyseConf.set(section, 'corpus_name', corpname)
- fileout = open(DictPathOut['ira'], 'w')
+ fileout = open(DictPathOut['ira'], 'w', encoding='utf8')
AnalyseConf.write(fileout)
fileout.close()
line[5] = str(float(line[5].replace(',', '.')))[0:7]
return [i, int(line[0]), int(line[1]), float(line[2]), float(line[3]), line[6], line[4], line[5]]
- def ReadProfileAsDico(File, Alceste=False, encoding = sys.getdefaultencoding()):
+ def ReadProfileAsDico(File, Alceste=False, encoding = 'utf8'):
dictlem = {}
print('lecture des profiles')
- FileReader = codecs.open(File, 'r', encoding)
+ FileReader = open(File, 'r', encoding='utf8')
Filecontent = FileReader.readlines()
FileReader.close()
DictProfile = {}
print('pas de son')
# Read a tab-separated dictionary file and return it as a dict mapping the
# first column of each non-empty line to the list of remaining columns.
# Double quotes and embedded newlines are stripped from each line first.
# NOTE(review): '-'/'+' pair is diff residue — codecs.open was replaced by
# the python-3 built-in open() with an encoding argument.
def ReadDicoAsDico(dicopath):
- with codecs.open(dicopath, 'r', 'UTF8') as f:
+ with open(dicopath, 'r', encoding='UTF8') as f:
content = f.readlines()
lines = [line.rstrip('\n\r').replace('\n', '').replace('"', '').split('\t') for line in content if line != '']
return dict([[line[0], line[1:]] for line in lines])
else :
parent.lexique = ReadDicoAsDico(filein)
- def ReadList(filein, encoding = sys.getdefaultencoding(), sep = ';'):
+ def ReadList(filein, encoding = 'utf8', sep = ';'):
#file = open(filein)
- with codecs.open(filein, 'r', encoding) as f :
+ with open(filein, 'r', encoding='utf8') as f :
content = f.read()
content = [line.replace('\n', '').replace('\r','').replace('\"', '').replace(',', '.').split(sep) for line in content.splitlines()]
#file = codecs.open(filein, 'r', encoding)
Popen(mycommand)
# Write a list of rows to `filename`, one row per line, fields joined with
# semicolons. Each element of `liste` is expected to be an iterable of
# strings — TODO confirm with callers.
# The '+' line is the python-3 replacement adding an explicit utf8 encoding.
def print_liste(filename,liste):
- with open(filename,'w') as f :
+ with open(filename,'w', encoding='utf8') as f :
for graph in liste :
f.write(';'.join(graph) +'\n')
# Read a semicolon-delimited text file and return its non-blank lines as a
# list of string lists (newlines stripped).
# NOTE(review): the `encoding` parameter is kept for backward compatibility
# but is now ignored — the '+' replacement hard-codes utf8 in open().
- def read_list_file(filename, encoding = sys.getdefaultencoding()):
- with codecs.open(filename,'r', encoding) as f:
+
+ def read_list_file(filename, encoding = 'utf8'):
+ with open(filename,'r', encoding='utf8') as f:
content=f.readlines()
ncontent=[line.replace('\n','').split(';') for line in content if line.strip() != '']
return ncontent
def getallstcarac(corpus, analyse) :
pathout = PathOut(analyse['ira'])
- profils = ReadProfileAsDico(pathout['PROFILE_OUT'], Alceste, self.encoding)
+ profils = ReadProfileAsDico(pathout['PROFILE_OUT'], Alceste, 'utf8')
print(profils)
def read_chd(filein, fileout):
def write_translation_profile(prof, lems, language, dictpathout) :
if os.path.exists(dictpathout['translations.txt']) :
- with codecs.open(dictpathout['translations.txt'], 'r', 'utf8') as f :
+ with open(dictpathout['translations.txt'], 'r', encoding='utf8') as f :
translist = f.read()
translist = [line.split('\t') for line in translist.splitlines()]
else :
elif line[0] == '*****' :
rest[i] = ['*****','*','*', '*', '*', '*']
toprint += rest
- with open(dictpathout['translation_profile_%s.csv' % language], 'w') as f :
+ with open(dictpathout['translation_profile_%s.csv' % language], 'w', encoding='utf8') as f :
f.write('\n'.join([';'.join(line) for line in toprint]))
- with open(dictpathout['translation_words_%s.csv' % language], 'w') as f :
+ with open(dictpathout['translation_words_%s.csv' % language], 'w', encoding='utf8') as f :
f.write('\n'.join(['\t'.join([val, lems[val]]) for val in lems]))
if 'translation_profile_%s.csv' % language not in [val[0] for val in translist] :
translist.append(['translation_profile_%s.csv' % language, 'translation_words_%s.csv' % language])
- with open(dictpathout['translations.txt'], 'w') as f :
+ with open(dictpathout['translations.txt'], 'w', encoding='utf8') as f :
f.write('\n'.join(['\t'.join(line) for line in translist]))
def makesentidict(infile, language) :
from tableau import Tableau
from tabrsimple import InputText
from tabsimi import DoSimi
+from tabcatego import Categorisation
from tabsplitvar import SplitMatrixFromVar
from tabverges import Prototypical
from textaslexico import Lexico
ID_Tabcontent = wx.Window.NewControlId()
ID_AFCM = wx.Window.NewControlId()
ID_SIMI = wx.Window.NewControlId()
+ID_CATE = wx.Window.NewControlId()
ID_CloseTab = wx.Window.NewControlId()
ID_SaveTab = wx.Window.NewControlId()
ID_CreateText = wx.Window.NewControlId()
# No-op flush: satisfies the file-like interface expected of objects
# substituted for stdout/stderr (see the writer()/printer() assignments
# that follow in this hunk).
def flush(self):
pass
- sys.stderr = writer()
- sys.stdout = printer()
+ #sys.stderr = writer()
+ #sys.stdout = printer()
images_analyses = {
'textroot' : 'textroot.png',
'preferences' : 'preferences.png',
'exportmetatable' : 'exportmetatable.png',
'importdmi' : 'twitter.png',
- 'labbe' : 'spec.png'
+ 'labbe' : 'spec.png',
+ 'categorisation' : 'spec.png',
}
log.info('Starting Iramuteq... ' )
log.info('version : %s' % ConfigGlob.get('DEFAULT', 'version'))
wx.Frame.__init__(self, parent, id, title, pos, size, style)
+ #Font
+ self.SetFont(wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
# configuration
self.AppliPath = AppliPath
self.images_path = os.path.join(AppliPath,'images')
[ID_SIMI, _("Similarities Analysis"), 'simimatrix'],
[ID_proto, _("Prototypical Analysis"), 'proto'],
[ID_Splitfromvar, _("Split from variable"), 'subcorpusmeta'],
+ [ID_CATE, _("ElCaTeGoRiZatoR"), 'categorisation'],
]
for analyse in matanalyses :
if not isinstance(analyse, dict) :
#------------------------------------------------------------------------------------------------
# fichier d'historique de Iramuteq
#------------------------------------------------------------------------------------------------
- #if not os.path.exists(os.path.join(UserConfigPath, 'history.db')) :
- # with open(os.path.join(UserConfigPath, 'history.db'), 'w') as f :
- # f.write('')
- self.history = History(os.path.join(UserConfigPath, 'historyIramuteq'))
+ if not os.path.exists(os.path.join(UserConfigPath, 'history.db')) :
+ with open(os.path.join(UserConfigPath, 'history.db'), 'w') as f :
+ f.write('{}')
+ self.history = History(os.path.join(UserConfigPath, 'history.db'))
# l'extension ".db" est ajoutée automatiquement par le module
#------------------------------------------------------------------------------------------------
Name("lefttree").
Caption(_("Historic")).
Left().
- MinSize(wx.Size(200,500)).
+ MinSize(wx.Size(300,400)).
Layer(1).
Position(1).
CloseButton(False).
self.Bind(wx.EVT_MENU, self.OnAFCM, id=ID_AFCM)
self.Bind(wx.EVT_MENU, self.OnProto, id=ID_proto)
self.Bind(wx.EVT_MENU, self.OnSplitVar, id = ID_Splitfromvar)
+ self.Bind(wx.EVT_MENU, self.OnCategorisation, id = ID_CATE)
#self.Bind(wx.EVT_MENU, self.OnRCode, id=ID_RCODE) #???
#self.Bind(wx.EVT_MENU, self.OnSplitVar, id=ID_SPLITVAR) #???
#self.Bind(wx.EVT_MENU, self.OnCheckcorpus, id = ID_CHECKCORPUS) #???
BestRPath = False
if not CheckRPath(self.PathPath) :
if sys.platform == 'win32':
- BestRPath = FindRPAthWin32()
+ if os.path.exists(self.AppliPath + '\\R\\R') :
+ BestRPath = self.AppliPath + '\\R\\R\\bin\\x64\\R.exe'
+ else :
+ BestRPath = FindRPAthWin32()
else:
BestRPath = FindRPathNix()
if BestRPath:
if not menu_pos is None :
self.mb.EnableTop(menu_pos, Show)
self.mb.Refresh()
- self._mgr.Update()
+ #self._mgr.Update()
#--------------------------------------------------------------------
# fin de __init__ du wx.Frame
if isinstance(parent, IraFrame) :
npage = self.nb.GetPage(new)
if 'parametres' in dir(npage) :
- self.tree.GiveFocus(uuid=npage.parametres['uuid'])
+ #self.tree.GiveFocus(uuid=npage.parametres['uuid'])
if npage.parametres.get('matrix', False) :
self.ShowMenu('text', False)
self.ShowMenu('matrix', True)
elif npage.parametres.get('corpus', False) :
self.ShowMenu('text')
self.ShowMenu('matrix', False)
+ #self._mgr.Update()
+ #wx.CallAfter(self.nb.SendSizeEvent)
+ #self.Refresh()
# action ou évènement ?
def OnCloseTab(self, evt):
pane.Hide()
self._mgr.GetPane(panel).Show()
self._mgr.Update()
+ print('show a pane refresh')
+ wx.CallAfter(self.nb.SendSizeEvent)
+ self.Refresh()
# action ou évènement ?
def OnAcceuil(self, event):
def OnSimiTab(self, event, matrix = None):
self.analyse_matrix(DoSimi, matrix = matrix, analyse_type = 'simimatrix', dlgnb = 5)
# Menu handler: launch the 'categorisation' matrix analysis using the
# Categorisation class (from tabcatego), with a single-step progress dialog
# (dlgnb = 1). All lines are '+'-prefixed diff residue from the patch that
# introduced this feature.
+ def OnCategorisation(self, event, matrix = None) :
+ self.analyse_matrix(Categorisation, matrix = matrix, analyse_type = 'categorisation', dlgnb = 1)
+
+
def OnCHDReinert(self, event, matrix = None):
#if matrix is None :
# matrix = self.tree.getmatrix()
displaySize = wx.DisplaySize()
w = displaySize[0]/1.2
h = displaySize[1]/1.2
- frame = IraFrame(None, -1, "IRaMuTeQ " + ConfigGlob.get('DEFAULT', 'version'), size=(w, h))
+ frame = IraFrame(None, -1, "IRaMuTeQ " + ConfigGlob.get('DEFAULT', 'version'), size=(int(w), int(h)))
frame.Show()
frame.finish_init()
frame.Upgrade()
from profile_segment import ProfileSegment
from listlex import *
from Liste import *
+from elcategorizator import ElCategorizator
from search_tools import SearchFrame
from dialog import PrefGraph, PrefExport, PrefSimpleFile, PrefDendro, SimpleDialog, ImageViewer
from guifunct import SelectColumn, PrepSimi, PrefSimi, redosimi
for i in range(0, clnb) :
clusternames[i] = ' '.join(['%i' % (i + 1), _('Cluster'), '%i' % (i + 1)])
if os.path.exists(self.pathout['classes_names.txt']) :
- with codecs.open(self.pathout['classes_names.txt'], 'r', self.parent.syscoding) as f :
+ with open(self.pathout['classes_names.txt'], 'r', encoding='utf8') as f :
clusternames_ = f.read()
clusternames_ = dict([[i, ' '.join([repr(i + 1), line])] for i, line in enumerate(clusternames_.splitlines())])
clusternames.update(clusternames_)
panel = wx.Panel(parent, -1)
sizer1 = wx.BoxSizer(wx.VERTICAL)
if os.path.exists(DictPathOut['pre_rapport']):
- with codecs.open(DictPathOut['pre_rapport'], 'r') as f :
+ with open(DictPathOut['pre_rapport'], 'r', encoding='utf8') as f :
txt = f.read()
self.debtext = txt
else :
self.parametres['tgenspec'] = os.path.join(self.parametres['pathout'], 'tgenchi2.csv')
TgenLayout(panel)
if os.path.exists(self.dictpathout['translations.txt']) :
- with codecs.open(self.dictpathout['translations.txt'], 'r', 'utf8') as f:
+ with open(self.dictpathout['translations.txt'], 'r', encoding='utf8') as f:
translist = f.read()
translist = [line.split('\t') for line in translist.splitlines()]
for line in translist :
def opentrans(self, trans) :
prof = ReadProfileAsDico(self.dictpathout[trans[0]], False)
- with codecs.open(self.dictpathout[trans[1]], 'r') as f :
+ with open(self.dictpathout[trans[1]], 'r', encoding='utf8') as f :
lems = f.read()
lems = [line.split('\t') for line in lems.splitlines()]
lems = dict(lems)
txt += ''.join([sep, '###########################', sep, _('time'), ' : %s' % parametres.get('time', ''), sep, '###########################', sep])
# ecriture du resultat dans le fichier
- with open(self.pathout['pre_rapport'], 'w') as f :
+ with open(self.pathout['pre_rapport'], 'w', encoding='utf8') as f :
f.write(txt)
self.dictpathout = StatTxtPathOut(parametres['pathout'])
#self.corpus.read_corpus_from_shelves(self.corpus.dictpathout['db'])
self.parent = ira
+ self.corpus.parametres['syscoding'] = 'UTF8'
self.encoding = self.corpus.parametres['syscoding']
self.parametres = parametres
self.DictSpec, first = ReadList(self.dictpathout['tablespecf'], self.corpus.parametres['syscoding'])
lcle = {'total' :'total.csv', 'formes_actives':'formes_actives.csv', 'formes_supplémentaires':'formes_supplémentaires.csv', 'hapax': 'hapax.csv'}
self.result = {}
for key in lcle :
- with codecs.open(self.pathout[lcle[key]], 'r', sys.getdefaultencoding()) as f :
+ with open(self.pathout[lcle[key]], 'r', encoding='utf-8') as f :
self.result[key] = [line.split(';') for line in f.read().splitlines()]
self.result[key] = dict([[i,[line[0],int(line[1]), line[2]]] for i, line in enumerate(self.result[key])])
- with open(self.pathout['glob.txt'], 'r') as f :
+ with open(self.pathout['glob.txt'], 'r', encoding='utf-8') as f :
self.result['glob'] = f.read()
#saveAsGEXF(graph, filepath = fileout)
""" % (ffr(self.pathout['RData.RData']), ffr(self.parent.RscriptsPath['simi']), fileout)
filetmp = tempfile.mktemp()
- with open(filetmp, 'w') as f :
+ with open(filetmp, 'w', encoding='utf8') as f :
f.write(txt)
exec_rcode(self.ira.RPath, filetmp)
mss = wx.MessageDialog(self.ira, fileout, _('File exported'), wx.OK)
#self.ira.nb.SetSelection(self.ira.nb.GetPageCount() - 1)
#self.ira.ShowAPane("Tab_content")
# Layout class for the categorisation analysis: builds an ElCategorizator
# panel and adds it as a notebook tab named after the analysis.
# NOTE(review): the tab label spells 'ElCaTeGoRiZaToR' while the menu entry
# elsewhere in this patch uses 'ElCaTeGoRiZatoR' — confirm which
# capitalization is intended. All lines are '+'-prefixed diff residue.
+class CateLayout(DefaultMatLayout) :
+
+    def dolayout(self) :
+        TabCate = ElCategorizator(self.ira.nb, self.pathout, self.tableau)
+        self.ira.nb.AddPage(TabCate, ' - '.join([_('ElCaTeGoRiZaToR'), self.parametres['name']]))
class SimiMatLayout(DefaultMatLayout) :