1 # -*- coding: utf-8 -*-
2 #Author: Pierre Ratinaud
9 from subprocess import *
11 from xml.dom import minidom, Node
# Site-specific, hard-coded paths for the Italian lexicon build.
# NOTE(review): adjust to the local installation before running.
txtdir = '/home/pierre/workspace/iramuteq/dev/langues/italian'
# TreeTagger wrapper command used to POS-tag the corpus.
treetagger = '/home/pierre/prog/treetagger/cmd/tree-tagger-italian-utf8'
# Raw tagged lexicon produced by Parser and read back by PostTreat.
fileout = '/home/pierre/workspace/iramuteq/dev/langues/lexique_it_t1.txt'
# Italian stopword list, UTF-8, one word per line.
stopword = '/home/pierre/workspace/iramuteq/dev/langues/IT_NEW_stopwords_utf8.txt'
# Final cleaned lexicon written by PostTreat.
lexique = '/home/pierre/workspace/iramuteq/dev/langues/lexique_it.txt'
# Wikipedia XML dump for the (currently commented-out) SAX pipeline.
xmlfile = '/home/pierre/workspace/iramuteq/dev/langues/itwiki-latest-pages-articles.xml'
25 class WikiPediaHandler(xml.sax.ContentHandler):
26 def __init__(self, sparser) :
32 self.sparser = sparser
34 def startElement(self, name, attrs):
36 self.sparser.treat_formes()
37 self.last = len(self.sparser.formes)
41 if len(self.totreat) > 100000 :
42 self.diff = len(self.sparser.formes) - self.last
43 self.sparser.doparsewiki(' '.join(self.totreat))
45 print 'titres :', self.tottitle
51 # for item in attrs.items():
53 def characters(self, content) :
55 self.totreat.append(content)
58 def __init__(self, txtdir, encodage, treetagger, fileout) :
60 self.encodage = encodage
63 self.fileout = fileout
65 #self.treat_formes(fileout)
67 def clean(self, txt) :
69 keep_caract = u"a-zA-Z0-9àÀâÂäÄáÁéÉèÈêÊëËìÌîÎïÏòÒôÔöÖùÙûÛüÜçÇß’ñ.:,;!?\n*'_-"
70 list_keep = u"[^" + keep_caract + "]+"
71 txt = re.sub(list_keep, ' ', txt)
72 txt = txt.replace(u'’',u"'")
73 txt = txt.replace(u'\'',u' ').replace(u'-', u' ')
74 txt = txt.replace(u'?',u' ? ').replace(u'.',u' . ').replace(u'!', u' ! ').replace(u',',u' , ').replace(u';', u' ; ').replace(u':',u' : ')
75 txt = ' '.join(txt.split())
78 def update_dict(self, tmpfile) :
79 with codecs.open(tmpfile, 'r', 'utf8') as f :
81 content = [line.split('\t') for line in content.splitlines()]
82 for forme in content :
83 if (forme[2] == u'<unknown>') or (forme[1] in [u'PON', u'<unknown>', u'SYM', u'SENT']) or (forme[1]==u'NUM' and forme[2]==u'@card@') :
85 elif (forme[0], forme[1]) in self.formes :
86 self.formes[(forme[0], forme[1])][0] += 1
88 self.formes[(forme[0], forme[1])] = [1, forme[2]]
89 print len(self.formes)
91 def treat_formes(self) :
94 for forme in self.formes :
95 if forme[0] in nformes :
96 if self.formes[forme][0] > nformes[forme[0]][0] :
97 nformes[forme[0]] = [self.formes[forme][0], forme[1], self.formes[forme][1]]
99 nformes[forme[0]] = [self.formes[forme][0], forme[1], self.formes[forme][1]]
100 with open(self.fileout, 'w') as f :
101 toprint = [[forme, nformes[forme][1], nformes[forme][2], `nformes[forme][0]`] for forme in nformes]
102 toprint = sorted(toprint)
103 toprint = '\n'.join(['\t'.join(line) for line in toprint])
104 f.write(toprint.encode('utf8'))
def doparsewiki(self, content) :
    """Clean one chunk of wiki text, tag it with TreeTagger and merge
    the tagger output into the running form dictionary.

    The cleaned text goes through the fixed exchange files /tmp/tmptxt
    (tagger input) and /tmp/tttmp (tagger output), as elsewhere in this
    script.
    """
    content = self.clean(content)
    # binary mode: we write already-encoded UTF-8 bytes
    with open('/tmp/tmptxt', 'wb') as f :
        f.write(content.encode('utf8'))
    # Feed the file to TreeTagger directly instead of piping it through
    # an extra `cat` process (the original also leaked cat's stdout pipe).
    with open('/tmp/tmptxt', 'rb') as fin :
        with open('/tmp/tttmp', 'w') as fout :
            p = Popen([treetagger], stdin = fin, stdout = fout)
            p.communicate()
    self.update_dict('/tmp/tttmp')
118 files = os.listdir(self.txtdir)
120 fpath = os.path.join(self.txtdir, fpath)
122 with codecs.open(fpath, 'r', self.encodage) as f :
124 content = self.clean(content)
125 with open('/tmp/tmptxt', 'w') as f :
126 f.write(content.encode('utf8'))
127 p1 = Popen(['cat', '/tmp/tmptxt'], stdout = PIPE)
128 with open('/tmp/tttmp', 'w') as f :
129 p2 = Popen([treetagger], stdin = p1.stdout, stdout = f)
130 out = p2.communicate()
131 self.update_dict('/tmp/tttmp')
135 def __init__(self, infile, outfile, stopw = None) :
137 with codecs.open(infile, 'r', 'utf8') as f :
139 content = [line.split('\t') for line in content.splitlines()]
140 content = [self.treatline(line) for line in content if line[3] != '1']
143 if stopw is not None :
144 with codecs.open(stopw, 'r', 'utf8') as f :
146 self.stw = stw.splitlines()
147 content = self.dostopword(content)
148 self.printcontent(content, outfile)
150 for forme in self.formes :
151 self.dictg[self.formes[forme][2]] = self.dictg.get(self.formes[forme][2],0) + 1
def treatline(self, line) :
    """Normalise one lexicon row and count its grammatical category.

    `line` is [form, tag, lemma, count-string]; the category is the tag
    truncated at the first u':' and lower-cased.  Updates self.dictg
    (category -> occurrence count) and returns
    [form, lemma, category, count-as-int].
    """
    forme, tag, lemme, effectif = line[0], line[1], line[2], line[3]
    categorie = tag.partition(u':')[0].lower()
    # tally how many entries fall into this category
    if categorie in self.dictg :
        self.dictg[categorie] += 1
    else :
        self.dictg[categorie] = 1
    return [forme, lemme, categorie, int(effectif)]
161 def dostopword(self, content) :
162 for line in content :
163 self.formes[line[0]] = line
164 self.lems[line[1]] = line
165 for word in self.stw :
166 if word in self.formes :
167 print word, self.formes[word]
168 if self.formes[word][2] in ['adj','adv','ver','nom'] :
169 self.formes[word][2] = self.formes[word][2] + '_sup'
170 print self.formes[word]
172 self.formes[word] = [word, word, 'sw', 0]
173 return sorted([[forme, self.formes[forme][1], self.formes[forme][2]] for forme in self.formes])
def printcontent(self, content, outfile) :
    """Write the lexicon to `outfile`, one tab-joined row per line.

    `content` is a list of rows of unicode strings.  The file is opened
    in binary mode because the joined text is encoded to UTF-8 bytes
    before writing — the original text-mode 'w' only worked on Python 2
    and would raise TypeError on Python 3.
    """
    with open(outfile, 'wb') as f :
        f.write('\n'.join(['\t'.join(line) for line in content]).encode('utf8'))
# Alternative pipelines, kept for reference: stream a Wikipedia XML dump
# through the SAX handler, or tag a directory of plain-text files.
#sparser = Parser('', encodage, treetagger, fileout)
#parser = xml.sax.make_parser()
#parser.setContentHandler(WikiPediaHandler(sparser))
#parser.parse(open(xmlfile,"r"))
##Parser(txtdir, encodage, treetagger, fileout)
# Guard the entry point so importing this module does not immediately
# rewrite the lexicon file as a side effect.
if __name__ == '__main__' :
    PostTreat(fileout, lexique, stopword)