#line 200 "python_tokeniser.ipk"
import string   # Python 2 string module: supplies string.letters and string.split

# Character classes for identifiers and numbers.
namechars, numchars = string.letters + '_', string.digits

class python_tokeniser:
    # Incremental tokeniser: source text is fed in with write() and the
    # accumulated tokens are retrieved with get_tokens() or close().
    def __init__(self, squashop=0, report_comments=0, split_multiline_strings=0):
        self.squashop = squashop
        self.report_comments = report_comments
        self.split_multiline_strings = split_multiline_strings
        self.reset()

    def reset(self):
        # Scanner state: line number, parenthesis nesting, continuation flags,
        # pending multi-line string, indent stack, token list, partial-line buffer.
        self.lnum = self.parenlev = self.continued = 0
        self.contstr, self.needcont = '', 0
        self.contline = None
        self.indents = [0]
        self.tokens = []
        self.buffer = ''

    def get_tokens(self):
        # Hand over the tokens gathered so far and start a fresh list.
        tmp = self.tokens
        self.tokens = []
        return tmp

    def tokenize(self, data):
        self.write(data)
        return self.get_tokens()

    def tokeneater(self, *args):
        self.tokens.append(args)

    def close(self):
        # Flush any incomplete final line, then signal end of input.
        if self.buffer:
            self.writeline(self.buffer)
            self.buffer = ''
        self.writeline('')
        return self.get_tokens()

    def write(self, data):
        lines = string.split(data, '\n')
        if lines:
            # Prepend the partial line left over from the previous write.
            lines[0] = self.buffer + lines[0]
            self.buffer = ''
            # Every element but the last is a complete line; tokenise it.
            for line in lines[:-1]:
                self.writeline(line + '\n')
            # The last element has no trailing newline and may be incomplete;
            # buffer it until the next write or close supplies the rest.
            self.buffer = lines[-1]
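A minimal usage sketch follows; it is hypothetical driver code, not part of the listing. It assumes the writeline method (the tokenising core itself) is supplied by a later chunk of python_tokeniser.ipk, since this chunk only handles buffering, and that it runs under Python 2 like the listing above.

t = python_tokeniser()
t.write('x = 1\nif x:\n')   # complete lines are tokenised immediately
t.write('    y = x')        # incomplete line is buffered ...
t.write(' + 1\n')           # ... and completed by the next write
for tok in t.close():       # flush the buffer and signal end of input
    print tok               # each token is an argument tuple passed to tokeneater

Because write only forwards complete lines, the caller is free to hand the tokeniser arbitrary chunks of text, such as network reads, without worrying about line boundaries.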