Dataset schema. Each record below lists these fields in this order; fields with empty values appear to be omitted from the preview.

  field           type    string lengths
  rem             string  2 to 226k
  add             string  0 to 227k
  context         string  8 to 228k
  meta            string  156 to 215
  input_ids       list
  attention_mask  list
  labels          list
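Judging from the rows, rem and add hold the code a commit removed and added, context the surrounding source, meta a commit hash plus source file path, and the three lists a padded, tokenized training example. As a minimal sketch of iterating records with this layout: the JSON-lines storage and the "records.jsonl" file name are assumptions for illustration, not part of the dump.

```python
import json

# Minimal sketch, assuming the dump is stored as JSON lines with the
# fields listed in the schema above. "records.jsonl" is a hypothetical
# file name, not part of the dump.
with open("records.jsonl") as fh:
    for line in fh:
        rec = json.loads(line)
        # meta holds a commit hash followed by the source file path.
        commit_hash, path = rec["meta"].split(maxsplit=1)
        # attention_mask flags real tokens (1) versus padding (0).
        n_tokens = sum(rec["attention_mask"])
        # Label positions set to -100 are conventionally ignored by the loss.
        n_ignored = sum(1 for t in rec["labels"] if t == -100)
        print(f"{commit_hash[:8]} {path}: {n_tokens} real tokens, "
              f"{n_ignored} ignored label positions")
```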
Example record (first of many; later records repeat the same field order without labels):

rem:     return bool(self.__flags & DEF_STARSTAR)
add:     return bool(self.__flags & DEF_DOUBLESTAR)
context: def is_keywordarg(self): return bool(self.__flags & DEF_STARSTAR)
meta:    938ae439ad52f00395c7a63d14ea4743f615f025 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8125/938ae439ad52f00395c7a63d14ea4743f615f025/symtable.py
input_ids / attention_mask / labels (truncated token lists):
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 353, 67, 11041, 3175, 12, 2890, 4672, 327, 1426, 12, 2890, 16186, 7133, 473, 25957, 67, 20943, 20943, 13, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 353, 67, 11041, 3175, 12, 2890, 4672, 327, 1426, 12, 2890, 16186, 7133, 473, 25957, 67, 20943, 20943, 13, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, ...
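In the record above the padded tail positions line up: input_ids pad with 0, attention_mask drops to 0, and labels switch to -100 (the usual ignore index), while labels mirror input_ids on real positions. A small sketch of that alignment, using abbreviated copies of the lists above (most middle tokens elided for brevity):

```python
# Abbreviated from the record above; most real-token positions elided.
input_ids      = [1, 8585, 326, 22398, 13, 2, 0, 0, 0]
attention_mask = [1, 1,    1,   1,     1,  1, 0, 0, 0]
labels         = [1, 8585, 326, 22398, 13, 2, -100, -100, -100]

for ids, mask, lab in zip(input_ids, attention_mask, labels):
    if mask == 1:
        # Real positions: the label repeats the input token.
        assert lab == ids
    else:
        # Padding: token id 0, and the loss ignores the label (-100).
        assert ids == 0 and lab == -100
```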
self.btn_ok = wx.Button(panel, wx.ID_OK)
def _init_ui(self): splitter = spl.MultiSplitterWindow(self, style=wx.SP_LIVE_UPDATE) splitter.SetOrientation(wx.VERTICAL) self.splitter = splitter
3acfe7b571ec1003fe0ded5731ec331ac0d51182 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10228/3acfe7b571ec1003fe0ded5731ec331ac0d51182/import_panel.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 2738, 67, 4881, 12, 2890, 4672, 21553, 273, 6121, 18, 5002, 26738, 3829, 12, 2890, 16, 2154, 33, 27226, 18, 3118, 67, 2053, 3412, 67, 8217, 13, 21553, 18, 694, 14097, 12, 27226, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 2738, 67, 4881, 12, 2890, 4672, 21553, 273, 6121, 18, 5002, 26738, 3829, 12, 2890, 16, 2154, 33, 27226, 18, 3118, 67, 2053, 3412, 67, 8217, 13, 21553, 18, 694, 14097, 12, 27226, 1...
logger.exception("Error locking file", path, pid)
if not pid: pid = 'UNKNOWN' logger.exception("Error locking file %s; pid=%s", path, pid)
def __init__(self, path): self._path = path fp = open(path, 'w+')
ddfa05a4af0b93221cc01a533ff982175bc927cc /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/10048/ddfa05a4af0b93221cc01a533ff982175bc927cc/lock_file.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 589, 4672, 365, 6315, 803, 273, 589, 4253, 273, 1696, 12, 803, 16, 296, 91, 15, 6134, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 589, 4672, 365, 6315, 803, 273, 589, 4253, 273, 1696, 12, 803, 16, 296, 91, 15, 6134, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100...
new = pickle.loads(pickle.dumps(e, random.randint(0, 2))) for checkArgName in expected: self.assertEquals(repr(getattr(e, checkArgName)), repr(expected[checkArgName]), 'pickled exception "%s", attribute "%s' % (repr(e), checkArgName))
for p in pickle, cPickle: for protocol in range(p.HIGHEST_PROTOCOL + 1): new = p.loads(p.dumps(e, protocol)) for checkArgName in expected: got = repr(getattr(new, checkArgName)) want = repr(expected[checkArgName]) self.assertEquals(got, want, 'pickled "%r", attribute "%s' % (e, checkArgName))
def testAttributes(self): # test that exception attributes are happy try: str(u'Hello \u00E1') except Exception, e: sampleUnicodeEncodeError = e
bb8d88aac4867aa6708a73adf2001252815a0e7e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3187/bb8d88aac4867aa6708a73adf2001252815a0e7e/test_exceptions.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 2498, 12, 2890, 4672, 468, 1842, 716, 1520, 1677, 854, 5622, 2074, 775, 30, 609, 12, 89, 11, 18601, 521, 89, 713, 41, 21, 6134, 1335, 1185, 16, 425, 30, 3296, 16532, 5509, 668, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 2498, 12, 2890, 4672, 468, 1842, 716, 1520, 1677, 854, 5622, 2074, 775, 30, 609, 12, 89, 11, 18601, 521, 89, 713, 41, 21, 6134, 1335, 1185, 16, 425, 30, 3296, 16532, 5509, 668, ...
"./scripts",
os.path.join(".", "scripts"),
def path(filename=""): urkpath = os.path.dirname(inspect.getfile(sys.modules[__name__])) if filename: return os.path.join(urkpath, filename) else: return urkpath
d0adda9bab5b2bccf6204e7f054a15838ed41bc4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10693/d0adda9bab5b2bccf6204e7f054a15838ed41bc4/urk.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 589, 12, 3459, 1546, 6, 4672, 8896, 79, 803, 273, 1140, 18, 803, 18, 12287, 12, 12009, 18, 588, 768, 12, 9499, 18, 6400, 63, 972, 529, 972, 22643, 225, 309, 1544, 30, 327, 1140, 18, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 589, 12, 3459, 1546, 6, 4672, 8896, 79, 803, 273, 1140, 18, 803, 18, 12287, 12, 12009, 18, 588, 768, 12, 9499, 18, 6400, 63, 972, 529, 972, 22643, 225, 309, 1544, 30, 327, 1140, 18, ...
def hasPermission(self, permission, classname=_marker):
def hasPermission(self, permission, classname=_marker, itemid=None):
def hasPermission(self, permission, classname=_marker): """Check whether the user has 'permission' on the current class.""" if classname is self._marker: classname = self.client.classname return self.db.security.hasPermission(permission, self.client.userid, classname)
cc0f0abbe66d4241d7d56b7702ee4009092ffdfe /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/1906/cc0f0abbe66d4241d7d56b7702ee4009092ffdfe/actions.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 26097, 12, 2890, 16, 4132, 16, 7479, 33, 67, 11145, 16, 16862, 33, 7036, 4672, 3536, 1564, 2856, 326, 729, 711, 296, 9827, 11, 603, 326, 783, 667, 12123, 309, 7479, 353, 365, 6315, 111...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 26097, 12, 2890, 16, 4132, 16, 7479, 33, 67, 11145, 16, 16862, 33, 7036, 4672, 3536, 1564, 2856, 326, 729, 711, 296, 9827, 11, 603, 326, 783, 667, 12123, 309, 7479, 353, 365, 6315, 111...
"Found %i chars so far for %s, file says end %i:\n%s" \ % (len(seqs[index]), id, end, repr(seqs[index]))
"Found %i chars so far for sequence %i (%s, %s, start=%i), file says end %i:\n%s" \ % (len(seqs[index].replace("-","")), index, id, repr(seqs[index]), seq_starts[index], end, line)
def next(self) :
defbfe130b777f7099d44e06b66a0f9814f8b161 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/7167/defbfe130b777f7099d44e06b66a0f9814f8b161/EmbossIO.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1024, 12, 2890, 13, 294, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1024, 12, 2890, 13, 294, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100...
'required': Or(Bool(Eval('centralised')), And(Equal(Eval('type'), 'cash'), Bool(Get(Eval('context', {}), 'company', 0)))),
'required': And(Or(Bool(Eval('centralised')), Equal(Eval('type'), 'cash')), Bool(Get(Eval('context', {}), 'company', 0))),
def default_readonly(self): return False
817ecaa4b793b2e23d6530f50ee8b0c4015a0f6b /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9290/817ecaa4b793b2e23d6530f50ee8b0c4015a0f6b/journal.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 805, 67, 16365, 12, 2890, 4672, 327, 1083, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 805, 67, 16365, 12, 2890, 4672, 327, 1083, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100...
s1 = '(%s)'%s1 return "%s%s%s"% (s0, s, s1) def _latex_(self):
if not self._operands[1]._is_atomic(): s1 = '%s%s%s' % (paren_symbols[0],s1,paren_symbols[1]) return "%s%s%s" % (s0, op_symbols[op], s1) def _latex_(self): parens = ('\\left(', '\\right)')
def _repr_(self): ops = self._operands s0 = str(ops[0]) s = symbols[self._operator] if s in ['*', '/', '^']: if '+' in s0 or '-' in s0: s0 = '(%s)'%s0 s1 = str(ops[1]) if s in ['*', '/', ' - ']: if '+' in s1 or '-' in s1: s1 = '(%s)'%s1 return "%s%s%s"% (s0, s, s1) # TODO: Bobby -- make the latex below use logic as above to parenthesize!
d80bf7cbd123e40324df94f42d0a13a4f3c09a6c /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/9417/d80bf7cbd123e40324df94f42d0a13a4f3c09a6c/calculus.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 12715, 67, 12, 2890, 4672, 6727, 273, 365, 6315, 4063, 5708, 272, 20, 273, 609, 12, 4473, 63, 20, 5717, 272, 273, 7963, 63, 2890, 6315, 9497, 65, 309, 272, 316, 10228, 14, 2187, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 12715, 67, 12, 2890, 4672, 6727, 273, 365, 6315, 4063, 5708, 272, 20, 273, 609, 12, 4473, 63, 20, 5717, 272, 273, 7963, 63, 2890, 6315, 9497, 65, 309, 272, 316, 10228, 14, 2187, ...
citation = Citation()
citation = mysite.profile.models.Citation()
def create_citations_from_debianqa_results(dia_id, results): dia = DataImportAttempt.objects.get(id=dia_id) person = dia.person for package_name, package_description in results: (project, _) = Project.objects.get_or_create(name=package_name) package_link = 'http://packages.debian.org/src:' + urllib.quote( package_name) if PortfolioEntry.objects.filter(person=person, project=project).count() == 0: portfolio_entry = PortfolioEntry(person=person, project=project, project_description=package_description) portfolio_entry.save() portfolio_entry = PortfolioEntry.objects.filter(person=person, project=project)[0] citation = Citation() citation.languages = "" # FIXME ", ".join(result['languages']) citation.contributor_role='Maintainer' citation.portfolio_entry = portfolio_entry citation.data_import_attempt = dia citation.url = package_link citation.save_and_check_for_duplicates() # And add a citation to the Debian portfolio entry (project, _) = Project.objects.get_or_create(name='Debian GNU/Linux') if PortfolioEntry.objects.filter(person=person, project=project).count() == 0: portfolio_entry = PortfolioEntry(person=person, project=project, project_description= 'The universal operating system') portfolio_entry.save() portfolio_entry = PortfolioEntry.objects.filter(person=person, project=project)[0] citation = Citation() citation.languages = '' # FIXME: ? citation.contributor_role='Maintainer of %s' % package_name citation.portfolio_entry = portfolio_entry citation.data_import_attempt = dia citation.url = package_link citation.save_and_check_for_duplicates() person.last_polled = datetime.datetime.now() person.save() dia.completed = True dia.save()
f1d0d2222a9a25e0a8611c6a5776e209776dcdc0 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11976/f1d0d2222a9a25e0a8611c6a5776e209776dcdc0/__init__.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 752, 67, 71, 18282, 67, 2080, 67, 31888, 2779, 7588, 67, 4717, 12, 72, 1155, 67, 350, 16, 1686, 4672, 302, 1155, 273, 1910, 5010, 7744, 18, 6911, 18, 588, 12, 350, 33, 72, 1155, 67, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 752, 67, 71, 18282, 67, 2080, 67, 31888, 2779, 7588, 67, 4717, 12, 72, 1155, 67, 350, 16, 1686, 4672, 302, 1155, 273, 1910, 5010, 7744, 18, 6911, 18, 588, 12, 350, 33, 72, 1155, 67, ...
modules = _abcoll, io
modules = _abcoll, _pyio
def dash_R(the_module, test, indirect_test, huntrleaks): """Run a test multiple times, looking for reference leaks. Returns: False if the test didn't leak references; True if we detected refleaks. """ # This code is hackish and inelegant, but it seems to do the job. import copy_reg, _abcoll, io if not hasattr(sys, 'gettotalrefcount'): raise Exception("Tracking reference leaks requires a debug build " "of Python") # Save current values for dash_R_cleanup() to restore. fs = warnings.filters[:] ps = copy_reg.dispatch_table.copy() pic = sys.path_importer_cache.copy() abcs = {} modules = _abcoll, io for abc in [getattr(mod, a) for mod in modules for a in mod.__all__]: # XXX isinstance(abc, ABCMeta) leads to infinite recursion if not hasattr(abc, '_abc_registry'): continue for obj in abc.__subclasses__() + [abc]: abcs[obj] = obj._abc_registry.copy() if indirect_test: def run_the_test(): indirect_test() else: def run_the_test(): reload(the_module) deltas = [] nwarmup, ntracked, fname = huntrleaks repcount = nwarmup + ntracked print >> sys.stderr, "beginning", repcount, "repetitions" print >> sys.stderr, ("1234567890"*(repcount//10 + 1))[:repcount] dash_R_cleanup(fs, ps, pic, abcs) for i in range(repcount): rc = sys.gettotalrefcount() run_the_test() sys.stderr.write('.') dash_R_cleanup(fs, ps, pic, abcs) if i >= nwarmup: deltas.append(sys.gettotalrefcount() - rc - 2) print >> sys.stderr if any(deltas): msg = '%s leaked %s references, sum=%s' % (test, deltas, sum(deltas)) print >> sys.stderr, msg with open(fname, "a") as refrep: print >> refrep, msg refrep.flush() return True return False
e55df1fa2a9be432c5c22d7eec8b395227fa4405 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/8546/e55df1fa2a9be432c5c22d7eec8b395227fa4405/regrtest.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 12558, 67, 54, 12, 5787, 67, 2978, 16, 1842, 16, 16807, 67, 3813, 16, 366, 318, 313, 298, 581, 87, 4672, 3536, 1997, 279, 1842, 3229, 4124, 16, 7849, 364, 2114, 20891, 87, 18, 225, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 12558, 67, 54, 12, 5787, 67, 2978, 16, 1842, 16, 16807, 67, 3813, 16, 366, 318, 313, 298, 581, 87, 4672, 3536, 1997, 279, 1842, 3229, 4124, 16, 7849, 364, 2114, 20891, 87, 18, 225, 2...
return { version: versionFlavorDict[version] } return {}
if self.getLeaves: return { version: versionFlavorDict[version] } else: results[version] = versionFlavorDict[version] return results
def filterTroveMatches(self, name, versionFlavorDict): versionFlavorDict = QueryByBranch.filterTroveMatches(self, name, versionFlavorDict) versionStr = self.map[name][1] try: verRel = versions.Revision(versionStr) except errors.ParseError: verRel = None
a2ea993adf451f48424cf3c73759a6d91abb3759 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8747/a2ea993adf451f48424cf3c73759a6d91abb3759/findtrove.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1034, 56, 303, 537, 6869, 12, 2890, 16, 508, 16, 1177, 2340, 9444, 5014, 4672, 1177, 2340, 9444, 5014, 273, 2770, 858, 7108, 18, 2188, 56, 303, 537, 6869, 12, 2890, 16, 508, 16, 1177, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1034, 56, 303, 537, 6869, 12, 2890, 16, 508, 16, 1177, 2340, 9444, 5014, 4672, 1177, 2340, 9444, 5014, 273, 2770, 858, 7108, 18, 2188, 56, 303, 537, 6869, 12, 2890, 16, 508, 16, 1177, ...
print config
def __init__ (self, config, params): register= params['register'] register (events.PRIVATE_MESSAGE, self.register) self.config= config print config
dfaa3399c7f26387c0b92cdbd52cd0e1392c400e /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/5759/dfaa3399c7f26387c0b92cdbd52cd0e1392c400e/freenode.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 261, 2890, 16, 642, 16, 859, 4672, 1744, 33, 859, 3292, 4861, 3546, 1744, 261, 5989, 18, 20055, 67, 8723, 16, 365, 18, 4861, 13, 365, 18, 1425, 33, 642, 1172, 642, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 261, 2890, 16, 642, 16, 859, 4672, 1744, 33, 859, 3292, 4861, 3546, 1744, 261, 5989, 18, 20055, 67, 8723, 16, 365, 18, 4861, 13, 365, 18, 1425, 33, 642, 1172, 642, 2...
'platform', 'exit', 'maxint')
'platform', 'exit', 'maxint')
def reload(self, module, path=None): if path is None and hasattr(module, '__filename__'): head, tail = os.path.split(module.__filename__) path = [os.path.join(head, '')] return ihooks.ModuleImporter.reload(self, module, path)
ce7c76df1b92a89c9b48d025af1313d1dcf28c4f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ce7c76df1b92a89c9b48d025af1313d1dcf28c4f/rexec.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 7749, 12, 2890, 16, 1605, 16, 589, 33, 7036, 4672, 309, 589, 353, 599, 471, 3859, 12, 2978, 16, 4940, 3459, 7250, 4672, 910, 16, 5798, 273, 1140, 18, 803, 18, 4939, 12, 2978, 16186, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 7749, 12, 2890, 16, 1605, 16, 589, 33, 7036, 4672, 309, 589, 353, 599, 471, 3859, 12, 2978, 16, 4940, 3459, 7250, 4672, 910, 16, 5798, 273, 1140, 18, 803, 18, 4939, 12, 2978, 16186, ...
def read_char(self, timed_input_routine, timed_input_interval):
def read_char(self, timed_input_routine=None, timed_input_interval=0):
def read_char(self, timed_input_routine, timed_input_interval): """ Reads a single character from the stream and returns it as a unicode character.
af32803cab2c520e7c564598678d7dc125faebf0 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/3943/af32803cab2c520e7c564598678d7dc125faebf0/zstream.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 855, 67, 3001, 12, 2890, 16, 7491, 67, 2630, 67, 22640, 33, 7036, 16, 7491, 67, 2630, 67, 6624, 33, 20, 4672, 3536, 29185, 279, 2202, 3351, 628, 326, 1407, 471, 1135, 518, 487, 279, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 855, 67, 3001, 12, 2890, 16, 7491, 67, 2630, 67, 22640, 33, 7036, 16, 7491, 67, 2630, 67, 6624, 33, 20, 4672, 3536, 29185, 279, 2202, 3351, 628, 326, 1407, 471, 1135, 518, 487, 279, ...
bt.clicked = (my_scroller_go_300_300, sc)
bt.callback_clicked_add(my_scroller_go_300_300, sc)
def scroller_clicked(obj, it, *args, **kwargs): win = elementary.Window("scroller", elementary.ELM_WIN_BASIC) win.title_set("Scroller") win.autodel_set(True) bg = elementary.Background(win) win.resize_object_add(bg) bg.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND) bg.show() tb = elementary.Table(win) tb.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND) img = ["images/panel_01.jpg", "images/plant_01.jpg", "images/rock_01.jpg", "images/rock_02.jpg", "images/sky_01.jpg", "images/sky_02.jpg", "images/sky_03.jpg", "images/sky_04.jpg", "images/wood_01.jpg"] n = 0 for j in range(12): for i in range(12): bg2 = elementary.Background(win) bg2.file_set(img[n]) n = n + 1 if n >= 9: n = 0 bg2.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND) bg2.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL) bg2.size_hint_min_set(318, 318) tb.pack(bg2, i, j, 1, 1) bg2.show() sc = elementary.Scroller(win) sc.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND) win.resize_object_add(sc) sc.content_set(tb) tb.show() sc.page_relative_set(1.0, 1.0) sc.show() tb2 = elementary.Table(win) tb2.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND) win.resize_object_add(tb2) bt = elementary.Button(win) bt.label_set("to 300 300") bt.clicked = (my_scroller_go_300_300, sc) bt.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND) bt.size_hint_align_set(0.1, 0.1) tb2.pack(bt, 0, 0, 1, 1) bt.show() bt = elementary.Button(win) bt.label_set("to 900 300") bt.clicked = (my_scroller_go_900_300, sc) bt.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND) bt.size_hint_align_set(0.9, 0.1) tb2.pack(bt, 1, 0, 1, 1) bt.show() bt = elementary.Button(win) bt.label_set("to 300 900") bt.clicked = (my_scroller_go_300_900, sc) bt.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND) bt.size_hint_align_set(0.1, 0.9) tb2.pack(bt, 0, 1, 1, 1) bt.show() bt = elementary.Button(win) bt.label_set("to 900 900") bt.clicked = (my_scroller_go_900_900, sc) bt.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND) bt.size_hint_align_set(0.9, 0.9) tb2.pack(bt, 1, 1, 1, 1) bt.show() tb2.show() win.resize(320, 320) win.show()
ef127faaba8fc06ebbb7bd48a4185405cdfc6a4e /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/12343/ef127faaba8fc06ebbb7bd48a4185405cdfc6a4e/test.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 888, 1539, 67, 7475, 329, 12, 2603, 16, 518, 16, 380, 1968, 16, 2826, 4333, 4672, 5657, 273, 930, 814, 18, 3829, 2932, 1017, 1539, 3113, 930, 814, 18, 2247, 49, 67, 24572, 67, 25642, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 888, 1539, 67, 7475, 329, 12, 2603, 16, 518, 16, 380, 1968, 16, 2826, 4333, 4672, 5657, 273, 930, 814, 18, 3829, 2932, 1017, 1539, 3113, 930, 814, 18, 2247, 49, 67, 24572, 67, 25642, ...
if (self.homesize == 0):
if (self.home_mb == 0):
def forward(self): if self.expert.get_active(): mainWindow.goto('manualPart') return
2f74cfb88502c7fe6ae7fb78ca644ae3d1b628f1 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/2375/2f74cfb88502c7fe6ae7fb78ca644ae3d1b628f1/partitions.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 5104, 12, 2890, 4672, 309, 365, 18, 338, 672, 18, 588, 67, 3535, 13332, 2774, 3829, 18, 75, 6302, 2668, 19840, 1988, 6134, 327, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 5104, 12, 2890, 4672, 309, 365, 18, 338, 672, 18, 588, 67, 3535, 13332, 2774, 3829, 18, 75, 6302, 2668, 19840, 1988, 6134, 327, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, ...
print( " %d buckets from %g to %g: " % ( \
print( " %d buckets from %g to %g:" % ( \
def main( argv = None ): bComputeMinMax = False bSample = False bShowGCPs = True bShowMetadata = True bShowRAT=True bStats = False bApproxStats = True bShowColorTable = True bComputeChecksum = False bReportHistograms = False pszFilename = None papszExtraMDDomains = [ ] pszProjection = None hTransform = None #/* Must process GDAL_SKIP before GDALAllRegister(), but we can't call */ #/* GDALGeneralCmdLineProcessor before it needs the drivers to be registered */ #/* for the --format or --formats options */ #for( i = 1; i < argc; i++ ) #{ # if EQUAL(argv[i],"--config") and i + 2 < argc and EQUAL(argv[i + 1], "GDAL_SKIP"): # { # CPLSetConfigOption( argv[i+1], argv[i+2] ); # # i += 2; # } #} # #GDALAllRegister(); if argv is None: argv = sys.argv argv = gdal.GeneralCmdLineProcessor( argv ) if argv is None: return 1 nArgc = len(argv)
81d6440b99467d9a0ab6e45f8378cb349c9bf867 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10290/81d6440b99467d9a0ab6e45f8378cb349c9bf867/gdalinfo.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2774, 12, 5261, 273, 599, 262, 30, 225, 324, 7018, 17454, 273, 1083, 324, 8504, 273, 1083, 324, 5706, 43, 4258, 87, 273, 1053, 324, 5706, 2277, 273, 1053, 324, 5706, 54, 789, 33, 5510,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2774, 12, 5261, 273, 599, 262, 30, 225, 324, 7018, 17454, 273, 1083, 324, 8504, 273, 1083, 324, 5706, 43, 4258, 87, 273, 1053, 324, 5706, 2277, 273, 1053, 324, 5706, 54, 789, 33, 5510,...
child.expect('20\s+25\s+30\s+35\s+40\s+45\s+50\s+55\s+60\s+65', timeout=None)
child.expect('20\s+25\s+30\s+35\s+40\s+45\s+50\s+55\s+60\s+65')
def time(self, n, factor_digits, verbose=0): """ Gives an approximation for the amount of time it will take to find a factor of size factor_digits in a single process on the current computer. This estimate is provided by gmp-ecm's verbose option on a single run of a curve. INPUT: n -- a positive integer factor_digits -- the (estimated) number of digits of the smallest factor EXAMPLES: sage: n = next_prime(11^23)*next_prime(11^37) sage.: ecm.time(n, 20) Expected curves: 77 Expected time: 7.21s sage.: ecm.time(n, 25) Expected curves: 206 Expected time: 1.56m sage.: ecm.time(n, 30, verbose=1) GMP-ECM 6.0.1 [powered by GMP 4.2] [ECM]
86403ddea61c126d9fccd83103efc7ea356d9006 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/9890/86403ddea61c126d9fccd83103efc7ea356d9006/ecm.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 813, 12, 2890, 16, 290, 16, 5578, 67, 16649, 16, 3988, 33, 20, 4672, 3536, 611, 3606, 392, 24769, 364, 326, 3844, 434, 813, 518, 903, 4862, 358, 1104, 279, 5578, 434, 963, 5578, 67, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 813, 12, 2890, 16, 290, 16, 5578, 67, 16649, 16, 3988, 33, 20, 4672, 3536, 611, 3606, 392, 24769, 364, 326, 3844, 434, 813, 518, 903, 4862, 358, 1104, 279, 5578, 434, 963, 5578, 67, ...
EXAMPLES: sage:
def _hecke_images(self, i, v): """ Return images of the $i$-th standard basis vector under the Hecke operators $T_p$ for all integers in $v$.
85d8342ac872341b456866c2cd6dd196f00b6a0d /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/9417/85d8342ac872341b456866c2cd6dd196f00b6a0d/ambient.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 1129, 73, 67, 7369, 12, 2890, 16, 277, 16, 331, 4672, 3536, 2000, 4602, 434, 326, 271, 77, 8, 17, 451, 4529, 10853, 3806, 3613, 326, 670, 762, 73, 12213, 271, 56, 67, 84, 8, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 1129, 73, 67, 7369, 12, 2890, 16, 277, 16, 331, 4672, 3536, 2000, 4602, 434, 326, 271, 77, 8, 17, 451, 4529, 10853, 3806, 3613, 326, 670, 762, 73, 12213, 271, 56, 67, 84, 8, 3...
(fd, name) = tempfile.mkstemp(suffix=['.html', '.txt'][text],
(fd, path) = tempfile.mkstemp(suffix=['.html', '.txt'][text],
def handle(self, info=None): info = info or sys.exc_info() self.file.write(reset())
112c62f22997921f6c0b41011f166216cc6db814 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3187/112c62f22997921f6c0b41011f166216cc6db814/cgitb.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1640, 12, 2890, 16, 1123, 33, 7036, 4672, 1123, 273, 1123, 578, 2589, 18, 10075, 67, 1376, 1435, 365, 18, 768, 18, 2626, 12, 6208, 10756, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1640, 12, 2890, 16, 1123, 33, 7036, 4672, 1123, 273, 1123, 578, 2589, 18, 10075, 67, 1376, 1435, 365, 18, 768, 18, 2626, 12, 6208, 10756, 2, -100, -100, -100, -100, -100, -100, -100, -...
self._mi.Check(True)
self._mi.Check(False)
def PlugIt(self, parent): """Adds the view menu entry and registers the event handler""" mw = parent self._log = wx.GetApp().GetLog() if mw != None: self._log("[filebrowser] Installing filebrowser plugin") #---- Add Menu Items ----# mb = mw.GetMenuBar() vm = mb.GetMenuByName("view") self._mi = vm.InsertAlpha(ID_FILEBROWSE, _("File Browser"), _("Open File Browser sidepanel"), wx.ITEM_CHECK, after=ed_glob.ID_PRE_MARK)
e90e499ee5b7bec434002466ca98e8c18ca9a363 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/3530/e90e499ee5b7bec434002466ca98e8c18ca9a363/__init__.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3008, 637, 7193, 12, 2890, 16, 982, 4672, 3536, 3655, 326, 1476, 3824, 1241, 471, 10285, 326, 871, 1838, 8395, 14721, 273, 982, 365, 6315, 1330, 273, 7075, 18, 967, 3371, 7675, 967, 1343...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3008, 637, 7193, 12, 2890, 16, 982, 4672, 3536, 3655, 326, 1476, 3824, 1241, 471, 10285, 326, 871, 1838, 8395, 14721, 273, 982, 365, 6315, 1330, 273, 7075, 18, 967, 3371, 7675, 967, 1343...
split_grid("Pane", "source", "pane", 6)
split_grid("pane", "source", "pane", 6)
def process_panes(config): split_grid("Pane", "source", "pane", 6) clips = [] for suffix in ["tl", "t" , "tr", "bl", "b", "br"]: clips.append("pane/%s-%s.png" % ("Pane", suffix)) combine_images(clips, "pane/Pane-Combined.png", False, config) clips = [] for suffix in ["l", "r"]: clips.append("pane/%s-%s.png" % ("Pane", suffix)) combine_images(clips, "pane/Pane-Combined-Center.png", True, config) add_file("pane/Pane-c.png", config)
b22d0db2222e531160880d72a8983e1303736195 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/5718/b22d0db2222e531160880d72a8983e1303736195/combine.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1207, 67, 7355, 281, 12, 1425, 4672, 1416, 67, 5222, 2932, 29009, 3113, 315, 3168, 3113, 315, 29009, 3113, 1666, 13, 4942, 1121, 273, 5378, 364, 3758, 316, 8247, 6172, 3113, 315, 88, 6, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1207, 67, 7355, 281, 12, 1425, 4672, 1416, 67, 5222, 2932, 29009, 3113, 315, 3168, 3113, 315, 29009, 3113, 1666, 13, 4942, 1121, 273, 5378, 364, 3758, 316, 8247, 6172, 3113, 315, 88, 6, ...
titles.append(' '.join(title_and_subtitle))
titles.append(' '.join(title_and_subtitle).strip())
def read_short_title(line): prefix_len = line[1] title_and_subtitle = [] title = [] for k, v in get_subfields(line, ['a', 'b']): v = v.strip(' /,;:') title_and_subtitle.append(v) if k == 'a': title.append(v) titles = [' '.join(title)] if title != title_and_subtitle: titles.append(' '.join(title_and_subtitle)) if prefix_len and prefix_len != '0': try: prefix_len = int(prefix_len) titles += [t[prefix_len:] for t in titles] except ValueError: pass return [str(normalize(i)[:25]) for i in titles]
c07ac8f2a16760a90d839b055a12745da8be6087 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/3913/c07ac8f2a16760a90d839b055a12745da8be6087/fast_parse.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 855, 67, 6620, 67, 2649, 12, 1369, 4672, 1633, 67, 1897, 273, 980, 63, 21, 65, 2077, 67, 464, 67, 1717, 2649, 273, 5378, 2077, 273, 5378, 364, 417, 16, 331, 316, 336, 67, 1717, 2821,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 855, 67, 6620, 67, 2649, 12, 1369, 4672, 1633, 67, 1897, 273, 980, 63, 21, 65, 2077, 67, 464, 67, 1717, 2649, 273, 5378, 2077, 273, 5378, 364, 417, 16, 331, 316, 336, 67, 1717, 2821,...
d['__domain'] = [(groupby,'=',alldata[d['id']][groupby] or False)] + domain
d['__domain'] = [(groupby,'>=',alldata[d['id']][groupby] and datetime.datetime.strptime(alldata[d['id']][groupby][:7] + '-01','%Y-%m-%d').strftime('%Y-%m-%d') or False),\ (groupby,'<=',alldata[d['id']][groupby] and datetime.datetime.strptime(alldata[d['id']][groupby][:7] + '-' + str(days),'%Y-%m-%d').strftime('%Y-%m-%d') or False)] + domain elif fget[groupby]['type'] == 'many2one': d[groupby] = d[groupby] and d[groupby][1] or ''
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None): context = context or {} self.pool.get('ir.model.access').check(cr, uid, self._name, 'read', context=context) if not fields: fields = self._columns.keys()
27dbc19d1f7a0ba675a91c3ae1c88699c1d30ad7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/27dbc19d1f7a0ba675a91c3ae1c88699c1d30ad7/orm.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 855, 67, 1655, 12, 2890, 16, 4422, 16, 4555, 16, 2461, 16, 1466, 16, 13126, 16, 1384, 33, 20, 16, 1800, 33, 7036, 16, 819, 33, 7036, 4672, 819, 273, 819, 578, 2618, 365, 18, 6011, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 855, 67, 1655, 12, 2890, 16, 4422, 16, 4555, 16, 2461, 16, 1466, 16, 13126, 16, 1384, 33, 20, 16, 1800, 33, 7036, 16, 819, 33, 7036, 4672, 819, 273, 819, 578, 2618, 365, 18, 6011, ...
plugin_needed = []
plugin_needed = set()
def load(self, path): path = expand_path(path, config().basedir) if self.is_loaded(): unload_database() if not os.path.exists(path): self.load_failed = True raise LoadError try: infile = file(path, 'rb') db = cPickle.load(infile) self.start_date = db[0] self.categories = db[1] self.facts = db[2] self.fact_views = db[3] self.cards = db[4] infile.close() self.load_failed = False except: self.load_failed = True raise InvalidFormatError(stack_trace=True) # Deal with clones and plugins, also plugins for parent classe. # Because of the sip bugs, card types here are actually still card # type ids. plugin_needed = [] clone_needed = [] active_id = set(card_type.id for card_type in card_types()) for id in set(card.fact.card_type for card in self.cards): while "." in id: # Move up one level of the hierarchy. id, child_name = id.rsplit(".", 1) if id.endswith("_CLONED"): id = id.replace("_CLONED", "") clone_needed.append((id, child_name)) if id not in active_id: plugin_needed.add(id) if id not in active_id: plugin_needed.add(id) print 'plugin needed', plugin_needed print 'clone needed', clone_needed # Activate necessary plugins. for card_type_id in plugin_needed: try: for plugin in plugins(): if plugin.provides == "card_type" and \ plugin.id == card_type_id: plugin.activate() break else: self.__init__() self.load_failed = True raise MissingPluginError(info='id') except: self.__init__() self.load_failed = True raise PluginError(stack_trace=True) # Create necessary clones. for parent_type_id, clone_name in clone_needed: parent_instance = card_type_by_id(parent_type_id) parent_instance.clone(child_name) # Work around a sip bug: don't store card types, but their ids. for f in self.facts: f.card_type = card_type_by_id(f.card_type) # TODO: This was to remove database inconsistencies. Still needed? #for c in self.categories: # self.remove_category_if_unused(c) config()["path"] = contract_path(path, config().basedir) log().loaded_database() for f in component_manager.get_all("function_hook", "after_load"): f.run()
25359d024c5102be1b8221d7000b0b603bf7019e /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/1240/25359d024c5102be1b8221d7000b0b603bf7019e/pickle.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1262, 12, 2890, 16, 589, 4672, 589, 273, 4542, 67, 803, 12, 803, 16, 642, 7675, 31722, 13, 309, 365, 18, 291, 67, 4230, 13332, 27060, 67, 6231, 1435, 309, 486, 1140, 18, 803, 18, 180...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1262, 12, 2890, 16, 589, 4672, 589, 273, 4542, 67, 803, 12, 803, 16, 642, 7675, 31722, 13, 309, 365, 18, 291, 67, 4230, 13332, 27060, 67, 6231, 1435, 309, 486, 1140, 18, 803, 18, 180...
edition['oclc'] = oclc
edition['oclc_numbers'] = oclc
def find_oclc(r, edition): oclc = [] for f in r.get_fields('035'): if 'a' not in f.contents: continue for a in f.contents['a']: m = re_oclc.match(f.contents['a'][0]) if m: oclc.append(m.group(1)) if oclc: edition['oclc'] = oclc
da804bedd5a86ca4953a76539728719f215e8cf0 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/3913/da804bedd5a86ca4953a76539728719f215e8cf0/parse.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1104, 67, 504, 17704, 12, 86, 16, 28432, 4672, 320, 830, 71, 273, 5378, 364, 284, 316, 436, 18, 588, 67, 2821, 2668, 4630, 25, 11, 4672, 309, 296, 69, 11, 486, 316, 284, 18, 3980, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1104, 67, 504, 17704, 12, 86, 16, 28432, 4672, 320, 830, 71, 273, 5378, 364, 284, 316, 436, 18, 588, 67, 2821, 2668, 4630, 25, 11, 4672, 309, 296, 69, 11, 486, 316, 284, 18, 3980, ...
x = pow(p, 1.0/alpha)
x = p ** (1.0/alpha)
def gammavariate(self, alpha, beta): """Gamma distribution. Not the gamma function!
42406e6f27e9a42e91db8706d897e0b478b13a4d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/42406e6f27e9a42e91db8706d897e0b478b13a4d/random.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 23411, 20689, 297, 3840, 12, 2890, 16, 4190, 16, 6796, 4672, 3536, 31300, 7006, 18, 225, 2288, 326, 9601, 445, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 23411, 20689, 297, 3840, 12, 2890, 16, 4190, 16, 6796, 4672, 3536, 31300, 7006, 18, 225, 2288, 326, 9601, 445, 5, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -10...
return "Sequence of Uppuluri-Carpenter numbers"
""" EXAMPLES: sage: sloane.A000587._repr_() 'Sequence of Uppuluri-Carpenter numbers' """ return "Sequence of Uppuluri-Carpenter numbers"
def _repr_(self): return "Sequence of Uppuluri-Carpenter numbers"
7b02e5455d5d52d6dcc7f2314cdd56803031a1c6 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/9890/7b02e5455d5d52d6dcc7f2314cdd56803031a1c6/sloane_functions.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 12715, 67, 12, 2890, 4672, 327, 315, 4021, 434, 1948, 84, 332, 1650, 17, 39, 11441, 2328, 5600, 6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 12715, 67, 12, 2890, 4672, 327, 315, 4021, 434, 1948, 84, 332, 1650, 17, 39, 11441, 2328, 5600, 6, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, ...
def __new__(cls, name, bus=None, flags=0): if bus == None: bus = dbus.Bus() conn = bus.get_connection() retval = dbus.dbus_bindings.bus_request_name(conn, name, flags) if retval == dbus.dbus_bindings.REQUEST_NAME_REPLY_PRIMARY_OWNER: pass elif retval == dbus.dbus_bindings.REQUEST_NAME_REPLY_IN_QUEUE: pass elif retval == dbus.dbus_bindings.REQUEST_NAME_REPLY_EXISTS: raise NameExistsException(name) elif retval == dbus.dbus_bindings.REQUEST_NAME_REPLY_ALREADY_OWNER: pass else: raise RuntimeError('requesting bus name %s returned unexpected value %s' % (name, retval)) bus_name = object.__new__(cls) bus_name._bus = bus bus_name._name = name bus_name._conn = conn return bus_name def __init__(self, *args, **keywords): pass def __del__(self): dbus.dbus_bindings.bus_release_name(self._bus.get_connection(), self._name) pass def get_bus(self): """Get the Bus this Service is on""" return self._bus def get_name(self): """Get the name of this service""" return self._name def get_connection(self): """Get the connection for this service""" return self._conn def __repr__(self): return '<dbus.service.BusName %s on %r at % __str__ = __repr__ class OneTime (dbus.service.Object):
def __new__(cls, name, bus=None, flags=0): # get default bus if bus == None: bus = dbus.Bus()
46ad325fbd04b90ed67ad2df2faeb7236742895a /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/12354/46ad325fbd04b90ed67ad2df2faeb7236742895a/onetime.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2704, 972, 12, 6429, 16, 508, 16, 5766, 33, 7036, 16, 2943, 33, 20, 4672, 468, 336, 805, 5766, 309, 5766, 422, 599, 30, 5766, 273, 21866, 18, 7086, 1435, 2, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2704, 972, 12, 6429, 16, 508, 16, 5766, 33, 7036, 16, 2943, 33, 20, 4672, 468, 336, 805, 5766, 309, 5766, 422, 599, 30, 5766, 273, 21866, 18, 7086, 1435, 2, -100, -100, -100, -...
self.get_data(rva),
self.get_data( rva, Structure(format).sizeof() ),
def parse_directory_tls(self, rva, size): """""" if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE: format = self.__IMAGE_TLS_DIRECTORY_format__ elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS: format = self.__IMAGE_TLS_DIRECTORY64_format__ tls_struct = self.__unpack_data__( format, self.get_data(rva), file_offset = self.get_offset_from_rva(rva)) if not tls_struct: return None return TlsData( struct = tls_struct )
1d5b4f161f415406d1d384dc2319874979bcdef8 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/213/1d5b4f161f415406d1d384dc2319874979bcdef8/pefile.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1109, 67, 5149, 67, 17116, 12, 2890, 16, 5633, 69, 16, 963, 4672, 3536, 8395, 225, 309, 365, 18, 1423, 67, 2399, 422, 14930, 67, 7557, 67, 49, 22247, 67, 1423, 30, 740, 273, 365, 161...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1109, 67, 5149, 67, 17116, 12, 2890, 16, 5633, 69, 16, 963, 4672, 3536, 8395, 225, 309, 365, 18, 1423, 67, 2399, 422, 14930, 67, 7557, 67, 49, 22247, 67, 1423, 30, 740, 273, 365, 161...
EndSection
SectionEnd
def nsis_generate_script(): filename = os.path.join(install_path,"urk.nsi") f = file(filename,'w') #header f.write(r"""
2c6f9c3669b87839521ac4c9079c45c4d0fa31a2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10693/2c6f9c3669b87839521ac4c9079c45c4d0fa31a2/install.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3153, 291, 67, 7163, 67, 4263, 13332, 1544, 273, 1140, 18, 803, 18, 5701, 12, 5425, 67, 803, 10837, 20628, 18, 2387, 77, 7923, 284, 273, 585, 12, 3459, 11189, 91, 6134, 468, 3374, 284,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3153, 291, 67, 7163, 67, 4263, 13332, 1544, 273, 1140, 18, 803, 18, 5701, 12, 5425, 67, 803, 10837, 20628, 18, 2387, 77, 7923, 284, 273, 585, 12, 3459, 11189, 91, 6134, 468, 3374, 284,...
def field2text(v, nl=re.compile('\r\n\|\n\r').search):
def field2text(v, nl=re.compile('\r\n|\n\r').search):
def field2text(v, nl=re.compile('\r\n\|\n\r').search): if hasattr(v,'read'): v=v.read() else: v=str(v) mo = nl(v) if mo is None: return v l = mo.start(0) r=[] s=0 while l >= s: r.append(v[s:l]) s=l+2 mo=nl(v,s) if mo is None: l=-1 else: l=mo.start(0) r.append(v[s:]) return join(r,'\n')
fde9020ddb0f73b7c6f3ffd56de513c6fbc9126e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/9658/fde9020ddb0f73b7c6f3ffd56de513c6fbc9126e/Converters.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 652, 22, 955, 12, 90, 16, 7741, 33, 266, 18, 11100, 2668, 64, 86, 64, 82, 8960, 82, 64, 86, 16063, 3072, 4672, 309, 3859, 12, 90, 11189, 896, 11, 4672, 331, 33, 90, 18, 896, 1435, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 652, 22, 955, 12, 90, 16, 7741, 33, 266, 18, 11100, 2668, 64, 86, 64, 82, 8960, 82, 64, 86, 16063, 3072, 4672, 309, 3859, 12, 90, 11189, 896, 11, 4672, 331, 33, 90, 18, 896, 1435, ...
- ``diffusion_matrix`` - if True the given matrix `l` is transformed to a matrix which performs the same operation over GF(2) as `l` over `GF(2^n)` (default: False).
- ``diffusion_matrix`` - if True the given matrix ``l`` is transformed to a matrix which performs the same operation over `\mathbb{F}_2` as ``l`` over `GF(2^n)` (default: ``False``).
def phi(self, l, diffusion_matrix=False): r""" Given a list/matrix of elements in `GF(2^n)`, return a matching list/matrix of elements in `GF(2)`.
361ee6a2b2c797fdc77944b615cb058df81aca2e /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/9417/361ee6a2b2c797fdc77944b615cb058df81aca2e/sr.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 7706, 12, 2890, 16, 328, 16, 3122, 7063, 67, 5667, 33, 8381, 4672, 436, 8395, 16803, 279, 666, 19, 5667, 434, 2186, 316, 1375, 43, 42, 12, 22, 66, 82, 13, 9191, 327, 279, 3607, 666, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 7706, 12, 2890, 16, 328, 16, 3122, 7063, 67, 5667, 33, 8381, 4672, 436, 8395, 16803, 279, 666, 19, 5667, 434, 2186, 316, 1375, 43, 42, 12, 22, 66, 82, 13, 9191, 327, 279, 3607, 666, ...
proposal_id = c.attachment.proposal.id
def _delete(self, id): c.attachment = Attachment.find_by_id(id) author_id = c.attachment.proposal.person.id
3631dec13d497a76597858ec5cc4a1452955795d /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/12856/3631dec13d497a76597858ec5cc4a1452955795d/attachment.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 3733, 12, 2890, 16, 612, 4672, 276, 18, 11461, 273, 15939, 18, 4720, 67, 1637, 67, 350, 12, 350, 13, 2869, 67, 350, 273, 276, 18, 11461, 18, 685, 8016, 18, 12479, 18, 350, 2, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 3733, 12, 2890, 16, 612, 4672, 276, 18, 11461, 273, 15939, 18, 4720, 67, 1637, 67, 350, 12, 350, 13, 2869, 67, 350, 273, 276, 18, 11461, 18, 685, 8016, 18, 12479, 18, 350, 2, ...
(lang, tt, tools.ustr(name)))
(lang or '', tt, tools.ustr(name)))
def _get_source(self, cr, uid, name, tt, lang, source=None): if not lang: return '' if source: #if isinstance(source, unicode): # source = source.encode('utf8') cr.execute('select value ' \ 'from ir_translation ' \ 'where lang=%s ' \ 'and type=%s ' \ 'and name=%s ' \ 'and src=%s', (lang, tt, tools.ustr(name), source)) else: cr.execute('select value ' \ 'from ir_translation ' \ 'where lang=%s ' \ 'and type=%s ' \ 'and name=%s', (lang, tt, tools.ustr(name))) res = cr.fetchone() trad = res and res[0] or '' return trad
0ef048f782408b7441551afb29b701619fa93c92 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12853/0ef048f782408b7441551afb29b701619fa93c92/ir_translation.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 588, 67, 3168, 12, 2890, 16, 4422, 16, 4555, 16, 508, 16, 3574, 16, 3303, 16, 1084, 33, 7036, 4672, 309, 486, 3303, 30, 327, 875, 309, 1084, 30, 468, 430, 1549, 12, 3168, 16, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 588, 67, 3168, 12, 2890, 16, 4422, 16, 4555, 16, 508, 16, 3574, 16, 3303, 16, 1084, 33, 7036, 4672, 309, 486, 3303, 30, 327, 875, 309, 1084, 30, 468, 430, 1549, 12, 3168, 16, ...
for mstr in movstrs: try: if type == None: move = parseAny (board, mstr) elif type == SAN: move = parseSAN (board, mstr) elif type == AN: move = parseAN (board, mstr) elif type == LAN: move = parseLAN (board, mstr) except ParsingError: break if testvalidate: if not validateMove (board, move):
try: for mstr in movstrs: try: if type == None: move = parseAny (board, mstr) elif type == SAN: move = parseSAN (board, mstr) elif type == AN: move = parseAN (board, mstr) elif type == LAN: move = parseLAN (board, mstr) except ParsingError:
def listToMoves (board, movstrs, type=None, testvalidate=False): moves = [] board.lock.acquire() for mstr in movstrs: try: if type == None: move = parseAny (board, mstr) elif type == SAN: move = parseSAN (board, mstr) elif type == AN: move = parseAN (board, mstr) elif type == LAN: move = parseLAN (board, mstr) except ParsingError: # We expect a ParsingError to be raised when parsing "old" lines # from analyzing engines, which haven't yet noticed their new tasks break if testvalidate: if not validateMove (board, move): break moves.append(move) board.applyMove(move) for move in moves: board.popMove() board.lock.release() return moves
b4a3ce51f9000ef8959d6e9bc0976c0b739d660a /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/5339/b4a3ce51f9000ef8959d6e9bc0976c0b739d660a/lmove.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 666, 774, 19297, 261, 3752, 16, 5730, 24432, 16, 618, 33, 7036, 16, 1842, 5662, 33, 8381, 4672, 13934, 273, 5378, 225, 11094, 18, 739, 18, 1077, 1039, 1435, 364, 312, 701, 316, 5730, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 666, 774, 19297, 261, 3752, 16, 5730, 24432, 16, 618, 33, 7036, 16, 1842, 5662, 33, 8381, 4672, 13934, 273, 5378, 225, 11094, 18, 739, 18, 1077, 1039, 1435, 364, 312, 701, 316, 5730, 2...
"<function <lambda> at 0x"))
"<function <lambda"))
def test_lambda(self): self.failUnless(repr(lambda x: x).startswith( "<function <lambda> at 0x")) # XXX anonymous functions? see func_repr
ccea6d5680d2e68084812daaabc266cd5e2f962b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ccea6d5680d2e68084812daaabc266cd5e2f962b/test_repr.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 14661, 12, 2890, 4672, 365, 18, 6870, 984, 2656, 12, 12715, 12, 14661, 619, 30, 619, 2934, 17514, 1918, 12, 3532, 915, 411, 14661, 6, 3719, 468, 11329, 13236, 4186, 35, 225, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 14661, 12, 2890, 4672, 365, 18, 6870, 984, 2656, 12, 12715, 12, 14661, 619, 30, 619, 2934, 17514, 1918, 12, 3532, 915, 411, 14661, 6, 3719, 468, 11329, 13236, 4186, 35, 225, ...
cr.select_font_face("DejaVu", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
cr.select_font_face("DejaVu", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
def _get_font_parameters(self, cr, fontsize): cr.select_font_face("DejaVu", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL) cr.set_font_size(fontsize * 1.2) heading_fascent, heading_fdescent, heading_fheight = cr.font_extents()[:3]
0280212f34e2e8635e73455f44e90ed69c35e6cc /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9326/0280212f34e2e8635e73455f44e90ed69c35e6cc/street_index.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 588, 67, 5776, 67, 3977, 12, 2890, 16, 4422, 16, 14869, 4672, 4422, 18, 4025, 67, 5776, 67, 865, 2932, 758, 12333, 58, 89, 3113, 14503, 18, 25221, 67, 4559, 6856, 67, 15480, 16, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 588, 67, 5776, 67, 3977, 12, 2890, 16, 4422, 16, 14869, 4672, 4422, 18, 4025, 67, 5776, 67, 865, 2932, 758, 12333, 58, 89, 3113, 14503, 18, 25221, 67, 4559, 6856, 67, 15480, 16, ...
this = apply(_quickfix.new_UnderlyingSecurityID, args)
this = _quickfix.new_UnderlyingSecurityID(*args)
def __init__(self, *args): this = apply(_quickfix.new_UnderlyingSecurityID, args) try: self.this.append(this) except: self.this = this
7e632099fd421880c8c65fb0cf610d338d115ee9 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8819/7e632099fd421880c8c65fb0cf610d338d115ee9/quickfix.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 380, 1968, 4672, 333, 273, 389, 19525, 904, 18, 2704, 67, 14655, 6291, 4368, 734, 30857, 1968, 13, 775, 30, 365, 18, 2211, 18, 6923, 12, 2211, 13, 1335, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 380, 1968, 4672, 333, 273, 389, 19525, 904, 18, 2704, 67, 14655, 6291, 4368, 734, 30857, 1968, 13, 775, 30, 365, 18, 2211, 18, 6923, 12, 2211, 13, 1335, ...
if options['tests'].get('author'):
if options['tests'].get('authors'):
def _check(docmap, modules, options): """ Run completeness checks on the objects in the given documentation map. By default, C{_check} checks for docstrings in all public modules, classes, functions, and properties. Additional checks can be added with the C{'tests'} option: - C{private}: Also checks private objects. - C{full}: Also checks variables, parameters, and return values. @param docmap: A documentation map containing the documentation for the objects whose API documentation should be created. @param options: Options from the command-line arguments. @type options: C{dict} """ from epydoc.checker import DocChecker # Run completeness checks. if options['verbosity'] > 0: print >>sys.stderr, 'Performing completeness checks...' checker = DocChecker(docmap, modules) if options['tests'].get('all'): for test in TESTS: options['tests'][test] = 1 # Run the checks checks = 0 if (options['tests'].get('basic') or options['tests'].get('full') or options['tests'].get('private')): checks |= (DocChecker.MODULE | DocChecker.CLASS | DocChecker.FUNC | DocChecker.PROPERTY | DocChecker.DESCR_LAZY | DocChecker.PUBLIC) if options['tests'].get('private'): checks |= DocChecker.PRIVATE if options['tests'].get('full'): checks |= DocChecker.ALL_T if options['tests'].get('type'): DocChecker.TYPE passed_checks = checker.check(checks) if options['tests'].get('author'): checks = DocChecker.MODULE | DocChecker.PUBLIC | DocChecker.AUTHOR if options['tests'].get('private'): checks |= DocChecker.PRIVATE passed_checks = checker.check(checks) and passed_checks if options['tests'].get('version'): checks = DocChecker.MODULE | DocChecker.PUBLIC | DocChecker.VERSION if options['tests'].get('private'): checks |= DocChecker.PRIVATE passed_checks = checker.check(checks) and passed_checks if passed_checks and options['verbosity'] > 0: print >>sys.stderr, ' All checks passed!'
55668059b93c1d811a0940669739c36e1bda8a13 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/11420/55668059b93c1d811a0940669739c36e1bda8a13/cli.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 1893, 12, 2434, 1458, 16, 4381, 16, 702, 4672, 3536, 1939, 2302, 15681, 4271, 603, 326, 2184, 316, 326, 864, 7323, 225, 852, 18, 225, 2525, 805, 16, 385, 95, 67, 1893, 97, 4271, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 1893, 12, 2434, 1458, 16, 4381, 16, 702, 4672, 3536, 1939, 2302, 15681, 4271, 603, 326, 2184, 316, 326, 864, 7323, 225, 852, 18, 225, 2525, 805, 16, 385, 95, 67, 1893, 97, 4271, ...
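Editor's note: the epydoc cli.py row corrects a lookup key ('author' -> 'authors') so the command-line test actually triggers the author check. The same context also shows a bare DocChecker.TYPE expression, which is a no-op; accumulating a bitmask flag needs an in-place OR. A generic sketch with invented flag values:
    MODULE, PUBLIC, TYPE = 1, 2, 4   # hypothetical bit flags

    checks = MODULE | PUBLIC
    TYPE             # evaluates and discards the value: checks is unchanged
    checks |= TYPE   # this is what actually adds the flag
    assert checks == MODULE | PUBLIC | TYPE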
str += ' - ' + dom_to_html(param.descr(), container)
pdescr = self._dom_to_html(param.descr(), container, 8) str += ' -\n %s' % pdescr.rstrip() str += '\n'
def _func_details(self, functions, cls, heading='Function Details'): """ @return: The HTML code for a function details table. This is used by L{_module_to_html} to describe the functions in a module; and by L{_class_to_html} to describe member functions. @rtype: C{string} """ functions = self._sort(functions) if len(functions) == 0: return '' str = self._table_header(heading, 'details')+'</table>'
62ff4e473663c51d0a945a451e36120f11d9450a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/11420/62ff4e473663c51d0a945a451e36120f11d9450a/html.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 644, 67, 6395, 12, 2890, 16, 4186, 16, 2028, 16, 11053, 2218, 2083, 21897, 11, 4672, 3536, 632, 2463, 30, 1021, 3982, 981, 364, 279, 445, 3189, 1014, 18, 225, 1220, 353, 1399, 635...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 644, 67, 6395, 12, 2890, 16, 4186, 16, 2028, 16, 11053, 2218, 2083, 21897, 11, 4672, 3536, 632, 2463, 30, 1021, 3982, 981, 364, 279, 445, 3189, 1014, 18, 225, 1220, 353, 1399, 635...
bk.log.error(wc.LOG_FILTER, "Empty image data found at %r (%r)", url, buf.getvalue()) else: attrs['imgsize_blocked'] = \
return '' attrs['imgsize_blocked'] = \
def finish (self, data, **attrs): # note: if attrs['blocked'] is True, then the blockdata is # already sent out if not attrs.has_key('imgsize_buf'): # do not block this image return data if attrs['imgsize_blocked']: # block this image return '' buf = attrs['imgsize_buf'] if buf.closed: return data buf.write(data) url = attrs['url'] pos = buf.tell() if pos <= 0: bk.log.error(wc.LOG_FILTER, "Empty image data found at %r (%r)", url, buf.getvalue()) else: attrs['imgsize_blocked'] = \ not self.check_sizes(buf, attrs['imgsize_sizes'], url, finish=True) data = buf.getvalue() buf.close() if attrs['imgsize_blocked']: return self.blockdata return data
a560a7dc4e0e4176b308bdceefa2936092df33f7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/a560a7dc4e0e4176b308bdceefa2936092df33f7/ImageSize.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4076, 261, 2890, 16, 501, 16, 2826, 7039, 4672, 468, 4721, 30, 309, 3422, 3292, 23156, 3546, 353, 1053, 16, 1508, 326, 1203, 892, 353, 468, 1818, 3271, 596, 309, 486, 3422, 18, 5332, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4076, 261, 2890, 16, 501, 16, 2826, 7039, 4672, 468, 4721, 30, 309, 3422, 3292, 23156, 3546, 353, 1053, 16, 1508, 326, 1203, 892, 353, 468, 1818, 3271, 596, 309, 486, 3422, 18, 5332, 6...
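Editor's note: the ImageSize row swaps a log-and-fall-through branch for an immediate return when no image bytes were buffered. The guard-clause pattern in isolation, as a simplified sketch (not the real filter):
    def finish(buf_value, blocked):
        # guard clause: nothing buffered means nothing to size-check
        if not buf_value:
            return ''
        # ... size checks would go here ...
        return '' if blocked else buf_value

    assert finish('', blocked=False) == ''
    assert finish('GIF89a', blocked=False) == 'GIF89a'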
self.remoteSiteTop,'db.xml'),
self.remoteSiteTop, os.path.basename(sys.argv[0]) \ + '.xml'),
def __init__(self): self.siteTop = Pathname('siteTop', 'Root of the tree where the website is generated and thus where *remoteSiteTop* is cached on the local system', default=os.getcwd()) self.remoteSiteTop = Pathname('remoteSiteTop', 'Root of the remote tree that holds the published website (ex: url:/var/cache).', default='') self.installTop = Pathname('installTop', 'Root of the tree for installed bin/, include/, lib/, ...', default=os.getcwd()) self.srcTop = Pathname('srcTop', 'Root of the tree where the source code under revision control lives on the local machine.',self.siteTop,default='reps') self.environ = { 'buildTop': Pathname('buildTop', 'Root of the tree where intermediate files are created.', self.siteTop,default='build'), 'srcTop' : self.srcTop, 'binDir': Pathname('binDir', 'Root of the tree where executables are installed', self.installTop), 'includeDir': Pathname('includeDir', 'Root of the tree where include files are installed', self.installTop), 'libDir': Pathname('libDir', 'Root of the tree where libraries are installed', self.installTop), 'etcDir': Pathname('etcDir', 'Root of the tree where extra files are installed', self.installTop,'etc'), 'shareDir': Pathname('shareDir', 'Directory where the shared files are installed.', self.installTop,'share'), 'duplicateDir': Pathname('duplicateDir', 'Directory where important directory trees on the remote machine are duplicated.', self.installTop,'duplicate'), 'siteTop': self.siteTop, 'remoteSiteTop': self.remoteSiteTop, 'remoteIndex': Pathname('remoteIndex', 'Index file with projects dependencies information', self.remoteSiteTop,'db.xml'), 'remoteSrcTop': Pathname('remoteSrcTop', 'Root of the tree on the remote machine where repositories are located', self.remoteSiteTop,'reps'), 'darwinTargetVolume': SingleChoice('darwinTargetVolume', None, descr='Destination of installed packages on a Darwin local machine. Installing on the "LocalSystem" requires administrator privileges.', choices=[ ['LocalSystem', 'install packages on the system root for all users'], ['CurrentUserHomeDirectory', 'install packages for the current user only'] ]), 'distHost': HostPlatform('distHost') }
4a9bbf3c5ea7429f402e63ad28ed48a40972ea00 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/1360/4a9bbf3c5ea7429f402e63ad28ed48a40972ea00/dws.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 4672, 365, 18, 4256, 3401, 273, 2666, 529, 2668, 4256, 3401, 2187, 296, 2375, 434, 326, 2151, 1625, 326, 12504, 353, 4374, 471, 12493, 1625, 380, 7222, 4956, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 4672, 365, 18, 4256, 3401, 273, 2666, 529, 2668, 4256, 3401, 2187, 296, 2375, 434, 326, 2151, 1625, 326, 12504, 353, 4374, 471, 12493, 1625, 380, 7222, 4956, 3...
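Editor's note: the dws.py row derives the remote index file name from the running script instead of a hard-coded 'db.xml'. The building block is os.path.basename applied to sys.argv[0]:
    import os
    import sys

    # e.g. running "python /usr/local/bin/dws.py" yields "dws.py.xml"
    index_name = os.path.basename(sys.argv[0]) + '.xml'
    print(index_name)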
def test07_TxnTruncate(self):
def test08_TxnTruncate(self):
def test07_TxnTruncate(self): d = self.d if verbose: print '\n', '-=' * 30 print "Running %s.test07_TxnTruncate..." % self.__class__.__name__
02ec8a6bc5174b4725b144ea32bc810ca238d4fa /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/8125/02ec8a6bc5174b4725b144ea32bc810ca238d4fa/test_basics.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 6840, 67, 13789, 25871, 12, 2890, 4672, 302, 273, 365, 18, 72, 309, 3988, 30, 1172, 2337, 82, 2187, 2400, 2218, 380, 5196, 1172, 315, 7051, 738, 87, 18, 3813, 8642, 67, 13789, 25...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 6840, 67, 13789, 25871, 12, 2890, 4672, 302, 273, 365, 18, 72, 309, 3988, 30, 1172, 2337, 82, 2187, 2400, 2218, 380, 5196, 1172, 315, 7051, 738, 87, 18, 3813, 8642, 67, 13789, 25...
child.setProp('href', str( os.path.normpath(os.path.join(xsl_path, file))))
child.setProp('href', urllib.quote(str( os.path.normpath(os.path.join(xsl_path, file)))))
def create_rml(self, cr, xml, uid, context=None): if not context: context={} service = netsvc.LocalService("object_proxy")
a93b474bfc5651306ace9a6a066f0aee8172c13b /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/12853/a93b474bfc5651306ace9a6a066f0aee8172c13b/interface.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 752, 67, 86, 781, 12, 2890, 16, 4422, 16, 2025, 16, 4555, 16, 819, 33, 7036, 4672, 309, 486, 819, 30, 819, 12938, 1156, 273, 21954, 4227, 18, 25635, 2932, 1612, 67, 5656, 7923, 2, 0,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 752, 67, 86, 781, 12, 2890, 16, 4422, 16, 2025, 16, 4555, 16, 819, 33, 7036, 4672, 309, 486, 819, 30, 819, 12938, 1156, 273, 21954, 4227, 18, 25635, 2932, 1612, 67, 5656, 7923, 2, -1...
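Editor's note: the interface.py row percent-encodes the stylesheet path before storing it in an href attribute, so spaces and other reserved characters survive URL handling. The original uses Python 2's urllib.quote; the Python 3 spelling is urllib.parse.quote. A small sketch on an illustrative POSIX path:
    import os
    try:
        from urllib.parse import quote   # Python 3
    except ImportError:
        from urllib import quote         # Python 2

    path = os.path.join('/usr/share/xsl', 'corporate header.xsl')
    print(quote(path))   # '/usr/share/xsl/corporate%20header.xsl'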
self.addFile(os.path.join(root, name))
self.add_file(os.path.join(root, name))
def addDirectory(self, dirname): for root, dirs, files in os.walk(os.path.join(self.dist_dir, dirname)): for name in files: self.addFile(os.path.join(root, name))
16717514d85be409df247e58637855a19e0ad04f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12354/16717514d85be409df247e58637855a19e0ad04f/setup.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 527, 2853, 12, 2890, 16, 4283, 4672, 364, 1365, 16, 7717, 16, 1390, 316, 1140, 18, 11348, 12, 538, 18, 803, 18, 5701, 12, 2890, 18, 4413, 67, 1214, 16, 4283, 3719, 30, 364, 508, 316,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 527, 2853, 12, 2890, 16, 4283, 4672, 364, 1365, 16, 7717, 16, 1390, 316, 1140, 18, 11348, 12, 538, 18, 803, 18, 5701, 12, 2890, 18, 4413, 67, 1214, 16, 4283, 3719, 30, 364, 508, 316,...
elif mbody == None or blen > mblen: if mbody != None and blen - mblen < 7 and mblen > 7 and mbody[-4:] == '\r\n\r\n':
elif mbody == None: raise Exception('Missed SIP body, %d bytes expected' % blen) elif blen > mblen: if blen - mblen < 7 and mblen > 7 and mbody[-4:] == '\r\n\r\n':
def __init__(self, buf = None): self.headers = [] if buf == None: return # Locate a body mbody = None for bdel in ('\r\n\r\n', '\r\r', '\n\n'): boff = buf.find(bdel) if boff != -1: mbody = buf[boff + len(bdel):] buf = buf[:boff] if len(mbody) == 0: mbody = None break # Split message into lines and put aside start line lines = buf.splitlines() self.setSL(lines[0]) i = 2 while i < len(lines): if lines[i][0] in (' ', '\t'): lines[i - 1] += ' ' + lines[i].strip() del lines[i] else: i += 1 # Parse headers for line in lines[1:]: try: self.headers.append(SipHeader(line, fixname = True)) except ESipHeaderCSV, einst: for body in einst.bodys: self.headers.append(SipHeader(name = einst.name, bodys = body)) except ESipHeaderIgnore: continue if self.countHFs('via') == 0: raise Exception('Via HF is missed') if self.countHFs('content-length') > 0: blen = self.getHFBody('content-length').number if mbody == None: mblen = 0 else: mblen = len(mbody) if blen == 0: mbody = None mblen = 0 elif mbody == None or blen > mblen: if mbody != None and blen - mblen < 7 and mblen > 7 and mbody[-4:] == '\r\n\r\n': # XXX: we should not really be doing this, but it appears to be # a common off-by-one/two/.../six problem with SDPs generates by # the consumer-grade devices. print 'Truncated SIP body, %d bytes expected, %d received, fixing...' % (blen, mblen) blen = mblen else: # XXX: Should generate 400 Bad Request if such condition # happens with request raise Exception('Truncated SIP body, %d bytes expected, %d received' % (blen, mblen)) elif blen < mblen: mbody = mbody[:blen] mblen = blen if mbody != None: ct = self.getHFs('content-type') if len(ct) > 0: self.body = MsgBody(mbody, str(ct[0].body).lower()) self.delHFs('content-type') self.delHFs('content-length') else: self.body = MsgBody(mbody)
6e244a74812b24f83affda6b3db3b75fcc32f52f /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/8853/6e244a74812b24f83affda6b3db3b75fcc32f52f/SipMsg.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 1681, 273, 599, 4672, 365, 18, 2485, 273, 5378, 309, 1681, 422, 599, 30, 327, 468, 2851, 340, 279, 1417, 312, 3432, 273, 599, 364, 324, 3771, 316, 7707, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 1681, 273, 599, 4672, 365, 18, 2485, 273, 5378, 309, 1681, 422, 599, 30, 327, 468, 2851, 340, 279, 1417, 312, 3432, 273, 599, 364, 324, 3771, 316, 7707, ...
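Editor's note: the SipMsg row splits one overloaded condition in two, so a missing body with a non-zero Content-Length is reported as its own error, distinct from a truncated body. A reduced sketch of that validation (simplified; the real code also repairs small CRLF off-by-a-few cases):
    def check_body(mbody, blen):
        mblen = 0 if mbody is None else len(mbody)
        if blen == 0:
            return None
        if mbody is None:
            raise Exception('Missed SIP body, %d bytes expected' % blen)
        if blen > mblen:
            raise Exception('Truncated SIP body, %d bytes expected, %d received'
                            % (blen, mblen))
        return mbody[:blen]   # trim any trailing bytes

    assert check_body('v=0\r\n', 5) == 'v=0\r\n'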
if not is_unavailable_exception(e): self.fail("%s\n%s" % (e, e.headers))
self.fail("%s\n%s" % (e, e.headers))
def test_introspection3(self): # the SimpleXMLRPCServer doesn't support signatures, but # at least check that we can try making the call try: p = xmlrpclib.ServerProxy('http://localhost:%d' % PORT) divsig = p.system.methodSignature('div') self.assertEqual(divsig, 'signatures not supported') except xmlrpclib.ProtocolError as e: # ignore failures due to non-blocking socket 'unavailable' errors if not is_unavailable_exception(e): # protocol error; provide additional information in test output self.fail("%s\n%s" % (e, e.headers))
15c974651f7bb8e54a008359cf306a8e8ec13c12 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/8546/15c974651f7bb8e54a008359cf306a8e8ec13c12/test_xmlrpc.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 474, 26362, 23, 12, 2890, 4672, 468, 326, 4477, 4201, 8087, 2081, 3302, 1404, 2865, 14862, 16, 1496, 468, 622, 4520, 866, 716, 732, 848, 775, 10480, 326, 745, 775, 30, 293, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 474, 26362, 23, 12, 2890, 4672, 468, 326, 4477, 4201, 8087, 2081, 3302, 1404, 2865, 14862, 16, 1496, 468, 622, 4520, 866, 716, 732, 848, 775, 10480, 326, 745, 775, 30, 293, 2...
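Editor's note: the test_xmlrpc row restores the is_unavailable_exception() guard so that transient "server unavailable" socket failures do not fail the test. The real helper lives in test_xmlrpc.py; the sketch below only illustrates the shape of such a predicate, with an assumed errno-based heuristic:
    import errno

    def looks_unavailable(exc):
        # treat typical transient socket errors as 'server unavailable'
        transient = (errno.ECONNREFUSED, errno.ECONNRESET, errno.EAGAIN)
        return getattr(exc, 'errno', None) in transient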
debuggerArgs=None, debuggerInteractive=False):
debuggerArgs=None, debuggerInteractive=False, profileName=None):
def runTests(self, xpcshell, xrePath=None, symbolsPath=None, manifest=None, testdirs=[], testPath=None, interactive=False, logfiles=True, thisChunk=1, totalChunks=1, debugger=None, debuggerArgs=None, debuggerInteractive=False): """Run xpcshell tests.
6ce9d307cde73f0356320551c187db3ef71ec963 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11102/6ce9d307cde73f0356320551c187db3ef71ec963/runxpcshelltests.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1086, 14650, 12, 2890, 16, 619, 2436, 10304, 16, 619, 266, 743, 33, 7036, 16, 7963, 743, 33, 7036, 16, 5643, 33, 7036, 16, 1842, 8291, 22850, 6487, 1842, 743, 33, 7036, 16, 12625, 33, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1086, 14650, 12, 2890, 16, 619, 2436, 10304, 16, 619, 266, 743, 33, 7036, 16, 7963, 743, 33, 7036, 16, 5643, 33, 7036, 16, 1842, 8291, 22850, 6487, 1842, 743, 33, 7036, 16, 12625, 33, ...
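Editor's note: the runxpcshelltests row appends profileName=None to the signature; a defaulted keyword parameter added at the end of a signature leaves every existing caller working. A tiny demonstration with a trimmed-down signature:
    def run_tests(xpcshell, debugger=None, profileName=None):   # new kwarg
        return (xpcshell, debugger, profileName)

    # old call sites keep working unchanged:
    assert run_tests('xpcshell') == ('xpcshell', None, None)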
_('How many empty boks should be added?'), 1, 1, 100)
_('How many empty books should be added?'), 1, 1, 100)
def add_empty(self, checked): ''' Add an empty book item to the library. This does not import any formats from a book file. ''' num, ok = QInputDialog.getInt(self, _('How many empty books?'), _('How many empty boks should be added?'), 1, 1, 100) if ok: from calibre.ebooks.metadata import MetaInformation for x in xrange(num): self.library_view.model().db.import_book(MetaInformation(None), []) self.library_view.model().books_added(num)
db6f1e7d50fff518d1a207cf152586454f1c0df9 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/9125/db6f1e7d50fff518d1a207cf152586454f1c0df9/ui.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 527, 67, 5531, 12, 2890, 16, 5950, 4672, 9163, 1436, 392, 1008, 6978, 761, 358, 326, 5313, 18, 1220, 1552, 486, 1930, 1281, 6449, 628, 279, 6978, 585, 18, 9163, 818, 16, 1529, 273, 223...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 527, 67, 5531, 12, 2890, 16, 5950, 4672, 9163, 1436, 392, 1008, 6978, 761, 358, 326, 5313, 18, 1220, 1552, 486, 1930, 1281, 6449, 628, 279, 6978, 585, 18, 9163, 818, 16, 1529, 273, 223...
e = 0.0
deltax= 0.0
def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500): """ Given a function of one-variable and a possible bracketing interval, return the minimum of the function isolated to a fractional precision of tol. A bracketing interval is a triple (a,b,c) where (a<b<c) and func(b) < func(a),func(c). If bracket is two numbers then they are assumed to be a starting interval for a downhill bracket search (see bracket) Uses inverse interpolation when possible to speed up convergence. """ _mintol = 1.0e-11 _cg = 0.3819660 if brack is None: xa,xb,xc,fa,fb,fc,funcalls = bracket(func, args=args) elif len(brack) == 2: xa,xb,xc,fa,fb,fc,funcalls = bracket(func, xa=brack[0], xb=brack[1], args=args) elif len(brack) == 3: xa,xb,xc = brack if (xa > xc): # swap so xa < xc can be assumed dum = xa; xa=xc; xc=dum assert ((xa < xb) and (xb < xc)), "Not a bracketing interval." fa = apply(func, (xa,)+args) fb = apply(func, (xb,)+args) fc = apply(func, (xc,)+args) assert ((fb<fa) and (fb < fc)), "Not a bracketing interval." funcalls = 3 else: raise ValuError, "Bracketing interval must be length 2 or 3 sequence." x=w=v=xb fw=fv=fx=apply(func, (x,)+args) if (xa < xc): a = xa; b = xc else: a = xc; b = xa e = 0.0 funcalls = 1 iter = 0 while (iter < maxiter): tol1 = tol*abs(x) + _mintol tol2 = 2.0*tol1 xmid = 0.5*(a+b) if abs(x-xmid) < (tol2-0.5*(b-a)): # check for convergence xmin=x; fval=fx break if (abs(e) <= tol1): # do a parabolic fit if (x>=xmid): e=a-x else: e=b-x d = _cg*e else: tmp1 = (x-w)*(fx-fv) tmp2 = (x-v)*(fx-fw) p = (x-v)*tmp2 - (x-w)*tmp1; tmp2 = 2.0*(tmp2-tmp1) if (tmp2 > 0.0): p = -p tmp2 = abs(tmp2) etemp = e e = d # check parabolic fit if ((p > tmp2*(a-x)) and (p < tmp2*(b-x)) and (abs(p) < abs(0.5*tmp2*etemp))): d = p*1.0/tmp2 # if it's good use it. u = x + d if ((u-a) < tol2 or (b-u) < tol2): if xmid-x >= 0: d = tol1 else: d = -tol1 else: if (x>=xmid): e=a-x # if it's bad do a golden section step else: e=b-x d = _cg*e if (abs(d) < tol1): if d >= 0: u = x + tol1 else: u = x - tol1 else: u = x + d fu = apply(func, (u,)+args) funcalls += 1 if (fu > fx): if (u<x): a=u else: b=u if (fu<=fw) or (w==x): v=w; w=u; fv=fw; fw=fu elif (fu<=fv) or (v==x) or (v==w): v=u; fv=fu else: if (u >= x): a = x else: b = x v=w; w=x; x=u fv=fw; fw=fx; fx=fu xmin = x fval = fx if full_output: return xmin, fval, iter, funcalls else: return xmin
151bc0cf429941b974cff683950f0c18642f7da4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/151bc0cf429941b974cff683950f0c18642f7da4/optimize.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 324, 547, 12, 644, 16, 833, 33, 9334, 16430, 363, 33, 7036, 16, 6085, 33, 21, 18, 8875, 73, 17, 28, 16, 1983, 67, 2844, 33, 20, 16, 25743, 33, 12483, 4672, 3536, 16803, 279, 445, 4...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 324, 547, 12, 644, 16, 833, 33, 9334, 16430, 363, 33, 7036, 16, 6085, 33, 21, 18, 8875, 73, 17, 28, 16, 1983, 67, 2844, 33, 20, 16, 25743, 33, 12483, 4672, 3536, 16803, 279, 445, 4...
"""Return a consolidated list of resource fileId's of all classes in classList"""
"""Return a consolidated list of resource fileId's of all classes in classList; handles meta info."""
def _getResourcelistFromClasslist(self, classList): """Return a consolidated list of resource fileId's of all classes in classList""" result = []
42ab26cfdf09ce1534f276ff823434d8a9b022c9 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/5718/42ab26cfdf09ce1534f276ff823434d8a9b022c9/Generator.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 588, 607, 477, 2183, 376, 1265, 797, 1098, 12, 2890, 16, 16035, 4672, 3536, 990, 279, 21785, 690, 666, 434, 1058, 21223, 1807, 434, 777, 3318, 316, 16035, 31, 7372, 2191, 1123, 1212...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 588, 607, 477, 2183, 376, 1265, 797, 1098, 12, 2890, 16, 16035, 4672, 3536, 990, 279, 21785, 690, 666, 434, 1058, 21223, 1807, 434, 777, 3318, 316, 16035, 31, 7372, 2191, 1123, 1212...
self._read_buffer.put(IAC)
yield IAC
def filter(self, data): """handle a bunch of incoming bytes. this is a generator. it will yield all characters not of interest for Telnet/RFC2217.
568c4ab953cee1af3714c603f3558af138b22a71 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/10955/568c4ab953cee1af3714c603f3558af138b22a71/rfc2217.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1034, 12, 2890, 16, 501, 4672, 3536, 4110, 279, 25606, 434, 6935, 1731, 18, 333, 353, 279, 4456, 18, 518, 903, 2824, 777, 3949, 486, 434, 16513, 364, 399, 292, 2758, 19, 17926, 3787, 4...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1034, 12, 2890, 16, 501, 4672, 3536, 4110, 279, 25606, 434, 6935, 1731, 18, 333, 353, 279, 4456, 18, 518, 903, 2824, 777, 3949, 486, 434, 16513, 364, 399, 292, 2758, 19, 17926, 3787, 4...
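Editor's note: the rfc2217 row converts the Telnet filter from pushing bytes into a read buffer to yielding them, the natural shape for a generator. The toy below only shows yield replacing a buffer.put() call, using IAC doubling as the example transformation (the real filter does more):
    IAC = '\xff'   # Telnet "Interpret As Command" marker (text form for brevity)

    def filter_bytes(data):
        """Yield every character; an IAC is escaped by doubling it."""
        for ch in data:
            if ch == IAC:
                yield IAC   # yield instead of self._read_buffer.put(IAC)
            yield ch

    assert ''.join(filter_bytes('a\xffb')) == 'a\xff\xffb'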
d.startListening()
def serviceAdded(self, *args): print args
ca5e69ccd90a403af0cb9dc4684abc51f5ad7134 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12595/ca5e69ccd90a403af0cb9dc4684abc51f5ad7134/mdns.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1156, 8602, 12, 2890, 16, 380, 1968, 4672, 1172, 833, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1156, 8602, 12, 2890, 16, 380, 1968, 4672, 1172, 833, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100,...
if i > 0:
if i >= 0:
def read_footers (self): i = self.buf.find('\r\n\r\n') if i > 0: fp = StringIO.StringIO(self.buf[:i]) self.buf = self.buf[i+4:] msg = wc.http.header.WcMessage(fp) fp.close() for name in msg: for value in msg.getheaders(name): self.headers.addheader(name, value)
13d3c310a915e8ab6172f446172dcee2ea602da9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/13d3c310a915e8ab6172f446172dcee2ea602da9/UnchunkStream.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 855, 67, 10923, 414, 261, 2890, 4672, 277, 273, 365, 18, 4385, 18, 4720, 2668, 64, 86, 64, 82, 64, 86, 64, 82, 6134, 309, 277, 1545, 374, 30, 4253, 273, 15777, 18, 780, 4294, 12, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 855, 67, 10923, 414, 261, 2890, 4672, 277, 273, 365, 18, 4385, 18, 4720, 2668, 64, 86, 64, 82, 64, 86, 64, 82, 6134, 309, 277, 1545, 374, 30, 4253, 273, 15777, 18, 780, 4294, 12, 2...
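Editor's note: the UnchunkStream row fixes a classic str.find() pitfall: find() returns the index of the match, so a footer separator at offset 0 satisfies i >= 0 but not i > 0; failure is signalled by -1, not 0. Demonstrated:
    buf = '\r\n\r\nbody'          # empty footers: separator at offset 0
    i = buf.find('\r\n\r\n')
    assert i == 0                 # 'i > 0' would wrongly skip this case
    assert 'x'.find('\r\n\r\n') == -1   # only -1 means 'not found'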
image.thumbnail((keys['width'],keys['height']), pilfilter)
image.thumbnail(size, pilfilter) format = image.format and image.format or 'PNG'
def scale(self,data,w,h): """ scale image (with material from ImageTag_Hotfix)""" #make sure we have valid int's keys = {'height':int(h), 'width':int(w)}
b9a09d0a2891fd633b7ab94c27efab7cc931e424 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12165/b9a09d0a2891fd633b7ab94c27efab7cc931e424/Field.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3159, 12, 2890, 16, 892, 16, 91, 16, 76, 4672, 3536, 3159, 1316, 261, 1918, 9390, 628, 3421, 1805, 67, 25270, 904, 15574, 468, 6540, 3071, 732, 1240, 923, 509, 1807, 1311, 273, 13666, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3159, 12, 2890, 16, 892, 16, 91, 16, 76, 4672, 3536, 3159, 1316, 261, 1918, 9390, 628, 3421, 1805, 67, 25270, 904, 15574, 468, 6540, 3071, 732, 1240, 923, 509, 1807, 1311, 273, 13666, ...
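Editor's note: the Field.py row passes a size tuple to Image.thumbnail and falls back to 'PNG' when the source format is unknown. thumbnail() resizes in place and preserves aspect ratio, so the result can be smaller than the requested box. A runnable sketch with Pillow:
    from PIL import Image

    image = Image.new('RGB', (400, 200))
    image.thumbnail((100, 100))        # in-place; keeps aspect ratio
    assert image.size == (100, 50)
    fmt = image.format or 'PNG'        # Image.new() has no format -> 'PNG'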
self.assertEqual(bob.res_port, 5301)
def test_init(self): bob = BoB() self.assertEqual(bob.verbose, False) self.assertEqual(bob.msgq_socket_file, None) self.assertEqual(bob.auth_port, 5300) self.assertEqual(bob.res_port, 5301) self.assertEqual(bob.address, None) self.assertEqual(bob.cc_session, None) self.assertEqual(bob.ccs, None) self.assertEqual(bob.processes, {}) self.assertEqual(bob.dead_processes, {}) self.assertEqual(bob.runnable, False) self.assertEqual(bob.uid, None) self.assertEqual(bob.username, None) self.assertEqual(bob.nocache, False) self.assertEqual(bob.cfg_start_auth, True) self.assertEqual(bob.cfg_start_recurse, False)
fe2cd9cc0abb397f22d19148f39e59d5822e2867 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/6792/fe2cd9cc0abb397f22d19148f39e59d5822e2867/bind10_test.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 2738, 12, 2890, 4672, 800, 70, 273, 17980, 38, 1435, 365, 18, 11231, 5812, 12, 70, 947, 18, 11369, 16, 1083, 13, 365, 18, 11231, 5812, 12, 70, 947, 18, 3576, 85, 67, 7814, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 2738, 12, 2890, 4672, 800, 70, 273, 17980, 38, 1435, 365, 18, 11231, 5812, 12, 70, 947, 18, 11369, 16, 1083, 13, 365, 18, 11231, 5812, 12, 70, 947, 18, 3576, 85, 67, 7814, ...
- id: the article id"""
- id: the message id"""
def stat(self, id): """Process a STAT command. Argument: - id: article number or message id Returns: - resp: server response if successful - nr: the article number - id: the article id"""
d53b247497ce7cc7a82245cae6fc2cbef0a341a5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/d53b247497ce7cc7a82245cae6fc2cbef0a341a5/nntplib.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 610, 12, 2890, 16, 612, 4672, 3536, 2227, 279, 2347, 789, 1296, 18, 225, 5067, 30, 300, 612, 30, 7559, 1300, 578, 883, 612, 2860, 30, 300, 1718, 30, 1438, 766, 309, 6873, 300, 9884, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 610, 12, 2890, 16, 612, 4672, 3536, 2227, 279, 2347, 789, 1296, 18, 225, 5067, 30, 300, 612, 30, 7559, 1300, 578, 883, 612, 2860, 30, 300, 1718, 30, 1438, 766, 309, 6873, 300, 9884, ...
values = [str(v) for v in value]
values = ['%s' % v for v in value]
def starttag(self): parts = [self.tagname] for name, value in self.attlist(): if value is None: # boolean attribute parts.append(name) elif isinstance(value, ListType): values = [str(v) for v in value] parts.append('%s="%s"' % (name, ' '.join(values))) else: parts.append('%s="%s"' % (name, str(value))) return '<%s>' % ' '.join(parts)
1b03c5a8d42fa6ce4cb9ffabb8169cf314b03b83 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8194/1b03c5a8d42fa6ce4cb9ffabb8169cf314b03b83/nodes.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 787, 2692, 12, 2890, 4672, 2140, 273, 306, 2890, 18, 2692, 529, 65, 364, 508, 16, 460, 316, 365, 18, 4558, 1098, 13332, 309, 460, 353, 599, 30, 6647, 468, 1250, 1566, 2140, 18, 6923, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 787, 2692, 12, 2890, 4672, 2140, 273, 306, 2890, 18, 2692, 529, 65, 364, 508, 16, 460, 316, 365, 18, 4558, 1098, 13332, 309, 460, 353, 599, 30, 6647, 468, 1250, 1566, 2140, 18, 6923, ...
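Editor's note: the nodes.py row swaps str(v) for '%s' % v. In Python 2 these differ for unicode attribute values: str() attempts an implicit ASCII encode and raises UnicodeEncodeError on non-ASCII input, while '%s' % v promotes the whole result to unicode. Illustration (Python 2 semantics; in Python 3 the two forms are equivalent):
    # Python 2:
    #   v = u'caf\xe9'
    #   str(v)     -> UnicodeEncodeError (implicit ASCII encode)
    #   '%s' % v   -> u'caf\xe9' (expression result becomes unicode)
    v = u'caf\xe9'
    s = '%s' % v
    assert s == u'caf\xe9'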
if len(shape(matrix)) == 1: if fabs(sum(matrix)-1.0) > 0.01:
if len(matrix.shape) == 1: if numpy.fabs(sum(matrix)-1.0) > 0.01:
def _copy_and_check(matrix, desired_shape): # Copy the matrix. matrix = array(matrix, MATCODE, copy=1) # Check the dimensions. if shape(matrix) != desired_shape: raise ValuError("Incorrect dimension") # Make sure it's normalized. if len(shape(matrix)) == 1: if fabs(sum(matrix)-1.0) > 0.01: raise ValueError("matrix not normalized to 1.0") elif len(shape(matrix)) == 2: for i in range(len(matrix)): if fabs(sum(matrix[i])-1.0) > 0.01: raise ValueError("matrix %d not normalized to 1.0" % i) else: raise ValueError("I don't handle matrices > 2 dimensions") return matrix
9f6ccc18a75e2a2d3871e2b69f0ac7d071a00baf /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/7167/9f6ccc18a75e2a2d3871e2b69f0ac7d071a00baf/MarkovModel.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 3530, 67, 464, 67, 1893, 12, 5667, 16, 6049, 67, 4867, 4672, 468, 5631, 326, 3148, 18, 3148, 273, 526, 12, 5667, 16, 490, 789, 5572, 16, 1610, 33, 21, 13, 468, 2073, 326, 5769, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 3530, 67, 464, 67, 1893, 12, 5667, 16, 6049, 67, 4867, 4672, 468, 5631, 326, 3148, 18, 3148, 273, 526, 12, 5667, 16, 490, 789, 5572, 16, 1610, 33, 21, 13, 468, 2073, 326, 5769, ...
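Editor's note: the MarkovModel row moves from bare shape()/fabs() names (old Numeric-style star imports) to explicit attribute and module access. The normalization check itself, restated in modern NumPy terms:
    import numpy

    matrix = numpy.array([0.2, 0.3, 0.5])
    assert len(matrix.shape) == 1                     # 1-D distribution
    assert numpy.fabs(matrix.sum() - 1.0) <= 0.01     # normalized to 1.0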
parser.add_option('-d', '--base-dir',
parser.add_option('-d', '--base-dir',
def option_parser(usage=_('%prog URL\n\nWhere URL is for example http://google.com')): parser = OptionParser(usage=usage) parser.add_option('-d', '--base-dir', help=_('Base directory into which URL is saved. Default is %default'), default='.', type='string', dest='dir') parser.add_option('-t', '--timeout', help=_('Timeout in seconds to wait for a response from the server. Default: %default s'), default=10.0, type='float', dest='timeout') parser.add_option('-r', '--max-recursions', default=1, help=_('Maximum number of levels to recurse i.e. depth of links to follow. Default %default'), type='int', dest='max_recursions') parser.add_option('-n', '--max-files', default=sys.maxint, type='int', dest='max_files', help=_('The maximum number of files to download. This only applies to files from <a href> tags. Default is %default')) parser.add_option('--delay', default=0, dest='delay', type='int', help=_('Minimum interval in seconds between consecutive fetches. Default is %default s')) parser.add_option('--encoding', default=None, help=_('The character encoding for the websites you are trying to download. The default is to try and guess the encoding.')) parser.add_option('--match-regexp', default=[], action='append', dest='match_regexps', help=_('Only links that match this regular expression will be followed. This option can be specified multiple times, in which case as long as a link matches any one regexp, it will be followed. By default all links are followed.')) parser.add_option('--filter-regexp', default=[], action='append', dest='filter_regexps', help=_('Any link that matches this regular expression will be ignored. This option can be specified multiple times, in which case as long as any regexp matches a link, it will be ignored.By default, no links are ignored. If both --filter-regexp and --match-regexp are specified, then --filter-regexp is applied first.')) parser.add_option('--dont-download-stylesheets', action='store_true', default=False, help=_('Do not download CSS stylesheets.'), dest='no_stylesheets') parser.add_option('--verbose', help=_('Show detailed output information. Useful for debugging'), default=False, action='store_true', dest='verbose') return parser
596b52afac5365d3f7c48b017160fc09530e9c95 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/9125/596b52afac5365d3f7c48b017160fc09530e9c95/simple.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1456, 67, 4288, 12, 9167, 33, 67, 29909, 14654, 1976, 64, 82, 64, 82, 5262, 1976, 353, 364, 3454, 1062, 2207, 9536, 18, 832, 26112, 30, 2082, 273, 18862, 12, 9167, 33, 9167, 13, 2082, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1456, 67, 4288, 12, 9167, 33, 67, 29909, 14654, 1976, 64, 82, 64, 82, 5262, 1976, 353, 364, 3454, 1062, 2207, 9536, 18, 832, 26112, 30, 2082, 273, 18862, 12, 9167, 33, 9167, 13, 2082, ...
if self.message_summary.enabled:
if set(['auth.password', 'auth.username', 'sip.outbound_proxy', 'sip.transport_list', 'sip.subscribe_interval']).intersection(notification.data.modified) and self.message_summary.enabled:
def _NH_CFGSettingsObjectDidChange(self, notification): # activate/deactivate the account or start/stop/reload the registration process if self._started: if 'enabled' in notification.data.modified: if self.enabled: self._activate() else: self._deactivate() elif self.enabled and 'sip.register' in notification.data.modified: if self.sip.register: self._registrar.activate() else: self._registrar.deactivate() elif self.enabled and set(['message_summary.enabled', 'message_summary.voicemail_uri']).intersection(notification.data.modified): if self.message_summary.enabled: self._mwi_handler.activate() else: self._mwi_handler.deactivate() elif self.enabled and set(['auth.password', 'auth.username', 'sip.outbound_proxy', 'sip.transport_list']).intersection(notification.data.modified): if self.sip.register: self._registrar.reload_settings() if self.message_summary.enabled: self._mwi_handler.activate() elif self.enabled and self.sip.register and 'sip.register_interval' in notification.data.modified: self._registrar.reload_settings() elif self.enabled and self.message_summary.enabled and 'sip.subscribe_interval' in notification.data.modified: self._mwi_handler.activate()
5ff6d6829acebbcf50126c70a4918791823f2b50 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3449/5ff6d6829acebbcf50126c70a4918791823f2b50/account.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 50, 44, 67, 19727, 2628, 921, 18250, 3043, 12, 2890, 16, 3851, 4672, 468, 10235, 19, 323, 10014, 326, 2236, 578, 787, 19, 5681, 19, 17517, 326, 7914, 1207, 309, 365, 6315, 14561, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 50, 44, 67, 19727, 2628, 921, 18250, 3043, 12, 2890, 16, 3851, 4672, 468, 10235, 19, 323, 10014, 326, 2236, 578, 787, 19, 5681, 19, 17517, 326, 7914, 1207, 309, 365, 6315, 14561, ...
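Editor's note: the account.py row widens the settings that re-activate the MWI handler by intersecting the set of modified keys with a literal set of keys of interest. The pattern in isolation, with illustrative key names taken from the row:
    modified = {'sip.subscribe_interval', 'display_name'}
    interesting = {'auth.password', 'auth.username',
                   'sip.outbound_proxy', 'sip.transport_list',
                   'sip.subscribe_interval'}
    if interesting.intersection(modified):   # truthy when any key overlaps
        print('reload MWI subscription')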
ListCtrlView.__init__(self, parent, model, wx.wxLC_REPORT,
ListCtrlView.__init__(self, parent, model, wx.LC_REPORT,
def __init__(self, parent, model): ListCtrlView.__init__(self, parent, model, wx.wxLC_REPORT, (('Start', self.OnStart, '-', ''), ('End', self.OnEnd, '-', ''), ('Delete', self.OnDelete, '-', ''), ), 1)
46afb5fb2ee38e519ce9d813e1e12ac90f3129c9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/4325/46afb5fb2ee38e519ce9d813e1e12ac90f3129c9/AppTimeTrack.plug-in.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 982, 16, 938, 4672, 987, 12418, 1767, 16186, 2738, 972, 12, 2890, 16, 982, 16, 938, 16, 7075, 18, 13394, 67, 22710, 16, 261, 2668, 1685, 2187, 365, 18, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 982, 16, 938, 4672, 987, 12418, 1767, 16186, 2738, 972, 12, 2890, 16, 982, 16, 938, 16, 7075, 18, 13394, 67, 22710, 16, 261, 2668, 1685, 2187, 365, 18, ...
self._progress = UpdateProgress(self, self.ID_PROGRESS_BAR)
self._panel = wx.Panel(self, wx.ID_ANY) self._progress = UpdateProgress(self._panel, self.ID_PROGRESS_BAR)
def __init__(self, parent, id, title, pos = wx.DefaultPosition, size = wx.DefaultSize, style = wx.DEFAULT_DIALOG_STYLE | wx.MINIMIZE_BOX): """Creates a standalone window that is used for downloading updates for the editor. """ wx.Frame.__init__(self, parent, id, title, pos, size, style) try: if wx.Platform == "__WXMSW__": ed_icon = ed_glob.CONFIG['SYSPIX_DIR'] + u"editra.ico" self.SetIcon(wx.Icon(ed_icon, wx.BITMAP_TYPE_ICO)) else: ed_icon = ed_glob.CONFIG['SYSPIX_DIR'] + u"editra.png" self.SetIcon(wx.Icon(ed_icon, wx.BITMAP_TYPE_PNG)) finally: pass
843e5a514f51011229f95772d2def38c61b1b96d /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/3530/843e5a514f51011229f95772d2def38c61b1b96d/updater.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 982, 16, 612, 16, 2077, 16, 949, 273, 7075, 18, 1868, 2555, 16, 963, 273, 7075, 18, 1868, 1225, 16, 2154, 273, 7075, 18, 5280, 67, 2565, 18683, 67, 150...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 982, 16, 612, 16, 2077, 16, 949, 273, 7075, 18, 1868, 2555, 16, 963, 273, 7075, 18, 1868, 1225, 16, 2154, 273, 7075, 18, 5280, 67, 2565, 18683, 67, 150...
dqmData['Runs'] = ",".join(list(runs_to_process))
dqmData['Runs'] = \ ",".join([str(x) for x in list(runs_to_process)])
def main(argv) : """ prepareRelValworkflows prepare workflows for chained processing of RelVal samples - parse file holding cmsDriver commands for 1st and 2nd steps - prepare workflows - prepare WorkflowInjector:Input script - prepare ForceMerge script - prepare DBSMigrationToGlobal script - prepare PhEDExInjection script - prepare local DBS query script required parameters --samples <textfile> : list of RelVal sample parameter-sets in plain text file, one sample per line, # marks comment --version <processing version> : processing version (v1, v2, ... ) --DBSURL <URL> : URL of the local DBS (http://cmsdbsprod.cern.ch/cms_dbs_prod_local_07/servlet/DBSServlet | http://cmssrv46.fnal.gov:8080/DBS208/servlet/DBSServlet) --only-sites : Site where dataset is going to be processed or where the input dataset is taken from. Usually srm-cms.cern.ch and cmssrm.fnal.gov optional parameters --pileupdataset : input pileup dataset. It must be provided if the <samples> txt file contains PilepUp samples --lumi <number> : initial run for generation (default: 666666), set it to 777777 for high statistics samples --event <number> : initial event number (default: 1) --store-fail : store output files for failed jobs in chain processing. --read-dbs : DBS URL used for obtaining the list of available blocks for real data. Default: http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet --scripts-dir : Path to workflow creation scripts (default: $PUTIL) --skip-config : Is the configuration file was already created, it will skip cmsDriver command execution --extra-label : Extra label for identifying the datasets: /RelVal*/CMSSW_X_Y_Z-<Conditions>_<SpecialTag>_<ExtraLabel>_<FilterName>-<version>/TIER --workflow-label : Label for the workflows. --help (-h) : help --debug (-d) : debug statements """ start_total_time = time.time() # default version = os.environ.get("CMSSW_VERSION") if version is None: print '' print 'CMSSW version cannot be determined from $CMSSW_VERSION' sys.exit(2) architecture = os.environ.get("SCRAM_ARCH") if architecture is None: print '' print 'CMSSW architecture cannot be determined from $SCRAM_ARCH' sys.exit(2) try: from ProdCommon.DataMgmt.DBS.DBSReader import DBSReader except ImportError, ex: print ex print 'Please load prodAgent libraries (point $PYTHONPATH to the right path).' 
sys.exit(2) samplesFile = None processing_version = None initial_run = "666666" initial_event = "1" debug = False DBSURL = None pileup_dataset = None storeFail = False readDBS = 'http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet' onlySites = None scriptsDir = '$PUTIL' #os.path.expandvars(os.environ.get('PUTIL', None)) skip_config = False extra_label = '' workflow_label = '' try: opts, args = getopt.getopt(argv, "", ["help", "debug", "samples=", "version=", "DBSURL=", "event=", "lumi=", "pileupdataset=", "store-fail", "read-dbs=", "only-sites=", "scripts-dir=", "skip-config", "extra-label=", "workflow-label="]) except getopt.GetoptError: print main.__doc__ sys.exit(2) # check command line parameter for opt, arg in opts : if opt == "--help" : print main.__doc__ sys.exit() elif opt == "--debug" : debug = True elif opt == "--samples" : samplesFile = arg elif opt == "--version" : processing_version = arg elif opt == "--lumi" : initial_run = arg elif opt == "--event" : initial_event = arg elif opt == "--DBSURL" : DBSURL = arg elif opt == "--pileupdataset" : pileup_dataset = arg print arg elif opt == '--store-fail': storeFail = True elif opt == '--read-dbs': readDBS = arg elif opt == '--only-sites': onlySites = arg elif opt == '--scripts-dir': if arg.endswith('/') : scriptsDir = arg[:-1] else: scriptsDir = arg scriptsDirTemp = scriptsDir # There's no need to expand the shell variables anymore #if scriptsDir.startswith('$') : # scriptsDirTemp = os.environ.get(scriptsDir[1:],None) # scriptsDir = os.path.expandvars(scriptsDirTemp) if scriptsDirTemp != None: if not os.path.exists(scriptsDirTemp): print "--scripts-dir argument does not exist, please verify." sys.exit(6) else: print "--scripts-dir argument does not exist, please verify." sys.exit(6) elif opt == "--skip-config": skip_config = True elif opt == "--extra-label": extra_label = arg elif opt == "--workflow-label": workflow_label = arg if samplesFile == None or processing_version == None or DBSURL == None : print main.__doc__ sys.exit(2) if debug: print "\nprepareRelValWorkflows.py was started with the following arguments: %s" % \ " ".join(argv) print "\n" samples = [] steps = {} primary_prefix = 'RelVal' max_step = 1 try: file = open(samplesFile) except IOError: print 'file with list of parameter-sets cannot be opened!' sys.exit(1) n_line = 0 print 'Parsing input file...' start_parse_time = time.time() for line in file.readlines(): n_line += 1 # Skipping lines with no info if line.strip() != '' and line.strip() != '\n' and \ not line.strip().startswith("#") and \ line.find('//') != 0: # I don't know what's the last condition for line_parts = [part.strip() for part in line.split('@@@') if part] dqmData = {} # Keys: Scenario, Run # // # // Parsing first step #// if not line.strip().startswith('STEP'): command = '' array = [] special_tag = '' conditions = None total_events = None events_per_job = None pile_up = False output_name = '' input_data = {} input_blocks = "" acq_era = version sample_info = line_parts[0].strip() # // # // Filling up sample's details #// sample_info_parts = [part.strip() for part in \ sample_info.split('++') if part] sample_number = sample_info_parts[0] #We might need this later sample_name = sample_info_parts[1] sample_steps = [i.strip() for i in \ sample_info_parts[2].split(',') if i] primary = primary_prefix + sample_name # // # // Is it a real data processing sample? According to this #// we assign or not the command variable. 
#\\ if line_parts[0].find('REALDATA') > -1: is_real_data = True else: is_real_data = False command = line_parts[1].strip() # // # // Clean cmsDriver command format #// if command.find('=') > -1: command = command.replace('=',' ') array = [i for i in command.split() if i] # // # // Remove --python_filename if present #// if '--python_filename' in array: del array[array.index('--python_filename'):\ array.index('--python_filename')+2] # // # // Parse conditions #// if '--conditions' in array: conditions_arg = array[array.index('--conditions')+1] if conditions_arg.startswith('auto:'): conditions_key = conditions_arg.split('auto:')[1] conditions_value = autoCond[conditions_key] else: conditions_value = conditions_arg conditions = [ x.strip() \ for x in conditions_value.split(',') \ if x.find("::") != -1 ][0].split('::')[0].strip() else: conditions = 'SpecialConditions' # // # // Parsing number of events #// if '--relval' in array : total_events = array[array.index('--relval')+1\ ].split(',')[0].strip() events_per_job = array[array.index('--relval')+1\ ].split(',')[1].strip() # // # // Special tag #// # FastSim if command.find('FASTSIM') > -1: special_tag = 'FastSim' # PileUp (at the same time with FastSim) if '--pileup' in array : # // # // Will use whatever argument of --pileup option is #// pileup_arg = array[array.index('--pileup') + 1] if pileup_arg.lower().strip() != 'nopileup': if special_tag: special_tag = "_".join( [special_tag, "PU", pileup_arg.strip()]) else: special_tag = "_".join(["PU", pileup_arg.strip()]) pile_up = True if pileup_dataset is None : print "You have to provide a pileup dataset." print "Usually it is a MinBias (RAW)." print "Use option --pileupdataset" sys.exit(5) # // # // Sort of custom tag #// if '--beamspot' in array: beamspot_arg = \ array[array.index('--beamspot') + 1].strip() if special_tag: special_tag = "_".join( [special_tag, beamspot_arg]) else: special_tag = beamspot_arg # // # // Cfg file's output name #// output_name = "_".join( [x for x in [primary, conditions, special_tag] if x] ) + ".py" # // # // Add command options #// if command.find('no_exec') < 0: array.append('--no_exec') if command.find('python_filename') < 0: array.append('--python_filename') array.append(output_name) # Recomposing cmsDriver command command = " ".join(array) # Filling up DQM information dqmData['Runs'] = '1' dqmData['Scenario'] = getDQMScenario(command) # // # // Collecting info for real data samples #// if is_real_data: # // # // Parsing dataset details. The following details are #// supported: REALDATA, RUN, LABEL, FILES, EVENTS, PDNAME #\\ # Producing tuples from the input options. data_options = [tuple(x.split(':')) \ for x in sample_info_parts[3].split(',') if x.strip()] # Parsing tuples for arg_v in data_options: if len(arg_v) == 2: input_data[arg_v[0].strip()] = arg_v[1].strip() elif len(arg_v) == 1: input_data[arg_v[0].strip()] = None else: print "Line %s has an extra ','." % (line) sys.exit(7) # // # // Verifiying optional arguments: RUN, LABEL, FILE, EVENTS, #// PRIMARY #\\ data_run = input_data.get('RUN', '') data_label = input_data.get('LABEL', '') data_files = input_data.get('FILES', '') data_events = input_data.get('EVENTS', '') data_pname = input_data.get('PRIMARY', None) if data_events: data_events = int(data_events) if data_files: data_files = int(data_events) # // # // Looking for best matching dataset. It should be just #// one, otherwise the script will exit. 
#\\ reader = DBSReader(readDBS) query = "find dataset where dataset like %s" % ( input_data['REALDATA']) result_xml = reader.dbs.executeQuery(query) # XML Handler result_list = DBSXMLParser(result_xml) target_datasets = [x['dataset'] for x in result_list] # If more than one dataset is found. if len(target_datasets) > 1: # Is this an input relval dataset produced in the # current release? query = "find dataset where dataset like %s " % ( input_data['REALDATA']) query += "and release=%s" % version result_xml = reader.dbs.executeQuery(query) result_list = DBSXMLParser(result_xml) target_datasets = [x['dataset'] for x in result_list] # If more than one dataset is found, match the processing # version if len(target_datasets) > 1: find_version = \ lambda x: x.find(processing_version) != -1 target_datasets = filter(find_version, target_datasets) if len(target_datasets) > 1: msg = "Dataset pattern in line %s is too broad." % line msg += "These datasets were found: %s" % ( " ".join(target_datasets)) print msg sys.exit(8) if not target_datasets: msg = "Dataset pattern produced no match in line %s" % ( line) print msg sys.exit(8) # Now I can look up the blocks for this dataset. target_dataset = target_datasets[0] input_data['REALDATA'] = target_dataset # // # // Looking up the blocks for a given Dataset and the #// provided list of runs #\\ runs_list = \ [x.strip() for x in data_run.split('|') if x.strip()] runs_in_dbs = [x['RunNumber'] for x in \ reader.dbs.listRuns(target_dataset)] runs_in_dbs.sort() # Creating lambda function for filtering runs. expr = '' # First a string expression to evaluate is_the_first = True for run in runs_list: if is_the_first: expr += "(" is_the_first = False else: expr += " or " # Run range: XXXXXX-XXXXXX if run.count("-"): run_limits = \ [x.strip() for x in run.split('-') if x.strip()] expr += "(x >= %s and x <= %s)" % ( run_limits[0], run_limits[1]) else: expr += "x == %s" % run if not is_the_first: expr += ")" # Here comes the lambda funtion runs_filter = lambda x: eval(expr) # Filtering runs in DBS using the list provided in the # input file. target_runs = filter(runs_filter, runs_in_dbs) # Pulling up input files from DBS (including run info). 
input_files = reader.dbs.listFiles( path=target_dataset, retriveList=['retrive_run']) # // # // Parsing input blocks #// blocks = {} for input_file in input_files: # Skip files with no events # A block will be skipped if all its files have 0 # events if input_file['NumberOfEvents'] == 0: continue runs = \ [str(x['RunNumber']) for x in input_file['RunsList']] for run in runs: if run in target_runs: break else: continue # skip file if it's not in the target_runs cur_files = \ blocks.setdefault(input_file['Block']['Name'], {}).setdefault('Files', 0) cur_events = \ blocks[input_file['Block']['Name']].setdefault( 'Events', 0) cur_runs = \ blocks[input_file['Block']['Name']].setdefault( 'Runs', set()) blocks[input_file['Block']['Name']]['Files'] += 1 blocks[input_file['Block']['Name']]['Events'] += \ input_file['NumberOfEvents'] blocks[input_file['Block']['Name']]['Runs'] = \ cur_runs.union(runs) # // # // Truncating blocks list #// total_events = 0 total_files = 0 blocks_to_process = [] runs_to_process = set() for block in blocks: blocks_to_process.append(block) runs_to_process = runs_to_process.union(blocks[block]['Runs']) total_events += blocks[block]['Events'] total_files += blocks[block]['Files'] if data_events and (data_events < total_events): break if data_files and (data_files < total_files): break input_blocks = ",".join(blocks_to_process) # // # // If PRIMARY is true, then it will use the #// sample_name value as primary dataset name, else it #\\ will use the input primary dataset name. # \\ if data_pname is not None and \ data_pname.lower() in ('y', 't', 'true'): primary = "".join([primary_prefix, sample_name]) else: primary = \ [x for x in input_data['REALDATA'].split("/") if x][0] # // # // Seting special tag #// special_tag_parts = [] # Add RelVal tag if not present. if target_dataset.find(primary_prefix) == -1: special_tag_parts.append(primary_prefix) # Add LABEL if data_label: special_tag_parts.append(data_label) special_tag = "_".join(special_tag_parts) # // # // Setting Acq. 
Era #// #processed_dataset = target_dataset.split('/')[2] #dataset_acq_era = processed_dataset.split("-")[0] #if dataset_acq_era.startswith(version): # acq_era = version #else: # acq_era = dataset_acq_era # Filling up DQM information dqmData['Runs'] = ",".join(list(runs_to_process)) # // # // Composing a dictionary per sample #// dict = {} dict['sampleName'] = sample_name dict['command'] = command dict['primary'] = primary dict['outputName'] = output_name dict['conditions'] = conditions dict['specialTag'] = special_tag dict['totalEvents'] = total_events dict['eventsPerJob'] = events_per_job dict['pileUp'] = pile_up dict['isRealData'] = is_real_data dict['inputData'] = input_data dict['inputBlocks'] = input_blocks dict['steps'] = sample_steps dict['AcqEra'] = acq_era dict['DQMData'] = dqmData samples.append(dict) if debug: print 'Parsing' print 'Sample:', sample_name print 'Command:', command print 'Conditions:', conditions print 'Special tag:', special_tag print 'Total events:', total_events print 'Events per job:', events_per_job print 'Steps:', sample_steps print 'PileUp:', pile_up print 'Input data:', input_data print 'Input blocks:', input_blocks print 'DQMData:', dqmData print '' # // # // No a first step command (second HLT table, RECO, ALCA, etc) #// else: step_number = int(line_parts[0].split('++')[0].strip()[-1]) step_name = line_parts[0].split('++')[1].strip() command = line_parts[1].strip() # // # // Clean cmsDriver command format #// if command.find('=') > -1: command = command.replace('=',' ') array = [i for i in command.split() if i] # // # // Remove --python_filename if present #// if '--python_filename' in array: del array[array.index('--python_filename'):\ array.index('--python_filename')+2] # // # // Parse conditions #// if '--conditions' in array: conditions_arg = array[array.index('--conditions')+1] if conditions_arg.startswith('auto:'): conditions_key = conditions_arg.split('auto:')[1] conditions_value = autoCond[conditions_key] else: conditions_value = conditions_arg conditions = [ x.strip() \ for x in conditions_value.split(',') \ if x.find("::") != -1 ][0].split('::')[0].strip() else: conditions = 'SpecialConditions' # // # // Cfg file's output name #// output_name = "_".join([step_name, conditions]) + ".py" # // # // Add command options #// if command.find('no_exec') < 0: array.append('--no_exec') if command.find('python_filename') < 0: array.append('--python_filename') array.append(output_name) # Recomposing cmsDriver command command = " ".join(array) # // # // Second trigger table? This may be changed, right now I am #// assuming that all 4 steps workflows are like this. #\\ stage_previous = True if step_number == 2: if '-s' in array: index = array.index('-s') else: index = array.index('--step') if array[index+1].find('RECO') < 0: stage_previous = False if step_number > max_step: max_step = step_number # // # // HARVESTING cmsDriver commands should be ignored. RelVals #// should not run any HARVESTING configuration. Harvestings #\\ run independently after the datasets are produced. 
# \\ skip_step = False if '-s' in array: index = array.index('-s') else: index = array.index('--step') if array[index+1].count('HARVESTING') > 0: skip_step = True # // # // Composing a dictionary per step #// dict = {} dict['stepNumber'] = step_number dict['command'] = command dict['outputName'] = output_name dict['conditions'] = conditions dict['stagePrevious'] = stage_previous dict['DQMData'] = {'Scenario': getDQMScenario(command)} dict['skipStep'] = skip_step # // # // Step name should be unique #// if step_name not in steps: steps[step_name] = dict else: print "Label %s is repeated!!!" % step_name sys.exit(1) if debug: print 'Parsing' print 'Step name:', step_name print 'Step number:', step_number print 'Command:', command print 'Conditions:', conditions print 'Stage previous:', stage_previous print 'DQM Data:', dict['DQMData'] print '' parse_time = time.time() - start_parse_time file.close() if debug: print "Collected information step 1" for sample in samples: print 'Sample name:', sample['sampleName'] print 'Command', sample['command'] print 'Real data:', sample['isRealData'] print 'Input data:', sample['inputData'] print 'Input blocks', sample['inputBlocks'] print 'Conditions:', sample['conditions'] print 'Total events:', sample['totalEvents'] print 'Events per job:', sample['eventsPerJob'] print 'Output name:', sample['outputName'] print 'Steps:', sample['steps'] print 'PileUp:', sample['pileUp'] print 'Special tag:', sample['specialTag'] print 'Acq. Era:', sample['AcqEra'] print 'DQM data:', sample['DQMData'] print '' for i in range(2, max_step+1): print 'Collected information step %s' % i for step in steps: if steps[step]['stepNumber'] == i: print 'Step name:', step print 'Command:', steps[step]['command'] print 'Conditions:', steps[step]['conditions'] print 'Stage previous:', steps[step]['stagePrevious'] print 'DQM Data:', steps[step]['DQMData'] print '' # // # // Execute cmsDriver command #// print '' print 'Executing cmsDriver commands for step 1 configurations' print '' start_cmsDriver_time = time.time() for sample in samples: if not sample['isRealData']: # // # // if the cfg. file was already created, we'll skip cmsDriver #// command execution. #\\ if os.path.exists("/".join([os.getcwd(), sample['outputName']])) and skip_config: print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'was already issued, skipping.' continue exitCode, output, error = executeCommand(sample['command']) if exitCode == 0: print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'exited with ExitCode:', exitCode else : print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'failed with ExitCode:', exitCode sys.exit(1) else : msg = 'Real Data:\n' msg += 'Input dataset: %s\n' % (sample['inputData']['REALDATA']) msg += 'Run: %s\n' % (sample['inputData'].get('RUN', 'All')) msg += 'Input blocks: %s' % (sample['inputBlocks']) print msg for i in range(2, max_step+1): print '' print 'Executing cmsDriver commands for step %s configurations' % i print '' for step in steps: if steps[step]['stepNumber'] == i: # // # // if the cfg. file was already created, we'll skip cmsDriver #// command execution. #\\ if os.path.exists("/".join([os.getcwd(), steps[step]['outputName']])) and skip_config: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'],'was already issued, skipping.' continue # // # // Skip HARVESTING cmsDriver commands #// if steps[step]['skipStep']: print 'This is a HARVESTING cmsDriver command, skipping. 
' continue exitCode, output, error = executeCommand(steps[step]['command']) if exitCode == 0: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'], \ 'exited with ExitCode:', exitCode else: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'], \ 'failed with ExitCode:', exitCode sys.exit(1) cmsDriver_time = time.time() - start_cmsDriver_time print '' print 'Workflow creation' print '' start_workflow_time = time.time() datasets = [] unmergedDatasets = [] mergedDatasets = [] workflows = {} # // # // Create workflows #// for sample in samples: command = 'python ' + scriptsDir conditions = '' # Conditions -> processingString # // # // In case we are processing data #// if sample['isRealData']: command += '/createProcessingWorkflow.py \\\n' # Not changing the primary dataset name for real data. command += '--override-channel=' + sample['primary'] + ' \\\n' command += '--dataset=' + sample['inputData']['REALDATA'] + ' \\\n' command += '--only-blocks=' + sample['inputBlocks'] + ' \\\n' command += '--dbs-url=' + readDBS + ' \\\n' conditions = steps[sample['steps'][0]]['conditions'] command += '--split-type=file \\\n' command += '--split-size=1 \\\n' # // # // MC workflows #// else: command += '/createProductionWorkflow.py \\\n' command += '--channel=' + sample['primary'] + ' \\\n' conditions = sample['conditions'] command += '--starting-run=' + initial_run + ' \\\n' command += '--starting-event=' + initial_event + ' \\\n' command += '--totalevents=' + sample['totalEvents'] + ' \\\n' command += '--eventsperjob=' + sample['eventsPerJob'] + ' \\\n' if sample['pileUp']: command += '--pileup-dataset=' + pileup_dataset + ' \\\n' if storeFail: command += '--store-fail=True \\\n' # // # // First step #// command += '--version=' + version + ' \\\n' command += '--py-cfg=' + sample['outputName'] + ' \\\n' # // # // Input configurations (Second step and further) #// if sample['steps'][0].lower().strip() != 'none': i = 0 for step in sample['steps']: # Is this a HARVESTING step? If so, skip it! if steps[step]['skipStep']: continue # Not a HARVESTING step, continue normally. command += '--version=' + version + ' \\\n' command += '--py-cfg=' + steps[step]['outputName'] + ' \\\n' if i != 0 or not sample['isRealData']: command += '--stageout-intermediates=%s \\\n' % ( steps[step]['stagePrevious']) command += '--chained-input=output \\\n' else: dqmScenario = steps[step]['DQMData']['Scenario'] # // # // If a two-hlt tables workflow, will take conditions from #// the second step information #\\ if not steps[step]['stagePrevious'] and \ i == 0: conditions = steps[step]['conditions'] i += 1 # // # // Common options #// command += '--group=RelVal \\\n' command += '--category=relval \\\n' command += '--activity=RelVal \\\n' command += '--acquisition_era=' + sample['AcqEra'] + ' \\\n' command += '--only-sites=' + onlySites + ' \\\n' command += '--processing_version=' + processing_version + ' \\\n' # Workflow label if workflow_label: command += '--workflow_tag=' + workflow_label + ' \\\n' # // # // processingString="CMSSWVersion"_"Conditions"_"specialTag"_"extra-label" #// CMSSWVersion is appended only when the input dataset does not have it. 
#\\ processing_string_parts = [] if sample['AcqEra'] != version: processing_string_parts.append(version) processing_string_parts.append(conditions) if sample['specialTag']: processing_string_parts.append(sample['specialTag']) if extra_label: processing_string_parts.append(extra_label) command += '--processing_string=' + "_".join(processing_string_parts) if debug: print command print '' start_command_time = time.time() exitCode, output, error = executeCommand(command) command_time = time.time() - start_command_time if debug: print output print '' output = [x for x in output.split('\n') if x] if exitCode == 0: #parse output tmp = [] index = FindIndex(output,'Output Datasets') for dataset in output[index+1:]: tmp.append(dataset.strip()) # DQM Data dqmInfo = {} dqmInfo['Runs'] = sample['DQMData']['Runs'] if sample['isRealData']: dqmInfo['Scenario'] = dqmScenario else: dqmInfo['Scenario'] = sample['DQMData']['Scenario'] datasets.append({'unmerged': tmp, 'totalEvents': sample['totalEvents'], 'merged': [x.replace('-unmerged','') for x in tmp], 'DQMData': dqmInfo }) unmergedDatasets.append(tmp) index = FindIndex(output,'Created') if index == -1: print "No workflow was created by create*workflow.py" sys.exit(1) workflow = output[index].split()[1].strip() workflows.setdefault(workflow, {})['isRealData'] = sample['isRealData'] workflows[workflow]['time'] = command_time print 'workflow creation command for workflow:', workflow, \ 'exited with ExitCode:', exitCode else : print 'workflow creation command:' print command print 'failed: %s' % error sys.exit(1) if debug: print 'Created workflows:' print workflows.keys() print '' print "Unmerged datasets:" print unmergedDatasets # extract merged datasets for sample in unmergedDatasets: tmp = [] for dataset in sample: tmp.append(dataset.replace('-unmerged','')) mergedDatasets.append(tmp) workflow_time = time.time() - start_workflow_time print '' print 'Write helper scripts' print '' # WorkflowInjector:Input script inputScript = open('input.sh','w') inputScript.write('#!/bin/bash\n') feeder = 'None' for workflow in workflows.keys(): if workflows[workflow]['isRealData']: if feeder.find('ReReco') < 0: inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:SetPlugin BlockFeeder\n') feeder = 'ReReco' else : if feeder.find('Request') < 0: inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:SetPlugin RequestFeeder\n') feeder = 'Request' inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:Input ' + os.path.join(os.getcwd(), workflow) + '\n') inputScript.close() os.chmod('input.sh',0755) print 'Wrote WorkflowInjector:Input script to:',os.path.join(os.getcwd(),'input.sh') # ForceMerge forceMergeScript = open('forceMerge.sh','w') forceMergeScript.write('#!/bin/bash\n') for sample in unmergedDatasets : for dataset in sample : forceMergeScript.write('python $PRODAGENT_ROOT/util/publish.py ForceMerge ' + dataset + '\n') forceMergeScript.close() os.chmod('forceMerge.sh',0755) print 'Wrote ForceMerge script to:',os.path.join(os.getcwd(),'forceMerge.sh') # MigrateDatasetToGlobal migrateScript = open('migrateToGlobal.sh','w') migrateScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : migrateScript.write('python $PRODAGENT_ROOT/util/publish.py DBSInterface:MigrateDatasetToGlobal ' + dataset + '\n') migrateScript.close() os.chmod('migrateToGlobal.sh',0755) print 'Wrote DBSInterface:MigrateDatasetToGlobal script to:',os.path.join(os.getcwd(),'migrateToGlobal.sh') # 
PhEDExInjectDataset phedexScript = open('injectIntoPhEDEx.sh','w') phedexScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : phedexScript.write('python $PRODAGENT_ROOT/util/publish.py PhEDExInjectDataset ' + dataset + '\n') phedexScript.close() os.chmod('injectIntoPhEDEx.sh',0755) print 'Wrote PhEDExInjectDataset script to:',os.path.join(os.getcwd(),'injectIntoPhEDEx.sh') # DBS: query unmerged datasets queryUnmergedScript = open('queryUnmerged.sh','w') queryUnmergedScript.write('#!/bin/bash\n') for sample in unmergedDatasets : for dataset in sample : #if dataset.find('-RECO') == -1 or len(sample) == 1 : queryUnmergedScript.write('python $PRODAGENT_ROOT/util/InspectDBS2.py --DBSURL=' + DBSURL + ' --datasetPath=' + dataset + ' | grep total\n') queryUnmergedScript.close() os.chmod('queryUnmerged.sh',0755) print 'Wrote DBS query script for unmerged datasets to:',os.path.join(os.getcwd(),'queryUnmerged.sh') # DBS: query merged datasets queryMergedScript = open('queryMerged.sh','w') queryMergedScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : #if dataset.find('-RECO') == -1 or len(sample) == 1 : queryMergedScript.write('python $PRODAGENT_ROOT/util/InspectDBS2.py --DBSURL=' + DBSURL + ' --datasetPath=' + dataset + ' | grep total\n') queryMergedScript.close() os.chmod('queryMerged.sh',0755) print 'Wrote DBS query script for merged datasets to:',os.path.join(os.getcwd(),'queryMerged.sh') # DQMHarvesting DQMinputScript = open('DQMinput.sh','w') DQMinputScript.write("#!/bin/bash\n") reHarvest = re.compile(r'/.*/.*/(RECO|.*-RECO)') # Only RECO datasets for now. for sample in datasets: for dataset in sample['merged']: if reHarvest.match(dataset): for run in sample['DQMData']['Runs'].split(","): DQMinputScript.write('python $PRODAGENT_ROOT/util/harvestDQM.py --run=%s --path=%s --scenario=%s\n' % ( run, dataset, sample['DQMData']['Scenario'])) os.chmod('DQMinput.sh',0755) print 'Wrote DQMHarvesting script for merged datasets to:', os.path.join(os.getcwd(),'DQMinput.sh') # Output datasets list outputList = open('outputDatasets.txt','w') for sample in mergedDatasets : for dataset in sample : outputList.write(dataset + "\n") print 'Wrote output datasets list to:', os.path.join(os.getcwd(),'outputDatasets.txt') # File with expected number of events numberOfEvents = open('eventsExpected.txt','w') for sample in datasets: for dataset in sample['merged']: numberOfEvents.write("%s %s\n" % (sample['totalEvents'],dataset)) numberOfEvents.close() print 'Wrote events per dataset to:', os.path.join(os.getcwd(),'eventsExpected.txt') total_time = time.time() - start_total_time # File with timing report (Parsing, cmsDriver commands, workflow creation) timingInfo = open('timingInfo.txt', 'w') timingInfo.write('Total time: %s s\n' % total_time) timingInfo.write('Configs. creation time: %s s\n' % cmsDriver_time) timingInfo.write('Workflows creation time: %s s\n' % workflow_time) output_text = [] sum = 0 for workflow in workflows: if sum == 0: min = [workflow, workflows[workflow]['time']] max = [workflow, workflows[workflow]['time']] sum += workflows[workflow]['time'] output_text.append("%s: %s s" % (workflow, workflows[workflow]['time'])) if max[1] < workflows[workflow]['time']: max = [workflow, workflows[workflow]['time']] if min[1] > workflows[workflow]['time']: min = [workflow, workflows[workflow]['time']] timingInfo.write('Average time per workflow: %s s\n' % (int(sum) / int(len(workflows)))) timingInfo.write('Max. 
time on %s: %s s\n' % tuple(max)) timingInfo.write('Min. time on %s: %s s\n' % tuple(min)) timingInfo.write('=' * 10) timingInfo.write('Details of time per workflow:\n%s\n' % "\n".join(output_text))
a3962ab33777264a6a08aea585d641cf5c037023 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8887/a3962ab33777264a6a08aea585d641cf5c037023/prepareRelValWorkflows.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2774, 12, 19485, 13, 294, 3536, 225, 2911, 1971, 3053, 13815, 87, 225, 2911, 31738, 364, 20269, 4929, 434, 4275, 3053, 5216, 225, 300, 1109, 585, 19918, 6166, 4668, 4364, 364, 404, 334, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2774, 12, 19485, 13, 294, 3536, 225, 2911, 1971, 3053, 13815, 87, 225, 2911, 31738, 364, 20269, 4929, 434, 4275, 3053, 5216, 225, 300, 1109, 585, 19918, 6166, 4668, 4364, 364, 404, 334, ...
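The timing summary at the end of the context above seeds min/max from the first workflow by testing sum == 0, shadowing the built-ins sum, min and max along the way. A minimal sketch of the same report computed with the built-ins instead; the workflows dict shape is taken from the script, the sample values are invented:

    workflows = {'wf_A': {'time': 12.4}, 'wf_B': {'time': 7.9}}  # shape as in the script

    # Summarize per-workflow creation times without shadowing built-ins.
    times = dict((w, workflows[w]['time']) for w in workflows)
    total = sum(times.values())
    slowest = max(times, key=times.get)
    fastest = min(times, key=times.get)
    print 'Average time per workflow: %s s' % (total / len(times))
    print 'Max. time on %s: %s s' % (slowest, times[slowest])
    print 'Min. time on %s: %s s' % (fastest, times[fastest])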
print self.__arguments, 'EEEEEEEEEEEEEE'
def __call__(self, **kw): """Call the command with the keyword args.""" print self.__arguments, 'EEEEEEEEEEEEEE' #for arg in self.__arguments: # if arg.required and arg.name not in kw: # raise exceptions.bad_arguments_error, 'Missing %s' % arg.name return self.__callback(**kw)
93f17c6f46b59a552222713a7c9c869cfc440d1a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/2233/93f17c6f46b59a552222713a7c9c869cfc440d1a/command.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 1991, 972, 12, 2890, 16, 2826, 9987, 4672, 3536, 1477, 326, 1296, 598, 326, 4932, 833, 12123, 468, 1884, 1501, 316, 365, 16186, 7099, 30, 468, 565, 309, 1501, 18, 4718, 471, 1501, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 1991, 972, 12, 2890, 16, 2826, 9987, 4672, 3536, 1477, 326, 1296, 598, 326, 4932, 833, 12123, 468, 1884, 1501, 316, 365, 16186, 7099, 30, 468, 565, 309, 1501, 18, 4718, 471, 1501, ...
str = '';
str = ''
def build_installer(request): if (request.POST['action'] == 'build_installer'): vessels = simplejson.loads(request.POST['content']) str = ''; for vessel in vessels: str += vessel['owner'] for user in vessel['users']: str += " " + user return HttpResponse(str)
dc1bd81936fd3083d330264a270d4693f413fa21 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/7995/dc1bd81936fd3083d330264a270d4693f413fa21/views.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1361, 67, 20163, 12, 2293, 4672, 309, 261, 2293, 18, 3798, 3292, 1128, 3546, 422, 296, 3510, 67, 20163, 11, 4672, 331, 403, 10558, 273, 4143, 1977, 18, 17135, 12, 2293, 18, 3798, 3292, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1361, 67, 20163, 12, 2293, 4672, 309, 261, 2293, 18, 3798, 3292, 1128, 3546, 422, 296, 3510, 67, 20163, 11, 4672, 331, 403, 10558, 273, 4143, 1977, 18, 17135, 12, 2293, 18, 3798, 3292, ...
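Beyond the dropped semicolon, the build_installer view above shadows the built-in str and grows a string in a loop. A sketch of the same accumulation that avoids both; the owner/users field names come from the snippet, the function name is made up:

    def render_vessels(vessels):
        # Collect the pieces in a list and join once instead of
        # rebinding a string (and the name str) on every iteration.
        parts = []
        for vessel in vessels:
            parts.append(vessel['owner'])
            for user in vessel['users']:
                parts.append(' ' + user)
        return ''.join(parts)

    print render_vessels([{'owner': 'alice', 'users': ['bob', 'carol']}])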
self.app.get('/tickets/feed.rss') self.app.get('/tickets/feed.atom')
self.app.get('/bugs/feed.rss') self.app.get('/bugs/feed.atom')
def test_ticket_list_feed(self): self.app.get('/tickets/feed.rss') self.app.get('/tickets/feed.atom')
9bf0888f3eea525b9ad7890f878ccad04422d678 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/1036/9bf0888f3eea525b9ad7890f878ccad04422d678/test_feeds.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 16282, 67, 1098, 67, 7848, 12, 2890, 4672, 365, 18, 2910, 18, 588, 2668, 19, 6470, 2413, 19, 7848, 18, 25151, 6134, 365, 18, 2910, 18, 588, 2668, 19, 6470, 2413, 19, 7848, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 16282, 67, 1098, 67, 7848, 12, 2890, 4672, 365, 18, 2910, 18, 588, 2668, 19, 6470, 2413, 19, 7848, 18, 25151, 6134, 365, 18, 2910, 18, 588, 2668, 19, 6470, 2413, 19, 7848, ...
return 10
return defaultevents
def get_events(): maxevents = 0 try: openfile = open('/proc/sys/kernel/threads-max', 'r') except IOError: return 10 #TODO--- #Should I catch the file read to make sure its an int? maxevents = int(openfile.read()) openfile.close() return maxevents
2417e03d916953ff278668c88050b18355050c6e /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/7263/2417e03d916953ff278668c88050b18355050c6e/Linux_resources.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 336, 67, 5989, 13332, 943, 5989, 273, 374, 225, 775, 30, 1696, 768, 273, 1696, 2668, 19, 9381, 19, 9499, 19, 8111, 19, 12495, 17, 1896, 2187, 296, 86, 6134, 1335, 8340, 30, 327, 805, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 336, 67, 5989, 13332, 943, 5989, 273, 374, 225, 775, 30, 1696, 768, 273, 1696, 2668, 19, 9381, 19, 9499, 19, 8111, 19, 12495, 17, 1896, 2187, 296, 86, 6134, 1335, 8340, 30, 327, 805, ...
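The patch above swaps the hard-coded return 10 for a defaultevents fallback. A self-contained sketch of the same read-with-fallback pattern; the constant value 10 and the proc path are kept from the snippet, everything else is illustrative:

    DEFAULT_EVENTS = 10  # stands in for the snippet's defaultevents

    def get_events(path='/proc/sys/kernel/threads-max'):
        # Fall back to the default when the file is missing or not an integer.
        try:
            openfile = open(path, 'r')
        except IOError:
            return DEFAULT_EVENTS
        try:
            return int(openfile.read())
        except ValueError:
            return DEFAULT_EVENTS
        finally:
            openfile.close()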
description.append(data)
text.append(data)
def parseItem(tokens): """ <dt> Terminate by <dt>, </dt>, <dl>, </dl> or EOF. Push back if terminated by tags. """ title = None url = None description = [] dd_description = None for kind, data, attrs in tokens: if kind == DATA: description.append(data) elif kind == TAG and data == 'a': title, url, created, modified = parseLink(tokens, attrs) elif kind == TAG and data == 'dd': dd_description = parseDescription(tokens) elif data in ('dl', 'dt'): # malformed! tokens.push_back((kind,data,attrs)) break description = _join_text(description) description = dd_description or description # it is either a Folder or Bookmark if title != None: return import_util.Bookmark(title, url, description, created, modified) else: return import_util.Folder(description)
c01b1e926dcd840f1170a7e06a866bbf3d9aab50 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/2355/c01b1e926dcd840f1170a7e06a866bbf3d9aab50/import_netscape.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1109, 1180, 12, 7860, 4672, 3536, 411, 7510, 34, 225, 23945, 635, 411, 7510, 20401, 7765, 7510, 20401, 411, 5761, 20401, 7765, 5761, 34, 578, 6431, 18, 8547, 1473, 309, 14127, 635, 2342, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1109, 1180, 12, 7860, 4672, 3536, 411, 7510, 34, 225, 23945, 635, 411, 7510, 20401, 7765, 7510, 20401, 411, 5761, 20401, 7765, 5761, 34, 578, 6431, 18, 8547, 1473, 309, 14127, 635, 2342, ...
res = self.transDB.setTransformationParameter(transID,'Status','Active')
res = self.transDB.setTransformationParameter( transID, 'Status', 'Active' )
def processTransformation(self,transDict): transID = transDict['TransformationID'] # First get the LFNs associated to the transformation res = self.transDB.getTransformationFiles(condDict={'TransformationID':transID,'Status':'Unused'}) if not res['OK']: gLogger.error("%s.processTransformation: Failed to obtain input data." % AGENT_NAME, res['Message']) return res transFiles = res['Value'] lfns = res['LFNs'] if not lfns: gLogger.info("%s.processTransformation: No 'Unused' files found for transformation." % AGENT_NAME) if transDict['Status'] == 'Flush': res = self.transDB.setTransformationParameter(transID,'Status','Active') if not res['OK']: gLogger.error("%s.execute: Failed to update transformation status to 'Active'." % AGENT_NAME, res['Message']) else: gLogger.info("%s.execute: Updated transformation status to 'Active'." % AGENT_NAME) return S_OK()
5b98b3b45ce744b0fa8f66e9d9812d0b7910a0ab /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12864/5b98b3b45ce744b0fa8f66e9d9812d0b7910a0ab/TransformationAgent.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1207, 15292, 12, 2890, 16, 2338, 5014, 4672, 906, 734, 273, 906, 5014, 3292, 15292, 734, 3546, 468, 5783, 336, 326, 18803, 10386, 3627, 358, 326, 8620, 400, 273, 365, 18, 2338, 2290, 18,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1207, 15292, 12, 2890, 16, 2338, 5014, 4672, 906, 734, 273, 906, 5014, 3292, 15292, 734, 3546, 468, 5783, 336, 326, 18803, 10386, 3627, 358, 326, 8620, 400, 273, 365, 18, 2338, 2290, 18,...
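The setTransformationParameter call above follows DIRAC's result-dictionary convention: every call returns a dict that the caller checks through res['OK'] before using res['Value'] or res['Message']. A stripped-down sketch of that convention; the S_OK/S_ERROR shapes match their use in the snippet, the failing call is invented:

    def S_OK(value=None):
        return {'OK': True, 'Value': value}

    def S_ERROR(message=''):
        return {'OK': False, 'Message': message}

    res = S_ERROR("Connection to TransformationDB failed")  # invented failure
    if not res['OK']:
        print 'update failed:', res['Message']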
page = urllib.unquote(page) label = urllib.unquote(label)
def _format_link(self, formatter, ns, page, label, ignore_missing): anchor = '' if page.find('#') != -1: anchor = page[page.find('#'):] page = page[:page.find('#')] page = urllib.unquote(page) label = urllib.unquote(label)
eeb69bf8dc5a85628ed33b96461b0f6355d50de9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/9317/eeb69bf8dc5a85628ed33b96461b0f6355d50de9/api.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 2139, 67, 1232, 12, 2890, 16, 4453, 16, 3153, 16, 1363, 16, 1433, 16, 2305, 67, 7337, 4672, 6984, 273, 875, 309, 1363, 18, 4720, 2668, 7, 6134, 480, 300, 21, 30, 6984, 273, 1363...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 2139, 67, 1232, 12, 2890, 16, 4453, 16, 3153, 16, 1363, 16, 1433, 16, 2305, 67, 7337, 4672, 6984, 273, 875, 309, 1363, 18, 4720, 2668, 7, 6134, 480, 300, 21, 30, 6984, 273, 1363...
'jinja',
def bzr_revision(): bzr = Popen(('bzr', 'tags', '--sort', 'time'), stdout=PIPE) output, error = bzr.communicate() code = bzr.wait() if code != 0: raise Exception(u'Error running bzr tags') lines = output.splitlines() if len(lines) == 0: tag = '0.0.0' revision = '0' else: tag, revision = lines[-1].split() bzr = Popen(('bzr', 'log', '--line', '-c', '-1'), stdout=PIPE) output, error = bzr.communicate() code = bzr.wait() if code != 0: raise Exception(u"Error running bzr log") latest = output.split(':')[0] versionstring = latest == revision and tag or '%s-bzr%s' % (tag, latest) f = open('ibid/.version', 'w') f.write(versionstring) f.close() return versionstring
a096c2ce01c524ef97e763b0070ab56c81c4e522 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/12048/a096c2ce01c524ef97e763b0070ab56c81c4e522/setup.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 24788, 86, 67, 13057, 13332, 24788, 86, 273, 14339, 12, 2668, 25292, 86, 2187, 296, 4156, 2187, 3534, 3804, 2187, 296, 957, 19899, 3909, 33, 27602, 13, 876, 16, 555, 273, 24788, 86, 18, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 24788, 86, 67, 13057, 13332, 24788, 86, 273, 14339, 12, 2668, 25292, 86, 2187, 296, 4156, 2187, 3534, 3804, 2187, 296, 957, 19899, 3909, 33, 27602, 13, 876, 16, 555, 273, 24788, 86, 18, ...
class FeedItem(pim.ContentItem):
class FeedItem(pim.Note):
def addDisplayWhos(self, whos): super(FeedChannel, self).addDisplayWhos(whos) author = getattr(self, 'author', None) if author is not None: whos.append((10, author, 'author'))
1a21dc24df42b97b1d2f3d40c0925fdf817add9e /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/9228/1a21dc24df42b97b1d2f3d40c0925fdf817add9e/channels.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 527, 4236, 2888, 538, 12, 2890, 16, 600, 538, 4672, 2240, 12, 8141, 2909, 16, 365, 2934, 1289, 4236, 2888, 538, 12, 3350, 538, 13, 2869, 273, 3869, 12, 2890, 16, 296, 4161, 2187, 599, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 527, 4236, 2888, 538, 12, 2890, 16, 600, 538, 4672, 2240, 12, 8141, 2909, 16, 365, 2934, 1289, 4236, 2888, 538, 12, 3350, 538, 13, 2869, 273, 3869, 12, 2890, 16, 296, 4161, 2187, 599, ...
if len(res.int) > context.prec and shouldround:
if res.int >= prec_limit and shouldround:
def _divide(self, other, divmod = 0, context=None): """Return a / b, to context.prec precision.
1a63da64df2af26e1f050749c19281f1de9084fa /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/1a63da64df2af26e1f050749c19281f1de9084fa/decimal.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 2892, 831, 12, 2890, 16, 1308, 16, 26105, 273, 374, 16, 819, 33, 7036, 4672, 3536, 990, 279, 342, 324, 16, 358, 819, 18, 4036, 6039, 18, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 2892, 831, 12, 2890, 16, 1308, 16, 26105, 273, 374, 16, 819, 33, 7036, 4672, 3536, 990, 279, 342, 324, 16, 358, 819, 18, 4036, 6039, 18, 2, -100, -100, -100, -100, -100, -100, -...
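The replacement line tests the integer coefficient against a precomputed limit instead of counting digits, which assumes res.int is now a plain int and that prec_limit was set to 10**context.prec earlier in the (elided) function. The two tests agree, as this small check illustrates:

    prec = 6
    prec_limit = 10 ** prec  # assumed precomputed once, as the patch implies
    for coeff in (999999, 1000000, 1234567):
        # "coefficient has more than prec digits" both ways
        assert (coeff >= prec_limit) == (len(str(coeff)) > prec)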
version = "tags/%s_%s" % ( p, version )
version = "tags/%s/%s_%s/%s" % ( p, p, version, p )
def tagSVNReleases( mainCFG, taggedReleases ): global cliParams releasesCFG = mainCFG[ 'Releases' ] cmtCompatiblePackages = mainCFG.getOption( 'CMTCompatiblePackages', [] ) if not cliParams.userName: cliParams.discoverUserName() for releaseVersion in cliParams.releasesToBuild: if not cliParams.forceSVNLinks and releaseVersion in taggedReleases: gLogger.info( "Release %s is already tagged, skipping" % releaseVersion ) continue if releaseVersion not in releasesCFG.listSections(): gLogger.error( "Release %s not defined in releases.cfg" % releaseVersion ) continue releaseSVNPath = svnSshRoot % ( cliParams.userName, "/tags/%s" % ( releaseVersion ) ) if releaseVersion not in taggedReleases: svnCmd = "svn --parents -m 'Release %s' mkdir '%s'" % ( releaseVersion, releaseSVNPath ) exitStatus, stdData, errData = execAndGetOutput( svnCmd ) if exitStatus: gLogger.error( "Error while generating release tag", "\n".join( [ stdData, errData ] ) ) continue svnLinks = [] packages = releasesCFG[ releaseVersion ].listOptions() packages.sort() for p in packages: version = releasesCFG[ releaseVersion ].getOption( p, "" ) if version.strip().lower() in ( "trunk", "", "head" ): version = "trunk" else: if p in cmtCompatiblePackages: version = "tags/%s_%s" % ( p, version ) else: version = "tags/%s" % version svnLinks.append( "%s http://svnweb.cern.ch/guest/dirac/%s/%s/%s" % ( p, p, version, p ) ) tmpPath = tempfile.mkdtemp() fd = open( os.path.join( tmpPath, "extProp" ), "wb" ) fd.write( "%s\n" % "\n".join( svnLinks ) ) fd.close() svnCmds = [] svnCmds.append( "svn co -N '%s' '%s/svnco'" % ( releaseSVNPath, tmpPath ) ) svnCmds.append( "svn propset svn:externals -F '%s/extProp' '%s/svnco'" % ( tmpPath, tmpPath ) ) svnCmds.append( "svn ci -m 'Release %s svn:externals' '%s/svnco'" % ( releaseVersion, tmpPath ) ) for cmd in svnCmds: exitStatus, stdData, errData = execAndGetOutput( cmd ) if exitStatus: gLogger.error( "Error while generating release tag", "\n".join( [ stdData, errData ] ) ) continue
c365b50162ff356854f55bc10a011a8ca169fdae /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/12864/c365b50162ff356854f55bc10a011a8ca169fdae/dirac-distribution.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1047, 23927, 50, 26217, 12, 2774, 19727, 16, 12503, 26217, 262, 30, 2552, 4942, 1370, 225, 14824, 19727, 273, 2774, 19727, 63, 296, 26217, 11, 308, 276, 1010, 14599, 11425, 273, 2774, 1972...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1047, 23927, 50, 26217, 12, 2774, 19727, 16, 12503, 26217, 262, 30, 2552, 4942, 1370, 225, 14824, 19727, 273, 2774, 19727, 63, 296, 26217, 11, 308, 276, 1010, 14599, 11425, 273, 2774, 1972...
if event.ShiftDown():
if multiSelect:
def OnControlSelect(self, event): """ Control is clicked. Either select it or add control from palette """ ctrl = self.senderMapper.getObject(event)
2409984574299440bdc4e3c954b3574ca35f84c5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/4325/2409984574299440bdc4e3c954b3574ca35f84c5/Designer.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2755, 3367, 3391, 12, 2890, 16, 871, 4672, 3536, 8888, 353, 17688, 18, 14635, 2027, 518, 578, 527, 3325, 628, 12127, 3536, 225, 6414, 273, 365, 18, 15330, 4597, 18, 588, 921, 12, 2575, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2755, 3367, 3391, 12, 2890, 16, 871, 4672, 3536, 8888, 353, 17688, 18, 14635, 2027, 518, 578, 527, 3325, 628, 12127, 3536, 225, 6414, 273, 365, 18, 15330, 4597, 18, 588, 921, 12, 2575, ...
if edit_mode: self.stream_names.pop(stream_num) self.stream_uris.pop(stream_num)
def new_stream(self, action, stream_num=-1): if stream_num > -1: edit_mode = True else: edit_mode = False # Prompt user for playlist name: dialog = gtk.Dialog(None, self.window, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, (gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT, gtk.STOCK_OK, gtk.RESPONSE_ACCEPT)) if edit_mode: dialog.set_title(_("Edit Stream")) else: dialog.set_title(_("New Stream")) hbox = gtk.HBox() namelabel = gtk.Label(_('Stream name') + ':') hbox.pack_start(namelabel, False, False, 5) nameentry = gtk.Entry() if edit_mode: nameentry.set_text(self.stream_names[stream_num]) hbox.pack_start(nameentry, True, True, 5) hbox2 = gtk.HBox() urllabel = gtk.Label(_('Stream URL') + ':') hbox2.pack_start(urllabel, False, False, 5) urlentry = gtk.Entry() if edit_mode: urlentry.set_text(self.stream_uris[stream_num]) hbox2.pack_start(urlentry, True, True, 5) self.set_label_widths_equal([namelabel, urllabel]) dialog.vbox.pack_start(hbox) dialog.vbox.pack_start(hbox2) dialog.vbox.show_all() response = dialog.run() if response == gtk.RESPONSE_ACCEPT: name = nameentry.get_text() uri = urlentry.get_text() if len(name) > 0 and len(uri) > 0: # Make sure this stream name doesn't already exit: for item in self.stream_names: if item == name: dialog.destroy() # show error here error_dialog = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL, gtk.MESSAGE_WARNING, gtk.BUTTONS_CLOSE, _("A stream with this name already exists.")) error_dialog.set_title(_("New Stream")) error_dialog.run() error_dialog.destroy() return if edit_mode: self.stream_names.pop(stream_num) self.stream_uris.pop(stream_num) self.stream_names.append(name) self.stream_uris.append(uri) self.streams_populate() self.settings_save() dialog.destroy() self.iterate_now()
8e7f75d3649afea0782df60c0cbc0b9c2fb7cc04 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/2312/8e7f75d3649afea0782df60c0cbc0b9c2fb7cc04/sonata.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 394, 67, 3256, 12, 2890, 16, 1301, 16, 1407, 67, 2107, 29711, 21, 4672, 309, 1407, 67, 2107, 405, 300, 21, 30, 3874, 67, 3188, 273, 1053, 469, 30, 3874, 67, 3188, 273, 1083, 468, 283...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 394, 67, 3256, 12, 2890, 16, 1301, 16, 1407, 67, 2107, 29711, 21, 4672, 309, 1407, 67, 2107, 405, 300, 21, 30, 3874, 67, 3188, 273, 1053, 469, 30, 3874, 67, 3188, 273, 1083, 468, 283...
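In edit mode the dialog handler above pops the old name/URI before appending the new ones, so an edited stream replaces its entry instead of being duplicated. The list manipulation in isolation; names and data are invented:

    def replace_or_append(names, uris, name, uri, index=-1):
        # index >= 0 means edit mode: drop the old entry, then append the new one.
        if index >= 0:
            names.pop(index)
            uris.pop(index)
        names.append(name)
        uris.append(uri)

    names, uris = ['news'], ['http://example.org/a']
    replace_or_append(names, uris, 'news (fixed)', 'http://example.org/b', index=0)
    print names, uris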
value = fromPython(value, self._SO_validatorState) toUpdate[name] = value
dbValue = fromPython(value, self._SO_validatorState) else: dbValue = value toUpdate[name] = dbValue
def set(self, **kw): # set() is used to update multiple values at once, # potentially with one SQL statement if possible.
32493673f4b6c949ab6cf873c5f4a11269a33b9f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/6718/32493673f4b6c949ab6cf873c5f4a11269a33b9f/main.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 444, 12, 2890, 16, 2826, 9987, 4672, 468, 444, 1435, 353, 1399, 358, 1089, 3229, 924, 622, 3647, 16, 468, 13935, 598, 1245, 3063, 3021, 309, 3323, 18, 2, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 444, 12, 2890, 16, 2826, 9987, 4672, 468, 444, 1435, 353, 1399, 358, 1089, 3229, 924, 622, 3647, 16, 468, 13935, 598, 1245, 3063, 3021, 309, 3323, 18, 2, -100, -100, -100, -100, -100, ...
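The fix above keeps the caller's Python value and the converted database value in separate variables, converting only when a converter applies. A generic sketch of building such an update payload; the converter lookup is hypothetical, standing in for SQLObject's per-column fromPython hook:

    def build_update(values, converters):
        # converters maps column name -> to-database function (may be absent).
        to_update = {}
        for name, value in values.items():
            convert = converters.get(name)
            db_value = convert(value) if convert else value
            to_update[name] = db_value
        return to_update

    print build_update({'age': '42', 'name': 'Ada'}, {'age': int})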
self.nodemodule = None
def __init__(self, name, description = '', category = '', inputs = None, outputs = None, nodemodule = '', nodeclass = None, widgetmodule = None, widgetclass = None, search_path = [], **kargs): """ Create a node factory. @param name : user name for the node (must be unique) (String) @param description : description of the node (String) @param category : category of the node (String) @param nodemodule : python module to import for node (String) @param nodeclass : node class name to be created (String) @param widgetmodule : python module to import for widget (String) @param widgetclass : widget class name (String) @param inputs : inputs description @param outputs : outputs description @param seach_path (opt) : list of directories where to search for module Nota : inputs and outputs parameters are list of dictionnary such inputs = (dict(name='x', interface=IInt, value=0,) outputs = (dict(name='y', interface=IInt) """ AbstractFactory.__init__(self, name, description, category, inputs, outputs, **kargs)
293d98b1a0fbd4fd84d3621c66adb3073fd74d6c /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/11338/293d98b1a0fbd4fd84d3621c66adb3073fd74d6c/node.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 508, 16, 2477, 273, 10226, 3150, 273, 10226, 4540, 273, 599, 16, 6729, 273, 599, 16, 756, 2978, 273, 10226, 756, 1106, 273, 599, 16, 3604, 2978, 273, 599...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 508, 16, 2477, 273, 10226, 3150, 273, 10226, 4540, 273, 599, 16, 6729, 273, 599, 16, 756, 2978, 273, 10226, 756, 1106, 273, 599, 16, 3604, 2978, 273, 599...
for e in self._backend.iterator_out_edges(vertices, labels): yield e for e in self._backend.iterator_in_edges(vertices, labels): yield e
from itertools import chain return chain(self._backend.iterator_out_edges(vertices, labels), self._backend.iterator_in_edges(vertices, labels))
def edge_iterator(self, vertices=None, labels=True, ignore_direction=False): """ Returns an iterator over the edges incident with any vertex given. If the graph is directed, iterates over edges going out only. If vertices is None, then returns an iterator over all edges. If self is directed, returns outgoing edges only.
7b9afb3b66d010e574487a319d86af01cd22bcc7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9417/7b9afb3b66d010e574487a319d86af01cd22bcc7/generic_graph.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3591, 67, 9838, 12, 2890, 16, 6928, 33, 7036, 16, 3249, 33, 5510, 16, 2305, 67, 9855, 33, 8381, 4672, 3536, 2860, 392, 2775, 1879, 326, 5231, 22933, 598, 1281, 5253, 864, 18, 971, 326,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3591, 67, 9838, 12, 2890, 16, 6928, 33, 7036, 16, 3249, 33, 5510, 16, 2305, 67, 9855, 33, 8381, 4672, 3536, 2860, 392, 2775, 1879, 326, 5231, 22933, 598, 1281, 5253, 864, 18, 971, 326,...
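Replacing the two for/yield loops with itertools.chain returns one lazy iterator over both edge sets without materializing either. The equivalence in miniature, with invented edge values:

    from itertools import chain

    def both_directions(out_edges, in_edges):
        # Yields everything from out_edges, then everything from in_edges.
        return chain(out_edges, in_edges)

    print list(both_directions(iter(['a->b']), iter(['c->a'])))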
"""Extracts the exception line from the input string, and marks
"""Extracts the exception line from the input string, and marks
def markInputline( self, markerString = ">!<" ): """Extracts the exception line from the input string, and marks the location of the exception with a special symbol. """ line_str = self.line line_column = self.column - 1 if markerString: line_str = "".join( [line_str[:line_column], markerString, line_str[line_column:]]) return line_str.strip()
f734f22ba11a967624c3b1351b79174f1d17c2ac /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3554/f734f22ba11a967624c3b1351b79174f1d17c2ac/pyparsing.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2267, 1210, 1369, 12, 365, 16, 5373, 780, 273, 14402, 5, 32, 6, 262, 30, 3536, 12809, 326, 1520, 980, 628, 326, 810, 533, 16, 471, 13999, 326, 2117, 434, 326, 1520, 598, 279, 4582, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2267, 1210, 1369, 12, 365, 16, 5373, 780, 273, 14402, 5, 32, 6, 262, 30, 3536, 12809, 326, 1520, 980, 628, 326, 810, 533, 16, 471, 13999, 326, 2117, 434, 326, 1520, 598, 279, 4582, 3...
print "start: %f" % start print "length: %f" % length
def _play(self, start, length): print "start: %f" % start print "length: %f" % length self.isplaying = True startframe = int(round(start * self.wave_reference.getframerate())) samplelen = int(round(length * self.wave_reference.getframerate())) print startframe print samplelen remaining = samplelen chunk = 1024 try: self.wave_reference.setpos(startframe) except wave.Error: self.isplaying = False return stream = self.audio.open(format = self.audio.get_format_from_width(self.wave_reference.getsampwidth()), channels = self.wave_reference.getnchannels(), rate = self.wave_reference.getframerate(), output = True) # read data if remaining >= 1024: data = self.wave_reference.readframes(chunk) remaining -= chunk else: data = self.wave_reference.readframes(remaining) remaining = 0 # play stream while data != '' and self.isplaying==True: stream.write(data) self.time = float(self.wave_reference.tell()) / float(self.wave_reference.getframerate()) print self.time if remaining >= 1024: data = self.wave_reference.readframes(chunk) remaining -= chunk else: data = self.wave_reference.readframes(remaining) remaining = 0 stream.close() self.isplaying = False
5bfe4a45b838b05eef17db6601f315f24a8a82ad /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9693/5bfe4a45b838b05eef17db6601f315f24a8a82ad/SoundPlayer.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 1601, 12, 2890, 16, 787, 16, 769, 4672, 365, 18, 291, 1601, 310, 273, 1053, 787, 3789, 273, 509, 12, 2260, 12, 1937, 380, 365, 18, 17838, 67, 6180, 18, 588, 74, 1940, 12600, 143...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 1601, 12, 2890, 16, 787, 16, 769, 4672, 365, 18, 291, 1601, 310, 273, 1053, 787, 3789, 273, 509, 12, 2260, 12, 1937, 380, 365, 18, 17838, 67, 6180, 18, 588, 74, 1940, 12600, 143...
while (ss[--i].type !== BLOCK) ; // a BLOCK *must* be found.
while (ss[--i].type !== BLOCK) ;
def Variables(tokenizer, compilerContext, letBlock) { var builder = compilerContext.builder var node, ss, i, s var build, addDecl, finish switch (tokenizer.token.type) { case VAR: build = builder.VAR$build addDecl = builder.VAR$addDecl finish = builder.VAR$finish s = compilerContext break case CONST: build = builder.CONST$build addDecl = builder.CONST$addDecl finish = builder.CONST$finish s = compilerContext break case LET: case LEFT_PAREN: build = builder.LET$build addDecl = builder.LET$addDecl finish = builder.LET$finish if (!letBlock) { ss = compilerContext.stmtStack i = ss.length while (ss[--i].type !== BLOCK) ; // a BLOCK *must* be found. /* * Lets at the def toplevel are just vars, at least in * SpiderMonkey. */ if (i == 0) { build = builder.VAR$build addDecl = builder.VAR$addDecl finish = builder.VAR$finish s = compilerContext } else { s = ss[i] } } else { s = letBlock } break } node = build.call(builder, tokenizer) initializers = [] do { var tokenType = tokenizer.get() /* * FIXME Should have a special DECLARATION node instead of overloading * IDENTIFIER to mean both identifier declarations and destructured * declarations. */ var childNode = builder.DECL$build(tokenizer) if (tokenType == LEFT_BRACKET || tokenType == LEFT_CURLY) { // Pass in s if we need to add each pattern matched into // its varDecls, else pass in compilerContext. var data = null // Need to unget to parse the full destructured expression. tokenizer.unget() builder.DECL$setName(childNode, DestructuringExpression(tokenizer, compilerContext, True, s)) if (compilerContext.inForLoopInit && tokenizer.peek() == IN) { addDecl.call(builder, node, childNode, s) continue }
58afad4d3cade0a037f53f0a04cb23589df417b1 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12949/58afad4d3cade0a037f53f0a04cb23589df417b1/Parser.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 23536, 12, 2316, 1824, 16, 5274, 1042, 16, 2231, 1768, 13, 288, 569, 2089, 273, 5274, 1042, 18, 9574, 569, 756, 16, 5202, 16, 277, 16, 272, 569, 1361, 16, 527, 3456, 16, 4076, 1620, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 23536, 12, 2316, 1824, 16, 5274, 1042, 16, 2231, 1768, 13, 288, 569, 2089, 273, 5274, 1042, 18, 9574, 569, 756, 16, 5202, 16, 277, 16, 272, 569, 1361, 16, 527, 3456, 16, 4076, 1620, ...
with open("sobiz_dump.html", "w") as f: f.write(self.html)
def downloadHTML(self): self.html = self.load(self.pyfile.url, cookies=True) with open("sobiz_dump.html", "w") as f: f.write(self.html) if not self.account: html = self.load("%s/free/" % self.pyfile.url, post={"dl_free":"1"}, cookies=True) if re.search(r"/failure/full/1", self.req.lastEffectiveURL): self.setWait(120) self.log.debug("%s: no free slots, waiting 120 seconds" % (self.__name__)) self.wait() self.retry() captcha = self.decryptCaptcha("http://www.share-online.biz/captcha.php", get={"rand":"0.%s" % random.randint(10**15,10**16)}, cookies=True) self.log.debug("%s Captcha: %s" % (self.__name__, captcha)) sleep(3) html = self.load(self.pyfile.url, post={"captchacode": captcha}, cookies=True) if re.search(r"Der Download ist Ihnen zu langsam", html): #m = re.search("var timeout='(\d+)';", self.html[1]) #self.waitUntil = time() + int(m.group(1)) if m else 30 return True
c2e9cd00598a0a30d3bca04254bd6b94a6bef85f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9838/c2e9cd00598a0a30d3bca04254bd6b94a6bef85f/ShareonlineBiz.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4224, 4870, 12, 2890, 4672, 365, 18, 2620, 273, 365, 18, 945, 12, 2890, 18, 2074, 768, 18, 718, 16, 7237, 33, 5510, 13, 225, 309, 486, 365, 18, 4631, 30, 1729, 273, 365, 18, 945, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4224, 4870, 12, 2890, 4672, 365, 18, 2620, 273, 365, 18, 945, 12, 2890, 18, 2074, 768, 18, 718, 16, 7237, 33, 5510, 13, 225, 309, 486, 365, 18, 4631, 30, 1729, 273, 365, 18, 945, 2...
u"Simulation fanart copy (%s) to (%s)\n" % (tmp, dest)
u"Simulation fanart copy (%s) to (%s)\n" % (tmp, newFilename)
def processMythTvMetaData(self): '''Check each video file in the mythvideo directories download graphics files and meta data then update MythTV data base meta data with any new information. ''' # If there were directories specified move them and update the MythTV db meta data accordingly if self.config['video_dir']: if len(self.config['video_dir']) % 2 == 0: validFiles = self._moveVideoFiles(self.config['video_dir']) else: sys.stderr.write(u"\n! Error: When specifying target (file or directory) to move to a destination (directory) they must always be in pairs (target and destination directory).\nYou specified an uneven number of variables (%d) for target and destination pairs.\nVariable count (%s)\n" % (len(self.config['video_dir']), self.config['video_dir'])) sys.exit(False)
881a9111c0613ff73ff321ac94e0e0ed204b9de1 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/13713/881a9111c0613ff73ff321ac94e0e0ed204b9de1/jamu.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1207, 12062, 451, 56, 90, 6998, 12, 2890, 4672, 9163, 1564, 1517, 6191, 585, 316, 326, 3399, 451, 9115, 6402, 4224, 17313, 1390, 471, 2191, 501, 1508, 1089, 8005, 451, 15579, 501, 1026, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1207, 12062, 451, 56, 90, 6998, 12, 2890, 4672, 9163, 1564, 1517, 6191, 585, 316, 326, 3399, 451, 9115, 6402, 4224, 17313, 1390, 471, 2191, 501, 1508, 1089, 8005, 451, 15579, 501, 1026, ...
f_all = 100.0 / c_all f_present = 100.0 / c_present
if c_all != 0: f_all = 100.0 / c_all else: f_all = 1 if c_present != 0: f_present = 100.0 / c_present else: f_present = 1
def admin_stats(edition): # Chart sizes CHART_X = 800 CHART_Y = 370 all_browsers = defaultdict(int) all_brver = defaultdict(int) all_platforms = defaultdict(int) present_browsers = defaultdict(int) present_brver = defaultdict(int) present_platforms = defaultdict(int) c_all = 0 c_present = 0 for p in Participant.query.filter_by(edition=edition): if p.useragent: c_all += 1 ua = UserAgent(p.useragent) all_browsers[ua.browser] += 1 all_brver['%s %s' % (ua.browser, ua.version.split('.')[0])] += 1 all_platforms[ua.platform] += 1 for p in Participant.query.filter_by(edition=edition, attended=True): if p.useragent: c_present += 1 ua = UserAgent(p.useragent) present_browsers[ua.browser] += 1 present_brver['%s %s' % (ua.browser, ua.version.split('.')[0])] += 1 present_platforms[ua.platform] += 1 f_all = 100.0 / c_all f_present = 100.0 / c_present # Now make charts # All registrations c_all_browsers = pygooglechart.PieChart2D(CHART_X, CHART_Y) c_all_browsers.add_data(all_browsers.values()) c_all_browsers.set_pie_labels(['%s (%.2f%%)' % (key, all_browsers[key]*f_all) for key in all_browsers.keys()]) c_all_brver = pygooglechart.PieChart2D(CHART_X, CHART_Y) c_all_brver.add_data(all_brver.values()) c_all_brver.set_pie_labels(['%s (%.2f%%)' % (key, all_brver[key]*f_all) for key in all_brver.keys()]) c_all_platforms = pygooglechart.PieChart2D(CHART_X, CHART_Y) c_all_platforms.add_data(all_platforms.values()) c_all_platforms.set_pie_labels(['%s (%.2f%%)' % (key, all_platforms[key]*f_all) for key in all_platforms.keys()]) # Present at venue c_present_browsers = pygooglechart.PieChart2D(CHART_X, CHART_Y) c_present_browsers.add_data(present_browsers.values()) c_present_browsers.set_pie_labels(['%s (%.2f%%)' % (key, present_browsers[key]*f_present) for key in present_browsers.keys()]) c_present_brver = pygooglechart.PieChart2D(CHART_X, CHART_Y) c_present_brver.add_data(present_brver.values()) c_present_brver.set_pie_labels(['%s (%.2f%%)' % (key, present_brver[key]*f_present) for key in present_brver.keys()]) c_present_platforms = pygooglechart.PieChart2D(CHART_X, CHART_Y) c_present_platforms.add_data(present_platforms.values()) c_present_platforms.set_pie_labels(['%s (%.2f%%)' % (key, present_platforms[key]*f_present) for key in present_platforms.keys()]) return render_template('stats.html', all_browsers = c_all_browsers.get_url(), all_brver = c_all_brver.get_url(), all_platforms = c_all_platforms.get_url(), present_browsers = c_present_browsers.get_url(), present_brver = c_present_brver.get_url(), present_platforms = c_present_platforms.get_url() )
f6b30f87a907d7aff5abd8e86be01ff747fad3f5 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12083/f6b30f87a907d7aff5abd8e86be01ff747fad3f5/website.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3981, 67, 5296, 12, 329, 608, 4672, 225, 468, 14804, 8453, 6469, 4928, 67, 60, 273, 1725, 713, 6469, 4928, 67, 61, 273, 890, 7301, 225, 777, 67, 70, 13221, 273, 13090, 12, 474, 13, 7...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3981, 67, 5296, 12, 329, 608, 4672, 225, 468, 14804, 8453, 6469, 4928, 67, 60, 273, 1725, 713, 6469, 4928, 67, 61, 273, 890, 7301, 225, 777, 67, 70, 13221, 273, 13090, 12, 474, 13, 7...
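The guard added above protects the percentage scale factors when no participants matched; 100.0 / 0 would otherwise raise ZeroDivisionError before any chart is built. The same guard as a small helper:

    def scale_factor(count):
        # Falls back to 1 when the count is zero, as the patch does.
        return 100.0 / count if count else 1

    assert scale_factor(0) == 1
    assert scale_factor(50) == 2.0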
if len(elt_list) == 0: raise ParseError()
if len(elt_list) == 0: raise ParseError("Bad dotted name")
def parse_dotted_name(elt_list, strip_parens=True): """ @bug: does not handle 'x.(y).z' """ if len(elt_list) == 0: raise ParseError() # Handle ((x.y).z). (If the contents of the parens include # anything other than dotted names, such as (x,y), then we'll # catch it below and raise a ParseError. while (isinstance(elt_list[0], list) and len(elt_list[0]) >= 3 and elt_list[0][0] == (token.OP, '(') and elt_list[0][-1] == (token.OP, ')')): elt_list[:1] = elt_list[0][1:-1] if len(elt_list) % 2 != 1: raise ParseError() name = DottedName(parse_name(elt_list[0], True)) for i in range(2, len(elt_list), 2): dot, identifier = elt_list[i-1], elt_list[i] if dot != (token.OP, '.'): raise ParseError() name = DottedName(name, parse_name(identifier, True)) return name
76aa87c4f3f84f87b6c947e3387d535503259a2a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3512/76aa87c4f3f84f87b6c947e3387d535503259a2a/docparser.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1109, 67, 9811, 2344, 67, 529, 12, 20224, 67, 1098, 16, 2569, 67, 1848, 2387, 33, 5510, 4672, 3536, 632, 925, 30, 1552, 486, 1640, 296, 92, 18, 12, 93, 2934, 94, 11, 3536, 309, 562, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1109, 67, 9811, 2344, 67, 529, 12, 20224, 67, 1098, 16, 2569, 67, 1848, 2387, 33, 5510, 4672, 3536, 632, 925, 30, 1552, 486, 1640, 296, 92, 18, 12, 93, 2934, 94, 11, 3536, 309, 562, ...
answer = dialog = wxTextEntryDialog ( None, 'question', 'Title Here', '' )
self.output(question) if password: answer = wx.PasswordEntryDialog( None, question, '','') else: answer = wx.TextEntryDialog( None, question, '', '' ) answer.ShowModal() self.output(answer+'\n') return answer.GetValue() or ''
def input(self, question, password = False): """ Works like raw_input(), but returns a unicode string instead of ASCII.
176450fa59c1dbbb6b48a4831399b45a8d91aad6 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/9005/176450fa59c1dbbb6b48a4831399b45a8d91aad6/wxpython_interface.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 810, 12, 2890, 16, 5073, 16, 2201, 273, 1083, 4672, 3536, 4147, 87, 3007, 1831, 67, 2630, 9334, 1496, 1135, 279, 5252, 533, 3560, 434, 11768, 18, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 810, 12, 2890, 16, 5073, 16, 2201, 273, 1083, 4672, 3536, 4147, 87, 3007, 1831, 67, 2630, 9334, 1496, 1135, 279, 5252, 533, 3560, 434, 11768, 18, 2, -100, -100, -100, -100, -100, -100, ...
Return difference distribution matrix $A$ for this S-box. The rows of $A$ encode the differences $\Delta I$ of the input and the columns encode the difference $\Delta O$ for the output. The bits are ordered according to the endianess of this S-box. The value at $A[\Delta I,\Delta O]$ encoded how often $\Delta O$ is the actual output difference given $\Delta I$ as input difference. EXAMPLE:
Return difference distribution matrix ``A`` for this S-box. The rows of ``A`` encode the differences ``Delta I`` of the input and the columns encode the difference ``Delta O`` for the output. The bits are ordered according to the endianess of this S-box. The value at ``A[Delta I,Delta O]`` encoded how often ``Delta O`` is the actual output difference given ``Delta I`` as input difference. See [Heys02]_ for an introduction to differential cryptanalysis. EXAMPLE::
def difference_distribution_matrix(self): """ Return difference distribution matrix $A$ for this S-box.
d8d819d404da04929f5525b5dd795973a8bd80ae /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/9890/d8d819d404da04929f5525b5dd795973a8bd80ae/sbox.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 7114, 67, 16279, 67, 5667, 12, 2890, 4672, 3536, 2000, 7114, 7006, 3148, 271, 37, 8, 364, 333, 348, 17, 2147, 18, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 7114, 67, 16279, 67, 5667, 12, 2890, 4672, 3536, 2000, 7114, 7006, 3148, 271, 37, 8, 364, 333, 348, 17, 2147, 18, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -...
solr_select = solr_select_url + "?version=2.2&defType=dismax&q.op=AND&q=%s&qf=text+title^5+author_name^5&bf=sqrt(edition_count)^10&start=%d&rows=%d&fl=key,author_name,author_key,title,subtitle,edition_count,ia,has_fulltext,first_publish_year,cover_edition_key,public_scan_b,lending_edition_s,overdrive_s&qt=standard&wt=standard" % (q, offset, rows)
solr_select = solr_select_url + "?version=2.2&defType=dismax&q.op=AND&q=%s&qf=text+title^5+author_name^5&bf=sqrt(edition_count)^10&start=%d&rows=%d&fl=%s&qt=standard&wt=standard" % (q, offset, rows, fl)
def run_solr_query(param = {}, rows=100, page=1, sort=None, spellcheck_count=None): if spellcheck_count == None: spellcheck_count = default_spellcheck_count q_list = [] if 'q' in param: q_param = param['q'].strip() else: q_param = None offset = rows * (page - 1) use_dismax = False if q_param: if q_param == '*:*': q_list.append(q_param) elif 'NOT ' in q_param: # this is a hack q_list.append(q_param.strip()) elif re_fields.search(q_param): q_list.extend('%s:(%s)' % i for i in parse_query_fields(q_param)) else: isbn = read_isbn(q_param) if isbn: q_list.append('isbn:(%s)' % isbn) else: q_list.append(q_param.strip().replace(':', '\:')) use_dismax = True else: if 'author' in param: v = param['author'].strip() m = re_author_key.search(v) if m: # FIXME: 'OL123A OR OL234A' q_list.append('author_key:(' + m.group(1) + ')') else: v = re_to_esc.sub(lambda m:'\\' + m.group(), v) q_list.append('(author_name:(' + v + ') OR author_alternative_name:(' + v + '))') check_params = ['title', 'publisher', 'isbn', 'oclc', 'lccn', 'contribtor', 'subject', 'place', 'person', 'time'] q_list += ['%s:(%s)' % (k, param[k]) for k in check_params if k in param] if use_dismax: q = web.urlquote(' '.join(q_list)) solr_select = solr_select_url + "?version=2.2&defType=dismax&q.op=AND&q=%s&qf=text+title^5+author_name^5&bf=sqrt(edition_count)^10&start=%d&rows=%d&fl=key,author_name,author_key,title,subtitle,edition_count,ia,has_fulltext,first_publish_year,cover_edition_key,public_scan_b,lending_edition_s,overdrive_s&qt=standard&wt=standard" % (q, offset, rows) else: q = web.urlquote(' '.join(q_list + ['_val_:"sqrt(edition_count)"^10'])) solr_select = solr_select_url + "?version=2.2&q.op=AND&q=%s&start=%d&rows=%d&fl=key,author_name,author_key,title,subtitle,edition_count,ia,has_fulltext,first_publish_year,cover_edition_key&qt=standard&wt=standard" % (q, offset, rows) solr_select += '&spellcheck=true&spellcheck.count=%d' % spellcheck_count solr_select += "&facet=true&" + '&'.join("facet.field=" + f for f in facet_fields) k = 'has_fulltext' if k in param: v = param[k].lower() if v not in ('true', 'false'): del param[k] param[k] == v solr_select += '&fq=%s:%s' % (k, v) for k in facet_list_fields: if k == 'author_facet': k = 'author_key' if k not in param: continue v = param[k] solr_select += ''.join('&fq=%s:"%s"' % (k, url_quote(l)) for l in v if l) if sort: solr_select += "&sort=" + url_quote(sort) reply = urllib.urlopen(solr_select).read() return (reply, solr_select, q_list)
6a2c7c779a9c4cffa9fa2c650f549c71fb64df77 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3913/6a2c7c779a9c4cffa9fa2c650f549c71fb64df77/code.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1086, 67, 22495, 67, 2271, 12, 891, 273, 10615, 2595, 33, 6625, 16, 1363, 33, 21, 16, 1524, 33, 7036, 16, 22377, 1893, 67, 1883, 33, 7036, 4672, 309, 22377, 1893, 67, 1883, 422, 599, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1086, 67, 22495, 67, 2271, 12, 891, 273, 10615, 2595, 33, 6625, 16, 1363, 33, 21, 16, 1524, 33, 7036, 16, 22377, 1893, 67, 1883, 33, 7036, 4672, 309, 22377, 1893, 67, 1883, 422, 599, ...
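The patched line threads a precomputed fl field list into the Solr select URL instead of hard-coding it. A sketch of assembling such a URL with urllib; the host and field list are invented, while fl, qt and wt are standard Solr query parameters:

    import urllib

    fl = ','.join(['key', 'author_name', 'title', 'edition_count'])
    params = {'version': '2.2', 'q': 'dickens', 'fl': fl,
              'qt': 'standard', 'wt': 'standard'}
    solr_select = 'http://localhost:8983/solr/select?' + urllib.urlencode(params)
    print solr_select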
if filter(lambda x: x.startswith('FAIL: ') or x.startswith('XPASS: '),
if filter(lambda x: x.startswith('FAIL: ') or x.startswith('XPASS: '),
def run(self): command = sys.argv[0]
0fb57cb3754adb068999218d5c2cd5d847087264 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/1387/0fb57cb3754adb068999218d5c2cd5d847087264/main.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1086, 12, 2890, 4672, 1296, 273, 2589, 18, 19485, 63, 20, 65, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1086, 12, 2890, 4672, 1296, 273, 2589, 18, 19485, 63, 20, 65, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -...
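An illustrative sketch with invented sample data: selecting failure lines from a test report the way the truncated filter(...) expression above does.

# Keep only FAIL/XPASS lines; list() makes the result printable on
# both Python 2 (list) and Python 3 (iterator).
report = [
    'PASS: test_open',
    'FAIL: test_close',
    'XPASS: test_seek',
]
failures = filter(lambda x: x.startswith('FAIL: ') or x.startswith('XPASS: '),
                  report)
print(list(failures))                 # ['FAIL: test_close', 'XPASS: test_seek']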
def writelines(seq):
def writelines(self, seq):
def writelines(seq):
    for msg in seq:
        self.handler.log_error("HG error: %s", msg)
53fa86beeedcbe8995312727acba6d41aac13c49 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/11312/53fa86beeedcbe8995312727acba6d41aac13c49/server.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 6004, 1465, 12, 2890, 16, 3833, 4672, 364, 1234, 316, 3833, 30, 365, 18, 4176, 18, 1330, 67, 1636, 2932, 44, 43, 555, 30, 225, 738, 87, 3113, 1234, 13, 2, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 6004, 1465, 12, 2890, 16, 3833, 4672, 364, 1234, 316, 3833, 30, 365, 18, 4176, 18, 1330, 67, 1636, 2932, 44, 43, 555, 30, 225, 738, 87, 3113, 1234, 13, 2, -100, -100, -100, -100, -10...
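A minimal reproduction of the bug fixed above (class names invented): a method defined without 'self' receives the instance as its first argument, so any explicit argument becomes one too many.

class BadLogger(object):
    def writelines(seq):                  # missing 'self'
        for msg in seq:
            print(msg)

class GoodLogger(object):
    def writelines(self, seq):
        for msg in seq:
            print(msg)

GoodLogger().writelines(['a', 'b'])       # prints a then b
try:
    BadLogger().writelines(['a', 'b'])
except TypeError as e:
    print(e)                              # writelines() takes 1 argument (2 given)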
entry = self.parser.next()
except StopIteration:
    raise
if not self.isTitleExcepted(entry.title) \
        and not self.isTextExcepted(entry.text):
    new_text = entry.text
    for old, new in self.replacements:
        new_text = wikipedia.replaceExcept(new_text, old, new, self.excsInside)
    if new_text != entry.text:
        return wikipedia.Page(self.site, entry.title)
if not self.skipping:
    wikipedia.output(
        'To resume, use "-xmlstart:%s" on the command line.'
        % entry.title)
except NameError:
    pass
raise KeyboardInterrupt
def next(self):
    while True:
        try:
            entry = self.parser.next()
        except StopIteration:
            raise
        if not self.isTitleExcepted(entry.title) \
                and not self.isTextExcepted(entry.text):
            new_text = entry.text
            for old, new in self.replacements:
                new_text = wikipedia.replaceExcept(new_text, old, new,
                                                   self.excsInside)
            if new_text != entry.text:
                return wikipedia.Page(self.site, entry.title)
50b454f69bda5b41d4373a89be17ea7698a61651 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/4404/50b454f69bda5b41d4373a89be17ea7698a61651/replace.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1024, 12, 2890, 4672, 1323, 1053, 30, 775, 30, 1241, 273, 365, 18, 4288, 18, 4285, 1435, 1335, 16179, 30, 1002, 309, 486, 365, 18, 291, 4247, 30212, 329, 12, 4099, 18, 2649, 13, 521, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1024, 12, 2890, 4672, 1323, 1053, 30, 775, 30, 1241, 273, 365, 18, 4288, 18, 4285, 1435, 1335, 16179, 30, 1002, 309, 486, 365, 18, 291, 4247, 30212, 329, 12, 4099, 18, 2649, 13, 521, ...
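A hedged sketch of the interrupt handling added above: on Ctrl-C, tell the operator where to restart before re-raising. The entries here are plain strings standing in for the XML dump machinery.

def run(entries):
    entry = None
    try:
        for entry in entries:
            pass                          # real per-page work would happen here
    except KeyboardInterrupt:
        if entry is not None:
            print('To resume, use "-xmlstart:%s" on the command line.' % entry)
        raise

run(['First_Page', 'Second_Page'])        # finishes quietly when not interrupted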
self.handler = handler
self.handler = handler
def __init__(self, host=config['hostname'] or 'localhost',
             ssl_port=config['port'] or 8443,
             handler=config['prefix'] or "/candlepin",
             cert_file=None, key_file=None):
    self.host = host
    self.ssl_port = ssl_port
7194606995abae375a615d2e2c37aa94f2cb10ff /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11137/7194606995abae375a615d2e2c37aa94f2cb10ff/connection.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 1479, 33, 1425, 3292, 10358, 3546, 578, 296, 13014, 2187, 64, 5832, 67, 655, 33, 1425, 3292, 655, 3546, 578, 1725, 6334, 23, 16, 521, 1838, 33, 1425, 329...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 1479, 33, 1425, 3292, 10358, 3546, 578, 296, 13014, 2187, 64, 5832, 67, 655, 33, 1425, 3292, 655, 3546, 578, 1725, 6334, 23, 16, 521, 1838, 33, 1425, 329...
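A sketch of a subtlety in the signature above: default argument values are evaluated once, when 'def' runs, so later changes to config are ignored. The config dict here is invented for illustration.

config = {'hostname': None, 'port': None, 'prefix': None}

class Connection(object):
    def __init__(self, host=config['hostname'] or 'localhost',
                 ssl_port=config['port'] or 8443):
        self.host = host
        self.ssl_port = ssl_port

config['hostname'] = 'example.org'        # too late: 'localhost' is already bound
print(Connection().host)                  # localhost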
x, y, range, squareNess = 0, 0, 0, 0
x, y, range, squareNess = 0, 0, 0, float('inf')
def recent_event(a):
    return (event.timeStamp - a.timeStamp) < self._eventDelay
04610bebbd2d49dc6e9e41a841c562813854813e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10608/04610bebbd2d49dc6e9e41a841c562813854813e/window.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 8399, 67, 2575, 12, 69, 4672, 327, 261, 2575, 18, 957, 8860, 300, 521, 279, 18, 957, 8860, 13, 411, 365, 6315, 2575, 6763, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 8399, 67, 2575, 12, 69, 4672, 327, 261, 2575, 18, 957, 8860, 300, 521, 279, 18, 957, 8860, 13, 411, 365, 6315, 2575, 6763, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100...
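Why the fix above seeds the best-so-far value with float('inf'): in a search for a minimum, starting at 0 means no candidate can ever improve on it. 'Squareness' below is a guessed metric (0 for a perfect square); the data is invented.

def most_square(rects):                    # rects: (width, height) pairs
    best, best_squareness = None, float('inf')
    for w, h in rects:
        squareness = abs(w - h)            # 0 means a perfect square
        if squareness < best_squareness:
            best, best_squareness = (w, h), squareness
    return best

print(most_square([(4, 1), (3, 3), (5, 2)]))   # (3, 3)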
if not match_function: self.match_function = self.trivial_match_function()
if match_function:
    self.match_function = match_function(self.query_name)  # spelled 'match_fucntion' in the source, which silently creates a different attribute
else:
    self.match_function = self.trivial_match_function(self.query_name)
def __init__(self, query_name, match_function=None):
    """ """
    config = ConfigParser.ConfigParser()
    config.read(os.path.expanduser("~/.notmuch-config"))
    self.db_path = config.get("database", "path")
    self.email = config.get("user", "primary_email")
    try:
        other_emails = config.get("user", "other_email").split(";")
        self.other_emails = [addr.strip() for addr in other_emails if addr]
    except ConfigParser.NoOptionError:
        self.other_emails = []
9879fe34327a0531a251b32a3f2a97392af9dca9 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9450/9879fe34327a0531a251b32a3f2a97392af9dca9/notmuch_addresses.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 843, 67, 529, 16, 845, 67, 915, 33, 7036, 4672, 3536, 3536, 642, 273, 25076, 18, 809, 2678, 1435, 642, 18, 896, 12, 538, 18, 803, 18, 12320, 1355, 2932...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 843, 67, 529, 16, 845, 67, 915, 33, 7036, 4672, 3536, 3536, 642, 273, 25076, 18, 809, 2678, 1435, 642, 18, 896, 12, 538, 18, 803, 18, 12320, 1355, 2932...
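A sketch of the injection pattern in the fix above: accept an optional matcher factory and fall back to a default. PrefixMatcher is invented; the real code builds its matcher from the notmuch query instead.

class PrefixMatcher(object):
    def __init__(self, query):
        self.query = query.lower()

    def match(self, s):
        return s.lower().startswith(self.query)

class Completer(object):
    def __init__(self, query_name, match_function=None):
        if match_function:
            self.match_function = match_function(query_name)
        else:
            self.match_function = PrefixMatcher(query_name)

print(Completer('ali').match_function.match('Alice <alice@example.com>'))  # True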
self.extcomp = 0
self.hdrsize = 0
self.extcomp = 0
self.hdrsize = 0
def __init__(self, target, delta):
    self.extcomp = 0  # @@@
    self.hdrsize = 0
    self.tgtsize = os.stat(target).st_size
    self.dsize = os.stat(delta).st_size
    if self.tgtsize > 0:
        self.ideal = 100.0 * self.dsize / self.tgtsize
    else:
        self.ideal = 0.0
08fa3e3b1e873284007e44a6e8402c060f23346a /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/4464/08fa3e3b1e873284007e44a6e8402c060f23346a/xdelta3-regtest.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 3299, 16, 9878, 4672, 365, 18, 408, 2919, 273, 374, 225, 468, 22175, 36, 365, 18, 16587, 1467, 273, 374, 365, 18, 29672, 1467, 273, 1140, 18, 5642, 12, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 3299, 16, 9878, 4672, 365, 18, 408, 2919, 273, 374, 225, 468, 22175, 36, 365, 18, 16587, 1467, 273, 374, 365, 18, 29672, 1467, 273, 1140, 18, 5642, 12, ...
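A sketch of the ratio computed above: delta size as a percentage of target size, guarded against an empty target. Sizes are passed in directly rather than taken from os.stat, to keep the example self-contained.

def ideal_percent(tgtsize, dsize):
    if tgtsize > 0:
        return 100.0 * dsize / tgtsize
    return 0.0

print(ideal_percent(2048, 512))   # 25.0
print(ideal_percent(0, 512))      # 0.0 instead of ZeroDivisionError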
if attroffs in map and hasattr(object, map[attroffs]): return setattr(object, map[attroffs], value)
if attroffs in attrmap and hasattr(obj, attrmap[attroffs]): return setattr(obj, attrmap[attroffs], value)
def _IDC_SetAttr(object, map, attroffs, value):
    """
    Internal function to generically set object attributes
    Do not use unless you know what you are doing
    """
    if attroffs in map and hasattr(object, map[attroffs]):
        return setattr(object, map[attroffs], value)
    else:
        str = "attribute with offset %d not found, check the offset and report the problem" % attroffs
        raise KeyError, str
244a3cd02a580c0095170004ec30e922f0d1a8a6 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/6984/244a3cd02a580c0095170004ec30e922f0d1a8a6/idc.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 734, 39, 67, 694, 3843, 12, 1612, 16, 852, 16, 2403, 303, 1403, 87, 16, 460, 4672, 3536, 3186, 445, 358, 5210, 1230, 444, 733, 1677, 2256, 486, 999, 3308, 1846, 5055, 4121, 1846, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 734, 39, 67, 694, 3843, 12, 1612, 16, 852, 16, 2403, 303, 1403, 87, 16, 460, 4672, 3536, 3186, 445, 358, 5210, 1230, 444, 733, 1677, 2256, 486, 999, 3308, 1846, 5055, 4121, 1846, ...
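A sketch of the offset-to-attribute dispatch shown above, using the renames from the fix (obj/attrmap rather than the shadowed builtins object/map). The Segment class and offsets are invented for illustration.

class Segment(object):
    start = 0
    end = 0

ATTRMAP = {0: 'start', 4: 'end'}          # hypothetical offsets

def set_attr(obj, attrmap, attroffs, value):
    if attroffs in attrmap and hasattr(obj, attrmap[attroffs]):
        return setattr(obj, attrmap[attroffs], value)
    raise KeyError("attribute with offset %d not found" % attroffs)

seg = Segment()
set_attr(seg, ATTRMAP, 4, 0x1000)
print(seg.end)                            # 4096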