Dataset fields:

field            type           min   max
---------------  -------------  ----  ----
rem              stringlengths  1     226k
add              stringlengths  0     227k
context          stringlengths  6     326k
meta             stringlengths  143   403
input_ids        listlengths    256   256
attention_mask   listlengths    256   256
labels           listlengths    128   128
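
Each record pairs a removed code span (rem) with its replacement (add), the enclosing source context (context), and the originating commit hash and file path (meta), plus a pre-tokenized view of the sample (input_ids, attention_mask, labels). A minimal sketch of loading and inspecting records with this schema, assuming the data is published in the Hugging Face datasets format; the dataset path and tokenizer name below are hypothetical placeholders, not names taken from this page:

    # Minimal inspection sketch. "example/code-edits" and "example/tokenizer"
    # are hypothetical; substitute the real dataset and tokenizer names.
    from datasets import load_dataset
    from transformers import AutoTokenizer

    ds = load_dataset("example/code-edits", split="train")    # hypothetical path
    tok = AutoTokenizer.from_pretrained("example/tokenizer")  # hypothetical tokenizer

    row = ds[0]
    print(row["meta"])                   # commit hash + source file path
    print(row["rem"])                    # code removed by the commit
    print(row["add"])                    # code added by the commit
    print(tok.decode(row["input_ids"]))  # detokenized model input
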
rem:
Ibar.attrib('isSB',1)

add:
Ibar.attrib('"isSB"',1)

context:
def triangular_decomposition(self, algorithm=None):
    """
    Decompose zero-dimensional ideal self into triangular sets.

meta:
7ff21255e57efe2e69f373ce46288ddc40c4ccd8 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/9890/7ff21255e57efe2e69f373ce46288ddc40c4ccd8/multi_polynomial_ideal.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 6882, 13077, 67, 323, 24388, 12, 2890, 16, 4886, 33, 7036, 4672, 3536, 26824, 4150, 3634, 17, 31236, 23349, 365, 1368, 6882, 13077, 1678, 18, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 6882, 13077, 67, 323, 24388, 12, 2890, 16, 4886, 33, 7036, 4672, 3536, 26824, 4150, 3634, 17, 31236, 23349, 365, 1368, 6882, 13077, 1678, 18, 2, -100, -100, -100, -100, -100, -100, -100, ...
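
Note the pattern across the three arrays above: labels mirrors input_ids, except that positions the attention_mask zeroes out (padding) are replaced with -100, the conventional ignore index for PyTorch's cross-entropy loss. A small sketch of how such labels could be derived, inferred from the visible pattern rather than from any documented pipeline for this dataset:

    import torch

    def mask_padding(input_ids, attention_mask, ignore_index=-100):
        # Copy input_ids into labels, hiding padded positions from the loss.
        labels = torch.tensor(input_ids).clone()
        labels[torch.tensor(attention_mask) == 0] = ignore_index
        return labels
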
rem:
os.system('%s "%s"'%(DOLPHIN,path))

add:
os.system('%s "%s" &'%(DOLPHIN,path))

context:
def browse_folder(self):
    """Browse folder"""
    if self.app.children:
        child = self.app.childActive
        if hasattr(child,'fileName'):
            path = info.dirname(child.fileName)
            if not os.path.exists(path):
                path = os.getcwd()
    else:
        path = os.getcwd()
    if os.path.exists(THUNAR):
        os.system('%s "%s"'%(THUNAR,path))
    elif os.path.exists(NAUTILUS):
        os.system('%s "%s"'%(NAUTILUS,path))
    elif os.path.exists(DOLPHIN):
        os.system('%s "%s"'%(DOLPHIN,path))
    elif os.path.exists(KONQUEROR):
        os.system('%s "%s"'%(KONQUEROR,path))
    elif os.path.exists(PCMANFM):
        os.system('%s "%s"'%(PCMANFM,path))
    else:
        if path[0] == '/':
            path = 'file://'+path
        webbrowser.open(path)

meta:
3297a811fde121a34b04e766bb0d64586dae5601 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/2464/3297a811fde121a34b04e766bb0d64586dae5601/Parent.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 21670, 67, 5609, 12, 2890, 4672, 3536, 27304, 3009, 8395, 309, 365, 18, 2910, 18, 5906, 30, 1151, 6647, 273, 365, 18, 2910, 18, 3624, 3896, 309, 3859, 12, 3624, 11189, 17812, 11, 4672, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 21670, 67, 5609, 12, 2890, 4672, 3536, 27304, 3009, 8395, 309, 365, 18, 2910, 18, 5906, 30, 1151, 6647, 273, 365, 18, 2910, 18, 3624, 3896, 309, 3859, 12, 3624, 11189, 17812, 11, 4672, ...
rem:
self.update_image = os.path.join( self.get_uuid,

add:
self.get_uuid = os.path.join( self.hooks_dir,

context:
def setup(self, config_file):
    """
    Setup config object according to config_file content.
    """

meta:
62c6549bd2aadbc30366a582418dd00b92349938 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5988/62c6549bd2aadbc30366a582418dd00b92349938/config.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3875, 12, 2890, 16, 642, 67, 768, 4672, 3536, 10939, 642, 733, 4888, 358, 642, 67, 768, 913, 18, 3536, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3875, 12, 2890, 16, 642, 67, 768, 4672, 3536, 10939, 642, 733, 4888, 358, 642, 67, 768, 913, 18, 3536, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, ...
rem:
print 'ratecv'
state = (-8000, ((256, 512),))
if audioop.ratecv(data[0], 1, 1, 8000, 16000, state) != \
   ('\001\000\000\001\001\002', state):
    return 0

add:
print 'ratecv'
state = None
d1, state = audioop.ratecv(data[0], 1, 1, 8000, 16000, state)
d2, state = audioop.ratecv(data[0], 1, 1, 8000, 16000, state)
if d1 + d2 != '\000\000\001\001\002\001\000\000\001\001\002':
    return 0

context:
def testratecv(data):
    if verbose:
        print 'ratecv'
    state = (-8000, ((256, 512),))
    if audioop.ratecv(data[0], 1, 1, 8000, 16000, state) != \
       ('\001\000\000\001\001\002', state):
        return 0
    return 1

meta:
8c511db5a463b1ee6190e846b4b4c33970acee2b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/8c511db5a463b1ee6190e846b4b4c33970acee2b/test_audioop.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 268, 281, 313, 340, 19774, 12, 892, 4672, 309, 3988, 30, 1172, 296, 5141, 19774, 11, 919, 273, 599, 302, 21, 16, 919, 273, 7447, 556, 18, 5141, 19774, 12, 892, 63, 20, 6487, 404, 16,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 268, 281, 313, 340, 19774, 12, 892, 4672, 309, 3988, 30, 1172, 296, 5141, 19774, 11, 919, 273, 599, 302, 21, 16, 919, 273, 7447, 556, 18, 5141, 19774, 12, 892, 63, 20, 6487, 404, 16,...
rem:
else

add:
else:

context:
def __init__(data = None)
    if data == None:
        quickfix.IntField.__init__(self, 832)
    else
        quickfix.IntField.__init__(self, 832, data)

meta:
484890147d4b23aac4b9d0e85e84fceab7e137c3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8819/484890147d4b23aac4b9d0e85e84fceab7e137c3/quickfix_fields.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 892, 273, 599, 13, 309, 501, 422, 599, 30, 9549, 904, 18, 1702, 974, 16186, 2738, 972, 12, 2890, 16, 1725, 1578, 13, 469, 30, 9549, 904, 18, 1702, 974, 16186, 27...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 892, 273, 599, 13, 309, 501, 422, 599, 30, 9549, 904, 18, 1702, 974, 16186, 2738, 972, 12, 2890, 16, 1725, 1578, 13, 469, 30, 9549, 904, 18, 1702, 974, 16186, 27...
rem:
moderators = models.ManyToManyField(User, blank=True, default=True, verbose_name=_('Moderators'))

add:
moderators = models.ManyToManyField(User, blank=True, null=True, verbose_name=_('Moderators'))

context:
def posts(self):
    return Post.objects.filter(topic__forum__category=self).select_related()

meta:
a6eb66d367f7ef7eaa92e58bd601b9c9f5cab153 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/12938/a6eb66d367f7ef7eaa92e58bd601b9c9f5cab153/models.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 10775, 12, 2890, 4672, 327, 5616, 18, 6911, 18, 2188, 12, 10476, 972, 11725, 972, 4743, 33, 2890, 2934, 4025, 67, 9243, 1435, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 10775, 12, 2890, 4672, 327, 5616, 18, 6911, 18, 2188, 12, 10476, 972, 11725, 972, 4743, 33, 2890, 2934, 4025, 67, 9243, 1435, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -10...
rem:
h.putrequest('POST', req.get_selector())
if 'Content-type' not in req.headers:

add:
h.putrequest('POST', req.get_selector(), **skipheaders)
if 'content-type' not in headers:

context:
def _start_transaction(self, h, req):
    try:
        if req.has_data():
            data = req.get_data()
            h.putrequest('POST', req.get_selector())
            if 'Content-type' not in req.headers:
                h.putheader('Content-type',
                            'application/x-www-form-urlencoded')
            if 'Content-length' not in req.headers:
                h.putheader('Content-length', '%d' % len(data))
        else:
            h.putrequest('GET', req.get_selector())
    except (socket.error), err:
        raise urllib2.URLError(err)

meta:
1e0dc291ee96c01ad68635bef43519602a1faf4d /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/11312/1e0dc291ee96c01ad68635bef43519602a1faf4d/keepalive.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 1937, 67, 7958, 12, 2890, 16, 366, 16, 1111, 4672, 775, 30, 309, 1111, 18, 5332, 67, 892, 13332, 501, 273, 1111, 18, 588, 67, 892, 1435, 366, 18, 458, 2293, 2668, 3798, 2187, 11...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 1937, 67, 7958, 12, 2890, 16, 366, 16, 1111, 4672, 775, 30, 309, 1111, 18, 5332, 67, 892, 13332, 501, 273, 1111, 18, 588, 67, 892, 1435, 366, 18, 458, 2293, 2668, 3798, 2187, 11...
print "DVDCreateSummary"
def createSummary(self): print "DVDCreateSummary" return DVDSummary
9672c9e353abdc56784d21567fdb32c03a9aa0b7 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/6652/9672c9e353abdc56784d21567fdb32c03a9aa0b7/plugin.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 752, 4733, 12, 2890, 4672, 327, 463, 21544, 4733, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 752, 4733, 12, 2890, 4672, 327, 463, 21544, 4733, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -10...
rem:
slots_w = None

add:
slots_w = []

context:
def __del__(self):
    try:
        self.space.userdel(self)
    except OperationError, e:
        e.write_unraisable(self.space, 'method __del__ of ', self)
        e.clear(self.space)
    # break up reference cycles
    if parent_destructor is not None:
        parent_destructor(self)

meta:
9f1c2b5e411c7c892285739267184406d589458c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/6934/9f1c2b5e411c7c892285739267184406d589458c/typedef.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 3771, 972, 12, 2890, 4672, 775, 30, 365, 18, 2981, 18, 1355, 3771, 12, 2890, 13, 1335, 4189, 668, 16, 425, 30, 425, 18, 2626, 67, 318, 354, 291, 429, 12, 2890, 18, 2981, 16, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 3771, 972, 12, 2890, 4672, 775, 30, 365, 18, 2981, 18, 1355, 3771, 12, 2890, 13, 1335, 4189, 668, 16, 425, 30, 425, 18, 2626, 67, 318, 354, 291, 429, 12, 2890, 18, 2981, 16, ...
rem:
if linenum == 0: return -1, "", True

add:
(empty)

context:
def get_line_info(self, linenum):
    """Get the line indent value, text, and any block start keyword

meta:
98f9ff7223406f0c344f530e77fc866a67894c35 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/98f9ff7223406f0c344f530e77fc866a67894c35/CodeContext.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 336, 67, 1369, 67, 1376, 12, 2890, 16, 4739, 7924, 4672, 3536, 967, 326, 980, 3504, 460, 16, 977, 16, 471, 1281, 1203, 787, 4932, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 336, 67, 1369, 67, 1376, 12, 2890, 16, 4739, 7924, 4672, 3536, 967, 326, 980, 3504, 460, 16, 977, 16, 471, 1281, 1203, 787, 4932, 2, -100, -100, -100, -100, -100, -100, -100, -100, -10...
rem:
self.defaults(mon_eff=False)

add:
self.set_defaults(mon_eff=False)

context:
def __init__(self, usage=None, option_list=None, options_class=None,
             version=None, conflict_handler='error', description=None,
             **kwargs):
    """
    Constructor for C{SansOptions}

meta:
2c14b0002fecc5ee319023d8dec9c05dafcd0b14 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/763/2c14b0002fecc5ee319023d8dec9c05dafcd0b14/hlr_sas_options.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 4084, 33, 7036, 16, 1456, 67, 1098, 33, 7036, 16, 702, 67, 1106, 33, 7036, 16, 1177, 33, 7036, 16, 7546, 67, 4176, 2218, 1636, 2187, 2477, 33, 7036, 16...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 4084, 33, 7036, 16, 1456, 67, 1098, 33, 7036, 16, 702, 67, 1106, 33, 7036, 16, 1177, 33, 7036, 16, 7546, 67, 4176, 2218, 1636, 2187, 2477, 33, 7036, 16...
rem:
self.workingArraySize = 5000*self.numberGlobalEquations

add:
self.workingArraySize = 100*self.numberGlobalEquations

context:
self.pointerToLgdef = lithomop3d.allocateInt( self.numberTimeStepGroups)

meta:
164302881cdb1f627221a3976e8ba17a057a59d8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8640/164302881cdb1f627221a3976e8ba17a057a59d8/Lithomop3d_setup.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 365, 18, 10437, 774, 48, 75, 536, 273, 328, 483, 362, 556, 23, 72, 18, 16247, 1702, 12, 365, 18, 2696, 950, 4160, 3621, 13, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 365, 18, 10437, 774, 48, 75, 536, 273, 328, 483, 362, 556, 23, 72, 18, 16247, 1702, 12, 365, 18, 2696, 950, 4160, 3621, 13, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -...
rem:
args[1] = os.path.join(sys.exec_prefix, "lib", "pdb.py")

add:
args[1] = os.path.join(sys.prefix, "lib", "pdb.py")

context:
def opt_debug(opt, arg):
    global print_tree
    global print_dtree
    global print_time
    if arg == "pdb":
        args = [ sys.executable, "pdb.py" ] + \
               filter(lambda x: x != "--debug=pdb", sys.argv)
        if sys.platform == 'win32':
            args[1] = os.path.join(sys.exec_prefix, "lib", "pdb.py")
            sys.exit(os.spawnve(os.P_WAIT, args[0], args, os.environ))
        else:
            args[1] = os.path.join(sys.exec_prefix, "lib",
                                   "python" + sys.version[0:3], "pdb.py")
            os.execvpe(args[0], args, os.environ)
    elif arg == "tree":
        print_tree = 1
    elif arg == "dtree":
        print_dtree = 1
    elif arg == "time":
        print_time = 1
    else:
        sys.stderr.write("Warning: %s is not a valid debug type\n" % arg)

meta:
13776315b91b68897c4e074e23a307ef6e516698 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12817/13776315b91b68897c4e074e23a307ef6e516698/__init__.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2153, 67, 4148, 12, 3838, 16, 1501, 4672, 2552, 1172, 67, 3413, 2552, 1172, 67, 72, 3413, 2552, 1172, 67, 957, 309, 1501, 422, 315, 17414, 6877, 833, 273, 306, 2589, 18, 17751, 16, 315...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2153, 67, 4148, 12, 3838, 16, 1501, 4672, 2552, 1172, 67, 3413, 2552, 1172, 67, 72, 3413, 2552, 1172, 67, 957, 309, 1501, 422, 315, 17414, 6877, 833, 273, 306, 2589, 18, 17751, 16, 315...
rem:
if dupCount == 3:

add:
if dupCount == 5:

context:
def scrapeCourt(courtID, result, verbosity, daemonmode):
    if verbosity >= 1:
        result += "NOW SCRAPING COURT: " + str(courtID) + "\n"
    if verbosity >= 2:
        print "NOW SCRAPING COURT: " + str(courtID)
    if (courtID == 1):
        """
        PDFs are available from the first circuit if you go to their RSS feed.
        So go to their RSS feed we shall.
        """
        urls = ("http://www.ca1.uscourts.gov/opinions/opinionrss.php",)
        ct = Court.objects.get(courtUUID='ca1')
        for url in urls:
            html = urllib2.urlopen(url).read()
            if daemonmode:
                # if it's daemonmode, see if the court has changed
                changed = courtChanged(url, html)
                if not changed:
                    # if not, bail. If so, continue to the scraping.
                    return result
            # this code gets rid of errant ampersands - they throw big errors
            # when parsing. We replace them later.
            if '&' in html:
                punctuationRegex = re.compile(" & ")
                html = re.sub(punctuationRegex, " &amp; ", html)
                tree = etree.fromstring(html)
            else:
                tree = etree.fromstring(html)
            caseLinks = tree.xpath("//item/link")
            descriptions = tree.xpath("//item/description")
            docTypes = tree.xpath("//item/category")
            caseNamesAndNumbers = tree.xpath("//item/title")
            caseDateRegex = re.compile("(\d{2}/\d{2}/\d{4})", re.VERBOSE | re.DOTALL)
            caseNumberRegex = re.compile("(\d{2}-.*?\W)(.*)$")
            # incredibly, this RSS feed is in cron order, so new stuff is at the
            # end. Mind blowing.
            i = len(caseLinks)-1
            if verbosity >= 2:
                print str(i)
            dupCount = 0
            while i > 0:
                # First: docType, since we don't support them all...
                docType = docTypes[i].text.strip()
                if verbosity >= 2:
                    print docType
                if "unpublished" in docType.lower():
                    documentType = "Unpublished"
                elif "published" in docType.lower():
                    documentType = "Published"
                elif "errata" in docType.lower():
                    documentType = "Errata"
                else:
                    # something weird we don't know about, punt
                    i -= 1
                    continue
                # next, we begin with the caseLink field
                caseLink = caseLinks[i].text
                caseLink = urljoin(url, caseLink)
                # then we download the PDF, make the hash and document
                myFile, doc, created, error = makeDocFromURL(caseLink, ct)
                if error:
                    # things broke, punt this iteration
                    i -= 1
                    continue
                if not created:
                    # it's an oldie, punt!
                    if verbosity >= 1:
                        result += "Duplicate found at " + str(i) + "\n"
                    dupCount += 1
                    if dupCount == 8:
                        # eighth dup in a a row. BREAK!
                        # this is 8 here b/c this court has tech problems.
                        break
                    i -= 1
                    continue
                else:
                    dupCount = 0
                # otherwise, we continue
                doc.documentType = documentType
                # next: caseDate
                caseDate = caseDateRegex.search(descriptions[i].text).group(1)
                splitDate = caseDate.split('/')
                caseDate = datetime.date(int(splitDate[2]), int(splitDate[0]),
                    int(splitDate[1]))
                doc.dateFiled = caseDate
                # next: caseNumber
                caseNumber = caseNumberRegex.search(caseNamesAndNumbers[i].text)\
                    .group(1)
                # next: caseNameShort
                caseNameShort = caseNumberRegex.search(caseNamesAndNumbers[i].text)\
                    .group(2)
                # check for dups, make the object if necessary, otherwise, get it
                cite, created = hasDuplicate(caseNumber, caseNameShort)
                # last, save evrything (pdf, citation and document)
                doc.citation = cite
                doc.local_path.save(trunc(cleanString(caseNameShort), 80) + ".pdf", myFile)
                doc.save()
                i -= 1
        return result
    elif (courtID == 2):
        """
        URL hacking FTW.
        """
        urls = (
            "http://www.ca2.uscourts.gov/decisions?IW_DATABASE=OPN&IW_FIELD_TEXT=OPN&IW_SORT=-Date&IW_BATCHSIZE=100",
            "http://www.ca2.uscourts.gov/decisions?IW_DATABASE=SUM&IW_FIELD_TEXT=SUM&IW_SORT=-Date&IW_BATCHSIZE=100",
        )
        ct = Court.objects.get(courtUUID='ca2')
        for url in urls:
            html = urllib2.urlopen(url).read()
            soup = BeautifulSoup(html)
            aTagsRegex = re.compile('(.*?.pdf).*?', re.IGNORECASE)
            caseNumRegex = re.compile('.*/(\d{1,2}-\d{3,4})(.*).pdf')
            aTags = soup.findAll(attrs={'href' : aTagsRegex})
            if daemonmode:
                # this mess is necessary because the court puts random
                # (literally) numbers throughout their links. No idea why,
                # but the solution is to figure out the caselinks here, and to hand
                # those to the sha1 generator.
                aTagsEncoded = []
                for i in aTags:
                    caseLink = i.get('href')
                    caseLink = aTagsRegex.search(caseLink).group(1)
                    try:
                        caseNumbers = caseNumRegex.search(caseLink).group(1)
                    except:
                        caseNumbers = ""
                    aTagsEncoded.append(caseNumbers)
                # if it's daemonmode, see if the court has changed
                changed = courtChanged(url, str(aTagsEncoded))
                if not changed:
                    # if not, bail. If so, continue to the scraping.
                    return result
            i = 0
            dupCount = 0
            while i < len(aTags):
                # we begin with the caseLink field
                caseLink = aTags[i].get('href')
                caseLink = aTagsRegex.search(caseLink).group(1)
                caseLink = urljoin(url, caseLink)
                if verbosity >= 2:
                    print str(i) + ": " + caseLink
                myFile, doc, created, error = makeDocFromURL(caseLink, ct)
                if error:
                    # things broke, punt this iteration
                    i += 1
                    continue
                if not created:
                    # it's an oldie, punt!
                    if verbosity >= 1:
                        result += "Duplicate found at " + str(i) + "\n"
                    dupCount += 1
                    if dupCount == 3:
                        # third dup in a a row. BREAK!
                        break
                    i += 1
                    continue
                else:
                    dupCount = 0
                # using caseLink, we can get the caseNumber and documentType
                caseNum = caseNumRegex.search(caseLink).group(1)
                if verbosity >= 2:
                    print "caseNum: " + str(caseNum)
                # and the docType
                documentType = caseNumRegex.search(caseLink).group(2)
                if 'opn' in documentType:
                    # it's unpublished
                    doc.documentType = "Published"
                elif 'so' in documentType:
                    doc.documentType = "Unpublished"
                # next, the caseNameShort (there's probably a better way to do this.
                caseNameShort = aTags[i].parent.parent.nextSibling.nextSibling\
                    .nextSibling.nextSibling.contents[0]
                # next, we can do the caseDate
                caseDate = aTags[i].parent.parent.nextSibling.nextSibling\
                    .nextSibling.nextSibling.nextSibling.nextSibling.contents[0]\
                    .replace('&nbsp;', ' ').strip()
                # some caseDate cleanup
                splitDate = caseDate.split('-')
                caseDate = datetime.date(int(splitDate[2]),int(splitDate[0]),
                    int(splitDate[1]))
                doc.dateFiled = caseDate
                # check for duplicates, make the object in their absence
                cite, created = hasDuplicate(caseNum, caseNameShort)
                # last, save evrything (pdf, citation and document)
                doc.citation = cite
                doc.local_path.save(trunc(cleanString(caseNameShort), 80) + ".pdf", myFile)
                doc.save()
                i += 1
        return result
    elif (courtID == 3):
        """
        This URL provides the latest 25 cases, so I need to pick out the new
        ones and only get those. I can do this efficiently by trying to do
        each, and then giving up once I hit one that I've done before. This
        will work because they are in reverse chronological order.
        """
        # if these URLs change, the docType identification (below) will need
        # to be updated. It's lazy, but effective.
        urls = ("http://www.ca3.uscourts.gov/recentop/week/recprec.htm",
                "http://www.ca3.uscourts.gov/recentop/week/recnon2day.htm",)
        ct = Court.objects.get(courtUUID='ca3')
        for url in urls:
            html = urllib2.urlopen(url).read()
            if daemonmode:
                # if it's daemonmode, see if the court has changed
                changed = courtChanged(url, html)
                if not changed:
                    # if not, bail. If so, continue to the scraping.
                    return result
            soup = BeautifulSoup(html)
            # all links ending in pdf, case insensitive
            regex = re.compile("pdf$", re.IGNORECASE)
            aTags = soup.findAll(attrs={"href": regex})
            # we will use these vars in our while loop, better not to compile them
            # each time
            regexII = re.compile('\d{2}/\d{2}/\d{2}')
            regexIII = re.compile('\d{2}-\d{4}')
            i = 0
            dupCount = 0
            while i < len(aTags):
                # caseLink and caseNameShort
                caseLink = aTags[i].get('href')
                myFile, doc, created, error = makeDocFromURL(caseLink, ct)
                if error:
                    # things broke, punt this iteration
                    i += 1
                    continue
                if not created:
                    # it's an oldie, punt!
                    if verbosity >= 1:
                        result += "Duplicate found at " + str(i) + "\n"
                    dupCount += 1
                    if dupCount == 3:
                        # third dup in a a row. BREAK!
                        break
                    i += 1
                    continue
                else:
                    dupCount = 0
                caseNameShort = aTags[i].contents[0]
                # caseDate and caseNumber
                junk = aTags[i].previous.previous.previous
                try:
                    # this error seems to happen upon dups...not sure why yet
                    caseDate = regexII.search(junk).group(0)
                    caseNumber = regexIII.search(junk).group(0)
                except:
                    i = i+1
                    continue
                # next up is the caseDate
                splitDate = caseDate.split('/')
                caseDate = datetime.date(int("20" + splitDate[2]),int(splitDate[0]),
                    int(splitDate[1]))
                doc.dateFiled = caseDate
                # Make a decision about the docType.
                if "recprec.htm" in str(url):
                    doc.documentType = "Published"
                elif "recnon2day.htm" in str(url):
                    doc.documentType = "Unpublished"
                cite, created = hasDuplicate(caseNumber, caseNameShort)
                # last, save evrything (pdf, citation and document)
                doc.citation = cite
                doc.local_path.save(trunc(cleanString(caseNameShort), 80) + ".pdf", myFile)
                doc.save()
                i += 1
        return result
    elif (courtID == 4):
        """The fourth circuit is THE worst form of HTML I've ever seen. It's
        going to break a lot, but I've done my best to clean it up, and make it
        reliable."""
        urls = ("http://pacer.ca4.uscourts.gov/opinions_today.htm",)
        ct = Court.objects.get(courtUUID='ca4')
        for url in urls:
            html = urllib2.urlopen(url).read()
            if daemonmode:
                # if it's daemonmode, see if the court has changed
                changed = courtChanged(url, html)
                if not changed:
                    # if not, bail. If so, continue to the scraping.
                    return result
            # sadly, beautifulsoup chokes on the lines lines of this file because
            # the HTML is so bad. Stop laughing - the HTML IS awful, but it's not
            # funny. Anyway, to make this thing work, we must pull out the target
            # attributes. And so we do.
            regex = re.compile("target.*>", re.IGNORECASE)
            html = re.sub(regex, ">", html)
            soup = BeautifulSoup(html)
            # all links ending in pdf, case insensitive
            regex = re.compile("pdf$", re.IGNORECASE)
            aTags = soup.findAll(attrs={"href": regex})
            i = 0
            dupCount = 0
            regexII = re.compile('\d{2}/\d{2}/\d{4}')
            regexIII = re.compile('\d{4}(.*)')
            while i < len(aTags):
                # caseLink field, and save it
                caseLink = aTags[i].get('href')
                caseLink = urljoin(url, caseLink)
                myFile, doc, created, error = makeDocFromURL(caseLink, ct)
                if error:
                    # things broke, punt this iteration
                    i += 1
                    continue
                if not created:
                    # it's an oldie, punt!
                    if verbosity >= 1:
                        result += "Duplicate found at " + str(i) + "\n"
                    dupCount += 1
                    if dupCount == 3:
                        # third dup in a a row. BREAK!
                        break
                    i += 1
                    continue
                else:
                    dupCount = 0
                # using caselink, we can get the caseNumber and documentType
                fileName = caseLink.split('/')[-1]
                caseNumber, documentType = fileName.split('.')[0:2]
                # the caseNumber needs a hyphen inserted after the second digit
                caseNumber = caseNumber[0:2] + "-" + caseNumber[2:]
                if documentType == 'U':
                    doc.documentType = 'Unpublished'
                elif documentType == 'P':
                    doc.documentType = 'Published'
                else:
                    doc.documentType = ""
                # next, we do the caseDate and caseNameShort, so we can quit before
                # we get too far along.
                junk = aTags[i].contents[0].replace('&nbsp;', ' ').strip()
                try:
                    # this error seems to happen upon dups...not sure why yet
                    caseDate = cleanString(regexII.search(junk).group(0))
                    caseNameShort = regexIII.search(junk).group(1)
                except:
                    i += 1
                    continue
                # some caseDate cleanup
                splitDate = caseDate.split('/')
                caseDate = datetime.date(int(splitDate[2]),int(splitDate[0]),
                    int(splitDate[1]))
                doc.dateFiled = caseDate
                # let's check for duplicates before we proceed
                cite, created = hasDuplicate(caseNumber, caseNameShort)
                # last, save evrything (pdf, citation and document)
                doc.citation = cite
                doc.local_path.save(trunc(cleanString(caseNameShort), 80) + ".pdf", myFile)
                doc.save()
                i += 1
        return result
    elif (courtID == 5):
        """New fifth circuit scraper, which can get back versions all the way
        to 1992! This is exciting, but be warned, the search is not reliable
        on recent dates. It has been known not to bring back results that are
        definitely within the set. Watch closely.
        """
        urls = ("http://www.ca5.uscourts.gov/Opinions.aspx",)
        ct = Court.objects.get(courtUUID='ca5')
        for url in urls:
            # Use just one date, it seems to work better this way.
            todayObject = datetime.date.today()
            if verbosity >= 2:
                print "start date: " + str(todayObject)
            startDate = time.strftime('%m/%d/%Y', todayObject.timetuple())
            if verbosity >= 2:
                print "Start date is: " + startDate
            # these are a mess because the court has a security check.
            postValues = {
                '__EVENTTARGET' : '',
                '__EVENTARGUMENT' : '',
                '__VIEWSTATE' : '/wEPDwULLTEwOTU2NTA2NDMPZBYCAgEPZBYKAgEPDxYIHgtDZWxsUGFkZGluZ2YeC0NlbGxTcGFjaW5nZh4JQmFja0NvbG9yCRcQJ/8eBF8hU0ICiIAYZGQCAw8PFggfAGYfAWYfAgmZzP//HwMCiIAYZGQCGQ9kFgYCAg8PFgQfAgqHAR8DAghkZAIEDw8WBB8CCocBHwMCCGRkAgYPDxYEHwIKhwEfAwIIZGQCGw9kFooBAgIPDxYEHwIKhwEfAwIIZGQCBA8PFgQfAgqHAR8DAghkZAIGDw8WBB8CCocBHwMCCGRkAggPDxYEHwIKhwEfAwIIZGQCCg8PFgQfAgqHAR8DAghkZAIMDw8WBB8CCocBHwMCCGRkAg4PDxYEHwIKhwEfAwIIZGQCEA8PFgQfAgqHAR8DAghkZAISDw8WBB8CCocBHwMCCGRkAhQPDxYEHwIKhwEfAwIIZGQCFg8PFgQfAgqHAR8DAghkZAIYDw8WBB8CCocBHwMCCGRkAhoPDxYEHwIKhwEfAwIIZGQCHA8PFgQfAgqHAR8DAghkZAIeDw8WBB8CCocBHwMCCGRkAiAPDxYEHwIKhwEfAwIIZGQCIg8PFgQfAgqHAR8DAghkZAIkDw8WBB8CCocBHwMCCGRkAiYPDxYEHwIKhwEfAwIIZGQCKA8PFgQfAgqHAR8DAghkZAIqDw8WBB8CCocBHwMCCGRkAiwPDxYEHwIKhwEfAwIIZGQCLg8PFgQfAgqHAR8DAghkZAIwDw8WBB8CCocBHwMCCGRkAjIPDxYEHwIKhwEfAwIIZGQCNA8PFgQfAgqHAR8DAghkZAI2Dw8WBB8CCocBHwMCCGRkAjgPDxYEHwIKhwEfAwIIZGQCOg8PFgQfAgqHAR8DAghkZAI8Dw8WBB8CCocBHwMCCGRkAj4PDxYEHwIKhwEfAwIIZGQCQA8PFgQfAgqHAR8DAghkZAJCDw8WBB8CCocBHwMCCGRkAkQPDxYEHwIKhwEfAwIIZGQCRg8PFgQfAgqHAR8DAghkZAJIDw8WBB8CCocBHwMCCGRkAkoPDxYEHwIKhwEfAwIIZGQCTA8PFgQfAgqHAR8DAghkZAJODw8WBB8CCocBHwMCCGRkAlAPDxYEHwIKhwEfAwIIZGQCUg8PFgQfAgqHAR8DAghkZAJUDw8WBB8CCocBHwMCCGRkAlYPDxYEHwIKhwEfAwIIZGQCWA8PFgQfAgqHAR8DAghkZAJaDw8WBB8CCocBHwMCCGRkAlwPDxYEHwIKhwEfAwIIZGQCXg8PFgQfAgqHAR8DAghkZAJgDw8WBB8CCocBHwMCCGRkAmIPDxYEHwIKhwEfAwIIZGQCZA8PFgQfAgqHAR8DAghkZAJmDw8WBB8CCocBHwMCCGRkAmgPDxYEHwIKhwEfAwIIZGQCag8PFgQfAgqHAR8DAghkZAJsDw8WBB8CCocBHwMCCGRkAm4PDxYEHwIKhwEfAwIIZGQCcA8PFgQfAgqHAR8DAghkZAJyDw8WBB8CCocBHwMCCGRkAnQPDxYEHwIKhwEfAwIIZGQCdg8PFgQfAgqHAR8DAghkZAJ4Dw8WBB8CCocBHwMCCGRkAnoPDxYEHwIKhwEfAwIIZGQCfA8PFgQfAgqHAR8DAghkZAJ+Dw8WBB8CCocBHwMCCGRkAoABDw8WBB8CCocBHwMCCGRkAoIBDw8WBB8CCocBHwMCCGRkAoQBDw8WBB8CCocBHwMCCGRkAoYBDw8WBB8CCocBHwMCCGRkAogBDw8WBB8CCocBHwMCCGRkAooBDw8WBB8CCocBHwMCCGRkAh0PEGRkFgECAmRkcx2JRvTiy039dck7+vdOCUS6J5s=',
                'txtBeginDate' : startDate,
                'txtEndDate' : '',
                'txtDocketNumber' : '',
                'txtTitle=' : '',
                'btnSearch' : 'Search',
                '__EVENTVALIDATION' : '/wEWCALd2o3pAgLH8d2nDwKAzfnNDgLChrRGAr2b+P4BAvnknLMEAqWf8+4KAqC3sP0KVcw25xdB1YPfbcUwUCqEYjQqaqM=',
            }
            data = urllib.urlencode(postValues)
            req = urllib2.Request(url, data)
            html = urllib2.urlopen(req).read()
            if daemonmode:
                # if it's daemonmode, see if the court has changed
                changed = courtChanged(url, html)
                if not changed:
                    # if not, bail. If so, continue to the scraping.
                    return result
            soup = BeautifulSoup(html)
            #if verbosity >= 2: print soup
            #all links ending in pdf, case insensitive
            aTagRegex = re.compile("pdf$", re.IGNORECASE)
            aTags = soup.findAll(attrs={"href": aTagRegex})
            unpubRegex = re.compile(r"pinions.*unpub")
            i = 0
            dupCount = 0
            numP = 0
            numQ = 0
            while i < len(aTags):
                # this page has PDFs that aren't cases, we must filter them out
                if 'pinion' not in str(aTags[i]):
                    # it's not an opinion, increment and punt
                    if verbosity >= 2:
                        print "Punting non-opinion URL: " + str(aTags[i])
                    i += 1
                    continue
                # we begin with the caseLink field
                caseLink = aTags[i].get('href')
                caseLink = urljoin(url, caseLink)
                myFile, doc, created, error = makeDocFromURL(caseLink, ct)
                if error:
                    # things broke, punt this iteration
                    i += 1
                    continue
                # next, we do the docStatus field, b/c we need to include it in
                # the dup check. This is because we need to abort after we have
                # three non-precedential and three precedential from this court.
                if unpubRegex.search(str(aTags[i])) == None:
                    # it's published, else it's unpublished
                    documentType = "Published"
                    numP += 1
                else:
                    documentType = "Unpublished"
                    numQ += 1
                if verbosity >= 2:
                    print "documentType: " + documentType
                doc.documentType = documentType
                if not created:
                    # it's an oldie, punt!
                    if verbosity >= 1:
                        result += "Duplicate found at " + str(i) + "\n"
                    dupCount += 1
                    if dupCount >= 3 and numP >= 3 and numQ >= 3:
                        # third dup in a a row for both U and P.
                        break
                    i += 1
                    continue
                else:
                    dupCount = 0
                # using caseLink, we can get the caseNumber and documentType
                caseNumber = aTags[i].contents[0]
                # next, we do the caseDate
                caseDate = aTags[i].next.next.contents[0].contents[0]
                # some caseDate cleanup
                splitDate = caseDate.split('/')
                caseDate = datetime.date(int(splitDate[2]),int(splitDate[0]),
                    int(splitDate[1]))
                doc.dateFiled = caseDate
                # next, we do the caseNameShort
                caseNameShort = aTags[i].next.next.next.next.next.contents[0]\
                    .contents[0]
                # now that we have the caseNumber and caseNameShort, we can dup check
                cite, created = hasDuplicate(caseNumber, caseNameShort)
                # last, save evrything (pdf, citation and document)
                doc.citation = cite
                doc.local_path.save(trunc(cleanString(caseNameShort), 80) + ".pdf", myFile)
                doc.save()
                i += 1
        return result
    elif (courtID == 6):
        """Results are available without an HTML POST, but those results lack
        a date field. Hence, we must do an HTML POST.
        Missing a day == OK. Just need to monkey with the date POSTed.
        """
        urls = ("http://www.ca6.uscourts.gov/cgi-bin/opinions.pl",)
        ct = Court.objects.get(courtUUID = 'ca6')
        for url in urls:
            today = datetime.date.today()
            formattedToday = str(today.month) + '/' + str(today.day) + '/' +\
                str(today.year)
            postValues = {
                'CASENUM' : '',
                'TITLE' : '',
                'FROMDATE' : formattedToday,
                'TODATE' : formattedToday,
                'OPINNUM' : ''
            }
            data = urllib.urlencode(postValues)
            req = urllib2.Request(url, data)
            html = urllib2.urlopen(req).read()
            if daemonmode:
                # if it's daemonmode, see if the court has changed
                changed = courtChanged(url, html)
                if not changed:
                    # if not, bail. If so, continue to the scraping.
                    return result
            soup = BeautifulSoup(html)
            aTagsRegex = re.compile('pdf$', re.IGNORECASE)
            aTags = soup.findAll(attrs={'href' : aTagsRegex})
            i = 0
            dupCount = 0
            while i < len(aTags):
                # we begin with the caseLink field
                caseLink = aTags[i].get('href')
                caseLink = urljoin(url, caseLink)
                myFile, doc, created, error = makeDocFromURL(caseLink, ct)
                if error:
                    # things broke, punt this iteration
                    i += 1
                    continue
                if not created:
                    # it's an oldie, punt!
                    if verbosity >= 1:
                        result += "Duplicate found at " + str(i) + "\n"
                    dupCount += 1
                    if dupCount == 3:
                        # third dup in a a row. BREAK!
                        break
                    i += 1
                    continue
                else:
                    dupCount = 0
                # using caseLink, we can get the caseNumber and documentType
                caseNumber = aTags[i].next.next.next.next.next.contents[0]
                # using the filename, we can determine the documentType...
                fileName = aTags[i].contents[0]
                if 'n' in fileName:
                    # it's unpublished
                    doc.documentType = "Unpublished"
                elif 'p' in fileName:
                    doc.documentType = "Published"
                # next, we can do the caseDate
                caseDate = aTags[i].next.next.next.next.next.next.next.next\
                    .contents[0]
                caseDate = cleanString(caseDate)
                # some caseDate cleanup
                splitDate = caseDate.split('/')
                caseDate = datetime.date(int(splitDate[0]),int(splitDate[1]),
                    int(splitDate[2]))
                doc.dateFiled = caseDate
                # next, the caseNameShort (there's probably a better way to do this.
                caseNameShort = aTags[i].next.next.next.next.next.next.next.next\
                    .next.next.next
                # now that we have the caseNumber and caseNameShort, we can dup check
                cite, created = hasDuplicate(caseNumber, caseNameShort)
                # last, save evrything (pdf, citation and document)
                doc.citation = cite
                doc.local_path.save(trunc(cleanString(caseNameShort), 80) + ".pdf", myFile)
                doc.save()
                i += 1
        return result
    elif (courtID == 7):
        """another court where we need to do a post. This will be a good
        starting place for getting the judge field, when we're ready for that.
        Missing a day == OK. Queries return cases for the past week.
        """
        urls = ("http://www.ca7.uscourts.gov/fdocs/docs.fwx",)
        ct = Court.objects.get(courtUUID = 'ca7')
        for url in urls:
            # if these strings change, check that documentType still gets set correctly.
            dataStrings = ("yr=&num=&Submit=Past+Week&dtype=Opinion&scrid=Select+a+Case",
                "yr=&num=&Submit=Past+Week&dtype=Nonprecedential+Disposition&scrid=Select+a+Case",)
            for dataString in dataStrings:
                req = urllib2.Request(url, dataString)
                html = urllib2.urlopen(req).read()
                if daemonmode:
                    # if it's daemonmode, see if the court has changed
                    changed = courtChanged(url+dataString, html)
                    if not changed:
                        # if not, bail. If so, continue to the scraping.
                        return result
                soup = BeautifulSoup(html)
                aTagsRegex = re.compile('pdf$', re.IGNORECASE)
                aTags = soup.findAll(attrs={'href' : aTagsRegex})
                i = 0
                dupCount = 0
                while i < len(aTags):
                    # we begin with the caseLink field
                    caseLink = aTags[i].get("href")
                    caseLink = urljoin(url, caseLink)
                    myFile, doc, created, error = makeDocFromURL(caseLink, ct)
                    if error:
                        # things broke, punt this iteration
                        i += 1
                        continue
                    if not created:
                        # it's an oldie, punt!
                        if verbosity >= 1:
                            result += "Duplicate found at " + str(i) + "\n"
                        dupCount += 1
                        if dupCount == 3:
                            # third dup in a a row. BREAK!
                            break
                        i += 1
                        continue
                    else:
                        dupCount = 0
                    # using caseLink, we can get the caseNumber and documentType
                    caseNumber = aTags[i].previous.previous.previous.previous.previous\
                        .previous.previous.previous.previous.previous
                    # next up: caseDate
                    caseDate = aTags[i].previous.previous.previous.contents[0]
                    caseDate = cleanString(caseDate)
                    splitDate = caseDate.split('/')
                    caseDate = datetime.date(int(splitDate[2]), int(splitDate[0]),
                        int(splitDate[1]))
                    doc.dateFiled = caseDate
                    # next up: caseNameShort
                    caseNameShort = aTags[i].previous.previous.previous.previous\
                        .previous.previous.previous
                    # next up: docStatus
                    if "type=Opinion" in dataString:
                        doc.documentType = "Published"
                    elif "type=Nonprecedential+Disposition" in dataString:
                        doc.documentType = "Unpublished"
                    # now that we have the caseNumber and caseNameShort, we can dup check
                    cite, created = hasDuplicate(caseNumber, caseNameShort)
                    # last, save evrything (pdf, citation and document)
                    doc.citation = cite
                    doc.local_path.save(trunc(cleanString(caseNameShort), 80) + ".pdf", myFile)
                    doc.save()
                    i += 1
        return result
    elif (courtID == 8):
        urls = ("http://www.ca8.uscourts.gov/cgi-bin/new/today2.pl",)
        ct = Court.objects.get(courtUUID = 'ca8')
        for url in urls:
            html = urllib2.urlopen(url).read()
            if daemonmode:
                # if it's daemonmode, see if the court has changed
                changed = courtChanged(url, html)
                if not changed:
                    # if not, bail. If so, continue to the scraping.
                    return result
            soup = BeautifulSoup(html)
            aTagsRegex = re.compile('pdf$', re.IGNORECASE)
            aTags = soup.findAll(attrs={'href' : aTagsRegex})
            caseNumRegex = re.compile('(\d{2})(\d{4})(u|p)', re.IGNORECASE)
            caseDateRegex = re.compile('(\d{2}/\d{2}/\d{4})(.*)(</b>)')
            i = 0
            dupCount = 0
            while i < len(aTags):
                # we begin with the caseLink field
                caseLink = aTags[i].get('href')
                caseLink = urljoin(url, caseLink)
                myFile, doc, created, error = makeDocFromURL(caseLink, ct)
                if error:
                    # things broke, punt this iteration
                    i += 1
                    continue
                if not created:
                    # it's an oldie, punt!
                    if verbosity >= 1:
                        result += "Duplicate found at " + str(i) + "\n"
                    dupCount += 1
                    if dupCount == 3:
                        # third dup in a a row. BREAK!
                        break
                    i += 1
                    continue
                else:
                    dupCount = 0
                # using caseLink, we can get the caseNumber and documentType
                junk = aTags[i].contents[0]
                caseNumber = caseNumRegex.search(junk).group(1) + "-" +\
                    caseNumRegex.search(junk).group(2)
                documentType = caseNumRegex.search(junk).group(3).upper()
                if documentType == 'U':
                    doc.documentType = 'Unpublished'
                elif documentType == 'P':
                    doc.documentType = 'Published'
                # caseDate is next on the block
                junk = str(aTags[i].next.next.next)
                caseDate = caseDateRegex.search(junk).group(1)
                caseDate = cleanString(caseDate)
                caseNameShort = caseDateRegex.search(junk).group(2)
                # some caseDate cleanup
                splitDate = caseDate.split('/')
                caseDate = datetime.date(int(splitDate[2]),int(splitDate[0]),
                    int(splitDate[1]))
                doc.dateFiled = caseDate
                # now that we have the caseNumber and caseNameShort, we can dup check
                cite, created = hasDuplicate(caseNumber, caseNameShort)
                # last, save evrything (pdf, citation and document)
                doc.citation = cite
                doc.local_path.save(trunc(cleanString(caseNameShort), 80) + ".pdf", myFile)
                doc.save()
                i += 1
        return result
    elif (courtID == 9):
        """This court, by virtue of having a javascript laden website, was very
        hard to parse properly. BeautifulSoup couldn't handle it at all, so
        lxml has to be used. lxml seems pretty useful, but it was a pain to
        learn."""
        # these URLs redirect now. So much for hacking them. A new approach can probably be done using POST data.
        urls = (
            "http://www.ca9.uscourts.gov/opinions/?o_mode=view&amp;o_sort_field=19&amp;o_sort_type=DESC&o_page_size=100",
            "http://www.ca9.uscourts.gov/memoranda/?o_mode=view&amp;o_sort_field=21&amp;o_sort_type=DESC&o_page_size=100",)
        ct = Court.objects.get(courtUUID = 'ca9')
        for url in urls:
            if verbosity >= 2:
                print "Link is now: " + url
            html = urllib2.urlopen(url).read()
            tree = fromstring(html)
            if url == urls[0]:
                caseLinks = tree.xpath('//table[3]/tbody/tr/td/a')
                caseNumbers = tree.xpath('//table[3]/tbody/tr/td[2]/label')
                caseDates = tree.xpath('//table[3]/tbody/tr/td[6]/label')
            elif url == urls[1]:
                caseLinks = tree.xpath('//table[3]/tbody/tr/td/a')
                caseNumbers = tree.xpath('//table[3]/tbody/tr/td[2]/label')
                caseDates = tree.xpath('//table[3]/tbody/tr/td[7]/label')
            if daemonmode:
                # if it's daemonmode, see if the court has changed
                # this is necessary because the 9th circuit puts random numbers
                # in their HTML. This gets rid of those, so SHA1 can be generated.
                listofLinks = []
                for i in caseLinks:
                    listofLinks.append(i.get('href'))
                changed = courtChanged(url, str(listofLinks))
                if not changed:
                    # if not, bail. If so, continue to the scraping.
                    return result
            i = 0
            dupCount = 0
            while i < len(caseLinks):
                # we begin with the caseLink field
                caseLink = caseLinks[i].get('href')
                caseLink = urljoin(url, caseLink)
                if verbosity >= 2:
                    print "CaseLink is: " + caseLink
                # special case
                if 'no memos filed' in caseLink.lower():
                    i += 1
                    continue
                myFile, doc, created, error = makeDocFromURL(caseLink, ct)
                if error:
                    # things broke, punt this iteration
                    if verbosity >= 2:
                        print "Error creating file. Punting..."
                    i += 1
                    continue
                if not created:
                    # it's an oldie, punt!
                    if verbosity >= 1:
                        result += "Duplicate found at " + str(i) + "\n"
                    dupCount += 1
                    if dupCount == 3:
                        # third dup in a a row. BREAK!
                        break
                    i += 1
                    continue
                else:
                    dupCount = 0
                # next, we'll do the caseNumber
                caseNumber = caseNumbers[i].text
                if verbosity >= 2:
                    print "CaseNumber is: " + caseNumber
                # next up: document type (static for now)
                if 'memoranda' in url:
                    doc.documentType = "Unpublished"
                elif 'opinions' in url:
                    doc.documentType = "Published"
                if verbosity >= 2:
                    print "Document type is: " + doc.documentType
                # next up: caseDate
                splitDate = caseDates[i].text.split('/')
                caseDate = datetime.date(int(splitDate[2]), int(splitDate[0]),
                    int(splitDate[1]))
                doc.dateFiled = caseDate
                if verbosity >= 2:
                    print "CaseDate is: " + str(caseDate)
                #next up: caseNameShort
                caseNameShort = titlecase(caseLinks[i].text.lower())
                if verbosity >= 2:
                    print "CaseNameShort is: " + caseNameShort + "\n\n"
                # now that we have the caseNumber and caseNameShort, we can dup check
                cite, created = hasDuplicate(caseNumber, caseNameShort)
                # last, save evrything (pdf, citation and document)
                doc.citation = cite
                doc.local_path.save(trunc(cleanString(caseNameShort), 80) + ".pdf", myFile)
                doc.save()
                i += 1
        return result
    elif (courtID == 10):
        # a daily feed of all the items posted THAT day. Missing a day == bad.
        urls = ("http://www.ck10.uscourts.gov/opinions/new/daily_decisions.rss",)
        ct = Court.objects.get(courtUUID = 'ca10')
        for url in urls:
            html = urllib2.urlopen(url).read()
            if daemonmode:
                # if it's daemonmode, see if the court has changed
                changed = courtChanged(url, html)
                if not changed:
                    # if not, bail. If so, continue to the scraping.
                    return result
            # this code gets rid of errant ampersands - they throw big errors
            # when parsing. We replace them later.
            if '&' in html:
                punctuationRegex = re.compile(" & ")
                html = re.sub(punctuationRegex, " &amp; ", html)
                tree = etree.fromstring(html)
            else:
                tree = etree.fromstring(html)
            caseLinks = tree.xpath("//item/link")
            descriptions = tree.xpath("//item/description")
            docTypes = tree.xpath("//item/category")
            caseNames = tree.xpath("//item/title")
            caseDateRegex = re.compile("(\d{2}/\d{2}/\d{4})", re.VERBOSE | re.DOTALL)
            caseNumberRegex = re.compile("(\d{2}-\d{4})(.*)$")
            i = 0
            dupCount = 0
            while i < len(caseLinks):
                # we begin with the caseLink field
                caseLink = caseLinks[i].text
                caseLink = urljoin(url, caseLink)
                if verbosity >= 2:
                    print "Link: " + caseLink
                myFile, doc, created, error = makeDocFromURL(caseLink, ct)
                if error:
                    # things broke, punt this iteration
                    if verbosity >= 1:
                        print "Error creating file, punting."
                    i += 1
                    continue
                if not created:
                    # it's an oldie, punt!
                    if verbosity >= 1:
                        result += "Duplicate found at " + str(i) + "\n"
                    dupCount += 1
                    if dupCount == 3:
                        # third dup in a a row. BREAK!
                        break
                    i += 1
                    continue
                else:
                    dupCount = 0
                # next: docType (this order of if statements IS correct)
                docType = docTypes[i].text.strip()
                if "unpublished" in docType.lower():
                    doc.documentType = "Unpublished"
                elif "published" in docType.lower():
                    doc.documentType = "Published"
                else:
                    # it's an errata, or something else we don't care about
                    i += 1
                    continue
                # next: caseDate
                caseDate = caseDateRegex.search(descriptions[i].text).group(1)
                splitDate = caseDate.split('/')
                caseDate = datetime.date(int(splitDate[2]), int(splitDate[0]),
                    int(splitDate[1]))
                doc.dateFiled = caseDate
                if verbosity >= 2:
                    print "Case date is: " + str(caseDate)
                # next: caseNumber
                caseNumber = caseNumberRegex.search(descriptions[i].text)\
                    .group(1)
                if verbosity >= 2:
                    print "Case number is: " + caseNumber
                # next: caseNameShort
                caseNameShort = caseNames[i].text
                if verbosity >= 2:
                    print "Case name is: " + caseNameShort
                # check for dups, make the object if necessary, otherwise, get it
                cite, created = hasDuplicate(caseNumber, caseNameShort)
                # last, save evrything (pdf, citation and document)
                doc.citation = cite
                doc.local_path.save(trunc(cleanString(caseNameShort), 80) + ".pdf", myFile)
                doc.save()
                i += 1
        return result
    elif (courtID == 11):
        """Prior to rev 313 (2010-04-27), this got published documents only,
        using the court's RSS feed. Currently, it uses lxml to parse the HTML
        on the published and unpublished feeds. It can be set to do any date
        range desired, however such modifications should likely go in
        back_scrape.py."""
        # Missing a day == OK.
        urls = (
            "http://www.ca11.uscourts.gov/unpub/searchdate.php",
            "http://www.ca11.uscourts.gov/opinions/searchdate.php",
        )
        ct = Court.objects.get(courtUUID = 'ca11')
        for url in urls:
            date = time.strftime('%Y-%m', datetime.date.today().timetuple())
            if verbosity >= 2:
                print "date: " + str(date)
            postValues = {
                'date' : date,
            }
            data = urllib.urlencode(postValues)
            req = urllib2.Request(url, data)
            html = urllib2.urlopen(req).read()
            if daemonmode:
                # if it's daemonmode, see if the court has changed
                changed = courtChanged(url, html)
                if not changed:
                    # if not, bail. If so, continue to the scraping.
                    return result
            tree = fromstring(html)
            if 'unpub' in url:
                caseNumbers = tree.xpath('//table[3]//table//table/tr[1]/td[2]')
                caseLinks = tree.xpath('//table[3]//table//table/tr[3]/td[2]/a')
                caseDates = tree.xpath('//table[3]//table//table/tr[4]/td[2]')
                caseNames = tree.xpath('//table[3]//table//table/tr[6]/td[2]')
            elif 'opinion' in url:
                caseNumbers = tree.xpath('//table[3]//td[3]//table/tr[1]/td[2]')
                caseLinks = tree.xpath('//table[3]//td[3]//table/tr[3]/td[2]/a')
                caseDates = tree.xpath('//table[3]//td[3]//table/tr[4]/td[2]')
                caseNames = tree.xpath('//table[3]//td[3]//table/tr[6]/td[2]')
            ''' # for debugging
            print "length: " + str(len(caseNames))
            for foo in caseNames:
                print str(foo.text)
            return result'''
            i = 0
            dupCount = 0
            while i < len(caseNumbers):
                caseLink = caseLinks[i].get('href')
                caseLink = urljoin(url, caseLink)
                myFile, doc, created, error = makeDocFromURL(caseLink, ct)
                if error:
                    # things broke, punt this iteration
                    i += 1
                    continue
                if not created:
                    # it's an oldie, punt!
                    if verbosity >= 1:
                        result += "Duplicate found at " + str(i) + "\n"
                    if verbosity >= 2:
                        print "Duplicate found at " + str(i)
                    dupCount += 1
                    if dupCount == 3:
                        # third dup in a a row. BREAK!
                        break
                    i += 1
                    continue
                else:
                    dupCount = 0
                if 'unpub' in url:
                    doc.documentType = "Unpublished"
                elif 'opinion' in url:
                    doc.documentType = "Published"
                if verbosity >= 2:
                    print "documentType: " + str(doc.documentType)
                cleanDate = cleanString(caseDates[i].text)
                doc.dateFiled = datetime.datetime(*time.strptime(cleanDate, "%m-%d-%Y")[0:5])
                if verbosity >= 2:
                    print "dateFiled: " + str(doc.dateFiled)
                caseNameShort = caseNames[i].text
                caseNumber = caseNumbers[i].text
                cite, created = hasDuplicate(caseNumber, caseNameShort)
                if verbosity >= 2:
                    print "caseNameShort: " + cite.caseNameShort
                    print "caseNumber: " + cite.caseNumber + "\n"
                doc.citation = cite
                doc.local_path.save(trunc(cleanString(caseNameShort), 80) + ".pdf", myFile)
                doc.save()
                i += 1
        return result
    elif (courtID == 12):
        # terrible site. Code assumes that we download the opinion on the day
        # it is released. If we miss a day, that could cause a problem.
        urls = ("http://www.cadc.uscourts.gov/bin/opinions/allopinions.asp",)
        ct = Court.objects.get(courtUUID = 'cadc')
        for url in urls:
            html = urllib2.urlopen(url).read()
            if daemonmode:
                # if it's daemonmode, see if the court has changed
                changed = courtChanged(url, html)
                if not changed:
                    # if not, bail. If so, continue to the scraping.
                    return result
            soup = BeautifulSoup(html)
            aTagsRegex = re.compile('pdf$', re.IGNORECASE)
            aTags = soup.findAll(attrs={'href' : aTagsRegex})
            caseNumRegex = re.compile("(\d{2}-\d{4})")
            i = 0
            dupCount = 0
            while i < len(aTags):
                # we begin with the caseLink field
                caseLink = aTags[i].get('href')
                caseLink = urljoin(url, caseLink)
                myFile, doc, created, error = makeDocFromURL(caseLink, ct)
                if error:
                    # things broke, punt this iteration
                    i += 1
                    continue
                if not created:
                    # it's an oldie, punt!
                    if verbosity >= 1:
                        result += "Duplicate found at " + str(i) + "\n"
                    dupCount += 1
                    if dupCount == 3:
                        # third dup in a a row. BREAK!
                        break
                    i += 1
                    continue
                else:
                    dupCount = 0
                # using caseLink, we can get the caseNumber
                caseNumber = caseNumRegex.search(caseLink).group(1)
                # we can hard-code this b/c the D.C. Court paywalls all
                # unpublished opinions.
                doc.documentType = "Published"
                # caseDate is next on the block
                caseDate = datetime.date.today()
                doc.dateFiled = caseDate
                caseNameShort = aTags[i].next.next.next
                # now that we have the caseNumber and caseNameShort, we can dup check
                cite, created = hasDuplicate(caseNumber, caseNameShort)
                # if that goes well, we save to the DB
                doc.citation = cite
                # last, save evrything (pdf, citation and document)
                doc.citation = cite
                doc.local_path.save(trunc(cleanString(caseNameShort), 80) + ".pdf", myFile)
                doc.save()
                i += 1
        return result
    elif (courtID == 13):
        # running log of all opinions
        urls = ("http://www.cafc.uscourts.gov/dailylog.html",)
        ct = Court.objects.get(courtUUID = "cafc")
        for url in urls:
            html = urllib2.urlopen(url).read()
            if daemonmode:
                # if it's daemonmode, see if the court has changed
                changed = courtChanged(url, html)
                if not changed:
                    # if not, bail. If so, continue to the scraping.
                    return result
            soup = BeautifulSoup(html)
            aTagsRegex = re.compile('pdf$', re.IGNORECASE)
            trTags = soup.findAll('tr')
            # start on the second row, since the first is headers.
            i = 1
            dupCount = 0
            while i <= 50: #stop at 50, if no triple dups first.
                try:
                    caseLink = trTags[i].td.nextSibling.nextSibling.nextSibling\
                        .nextSibling.nextSibling.nextSibling.a.get('href').strip('.')
                    caseLink = urljoin(url, caseLink)
                    if 'opinion' not in caseLink:
                        # we have a non-case PDF. punt
                        i += 1
                        continue
                except:
                    # the above fails when things get funky, in that case, we punt
                    i += 1
                    continue
                myFile, doc, created, error = makeDocFromURL(caseLink, ct)
                if error:
                    # things broke, punt this iteration
                    i += 1
                    continue
                if not created:
                    # it's an oldie, punt!
                    if verbosity >= 1:
                        result += "Duplicate found at " + str(i) + "\n"
                    dupCount += 1
                    if dupCount == 3:
                        # third dup in a a row. BREAK!
                        break
                    i += 1
                    continue
                else:
                    dupCount = 0
                # next: caseNumber
                caseNumber = trTags[i].td.nextSibling.nextSibling.contents[0]\
                    .strip('.pdf')
                # next: dateFiled
                dateFiled = trTags[i].td.contents
                splitDate = dateFiled[0].split("/")
                dateFiled = datetime.date(int(splitDate[0]), int(splitDate[1]),
                    int(splitDate[2]))
                doc.dateFiled = dateFiled
                # next: caseNameShort
                caseNameShort = trTags[i].td.nextSibling.nextSibling.nextSibling\
                    .nextSibling.nextSibling.nextSibling.a.contents[0]
                # next: documentType
                documentType = trTags[i].td.nextSibling.nextSibling.nextSibling\
                    .nextSibling.nextSibling.nextSibling.nextSibling.nextSibling\
                    .contents[0].contents[0]
                # normalize the result for our internal purposes...
                if documentType == "N":
                    documentType = "Unpublished"
                elif documentType == "P":
                    documentType = "Published"
                doc.documentType = documentType
                # now that we have the caseNumber and caseNameShort, we can dup check
                cite, created = hasDuplicate(caseNumber, caseNameShort)
                # last, save evrything (pdf, citation and document)
                doc.citation = cite
                doc.local_path.save(trunc(cleanString(caseNameShort), 80) + ".pdf", myFile)
                doc.save()
                i += 1
        return result
    if (courtID == 14):
        # we do SCOTUS
        urls = ("http://www.supremecourt.gov/opinions/slipopinions.aspx",
                "http://www.supremecourt.gov/opinions/in-chambers.aspx",
                "http://www.supremecourt.gov/opinions/relatingtoorders.aspx",)
        ct = Court.objects.get(courtUUID = 'scotus')
        for url in urls:
            if verbosity >= 2:
                print "Scraping URL: " + url
            html = urllib2.urlopen(url).read()
            tree = fromstring(html)
            if 'slipopinion' in url:
                caseLinks = tree.xpath('//table/tr/td[4]/a')
                caseNumbers = tree.xpath('//table/tr/td[3]')
                caseDates = tree.xpath('//table/tr/td[2]')
            elif 'in-chambers' in url:
                caseLinks = tree.xpath('//table/tr/td[3]/a')
                caseNumbers = tree.xpath('//table/tr/td[2]')
                caseDates = tree.xpath('//table/tr/td[1]')
            elif 'relatingtoorders' in url:
                caseLinks = tree.xpath('//table/tr/td[3]/a')
                caseNumbers = tree.xpath('//table/tr/td[2]')
                caseDates = tree.xpath('//table/tr/td[1]')
            if daemonmode:
                # if it's daemonmode, see if the court has changed
                # this is necessary because the SCOTUS puts random numbers
                # in their HTML. This gets rid of those, so SHA1 can be generated.
                listofLinks = []
                for i in caseLinks:
                    listofLinks.append(i.get('href'))
                changed = courtChanged(url, str(listofLinks))
                if not changed:
                    # if not, bail. If so, continue to the scraping.
                    return result
            i = 0
            dupCount = 0
            while i < len(caseLinks):
                # we begin with the caseLink field
                caseLink = caseLinks[i].get('href')
                caseLink = urljoin(url, caseLink)
                if verbosity >= 2:
                    print "caseLink: " + caseLink
                myFile, doc, created, error = makeDocFromURL(caseLink, ct)
                if error:
                    # things broke, punt this iteration
                    i += 1
                    continue
                if not created:
                    # it's an oldie, punt!
                    if verbosity >= 1:
                        result += "Duplicate found at " + str(i) + "\n"
                    if verbosity >= 2:
                        print "Duplicate found at " + str(i) + '\n'
                    dupCount += 1
                    if dupCount == 3:
                        # third dup in a a row. BREAK!
                        break
                    i += 1
                    continue
                else:
                    dupCount = 0
                caseNumber = caseNumbers[i].text
                if verbosity >= 2:
                    print "caseNumber: " + caseNumber
                caseNameShort = caseLinks[i].text
                if verbosity >= 2:
                    print "caseNameShort: " + caseNameShort
                if 'slipopinion' in url:
                    doc.documentType = "Published"
                elif 'in-chambers' in url:
                    doc.documentType = "In-chambers"
                elif 'relatingtoorders' in url:
                    doc.documentType = "Relating-to"
                if verbosity >= 2:
                    print "documentType: " + doc.documentType
                if '/' in caseDates[i].text:
                    splitDate = caseDates[i].text.split('/')
                elif '-' in caseDates[i].text:
                    splitDate = caseDates[i].text.split('-')
                year = int("20" + splitDate[2])
                caseDate = datetime.date(year, int(splitDate[0]),
                    int(splitDate[1]))
                doc.dateFiled = caseDate
                if verbosity >= 2:
                    print "caseDate: " + str(caseDate)
                # now that we have the caseNumber and caseNameShort, we can dup check
                cite, created = hasDuplicate(caseNumber, caseNameShort)
                # last, save evrything (pdf, citation and document)
                doc.citation = cite
                doc.local_path.save(trunc(cleanString(caseNameShort), 80) + ".pdf", myFile)
                doc.save()
                i += 1
        return result

meta:
5f8ab0dce527e0f00d170a17505b959be1785525 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/6762/5f8ab0dce527e0f00d170a17505b959be1785525/scrape_and_parse.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 888, 25360, 29328, 88, 12, 71, 477, 88, 734, 16, 563, 16, 11561, 16, 8131, 3188, 4672, 309, 11561, 1545, 404, 30, 563, 1011, 315, 27091, 348, 5093, 2203, 1360, 7910, 1099, 56, 30, 315,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 888, 25360, 29328, 88, 12, 71, 477, 88, 734, 16, 563, 16, 11561, 16, 8131, 3188, 4672, 309, 11561, 1545, 404, 30, 563, 1011, 315, 27091, 348, 5093, 2203, 1360, 7910, 1099, 56, 30, 315,...
rem:
if not self.host: return

add:
log(status)
return NetworkTransaction().run(lambda: self._post_status_to_server(queue_name, status, patch, results_file))

context:
def update_status(self, queue_name, status, patch=None, results_file=None):
    # During unit testing, host is None
    if not self.host:
        return

meta:
f3e267b4dcbf3edb5ac4abe8328f5b954acd1494 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9392/f3e267b4dcbf3edb5ac4abe8328f5b954acd1494/statusserver.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1089, 67, 2327, 12, 2890, 16, 2389, 67, 529, 16, 1267, 16, 4729, 33, 7036, 16, 1686, 67, 768, 33, 7036, 4672, 468, 463, 4017, 2836, 7769, 16, 1479, 353, 599, 309, 486, 365, 18, 2564,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1089, 67, 2327, 12, 2890, 16, 2389, 67, 529, 16, 1267, 16, 4729, 33, 7036, 16, 1686, 67, 768, 33, 7036, 4672, 468, 463, 4017, 2836, 7769, 16, 1479, 353, 599, 309, 486, 365, 18, 2564,...
rem:
self.trace("Creating %r" % self)

add:
if self.gc_debug:
    self.trace("Creating %r" % self)

context:
def __init__(self, handler = None, errback = None):
    if handler is not None:
        self.set_handler(handler)
    if errback is not None:
        self.set_errback(errback)
    self.when = None
    self.trace("Creating %r" % self)

meta:
3c73588e50b92a77af22348f11c2019540d620f5 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/119/3c73588e50b92a77af22348f11c2019540d620f5/async.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 1838, 273, 599, 16, 29642, 273, 599, 4672, 309, 1838, 353, 486, 599, 30, 365, 18, 542, 67, 4176, 12, 4176, 13, 309, 29642, 353, 486, 599, 30, 365, 18, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 1838, 273, 599, 16, 29642, 273, 599, 4672, 309, 1838, 353, 486, 599, 30, 365, 18, 542, 67, 4176, 12, 4176, 13, 309, 29642, 353, 486, 599, 30, 365, 18, ...
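This diff gates the creation trace behind a gc_debug flag so per-object logging only runs when explicitly enabled. A small sketch of the same guard, with an illustrative class name:

class Timer(object):
    gc_debug = False  # enable to log object creation when hunting leaks

    def __init__(self):
        if self.gc_debug:
            self.trace("Creating %r" % self)

    def trace(self, message):
        print(message)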
events = []
events = []
def tearDown(self):
    del self.engine
550f86f8e1997ee990877076421feaf1936da8eb /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5451/550f86f8e1997ee990877076421feaf1936da8eb/PyV8.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 268, 2091, 4164, 12, 2890, 4672, 1464, 365, 18, 8944, 225, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 268, 2091, 4164, 12, 2890, 4672, 1464, 365, 18, 8944, 225, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, ...
list = self.fcn_list[:]
stat_list = self.fcn_list[:]
def get_print_list(self, sel_list):
    width = self.max_name_len
    if self.fcn_list:
        list = self.fcn_list[:]
        msg = " Ordered by: " + self.sort_type + '\n'
    else:
        list = self.stats.keys()
        msg = " Random listing order was used\n"
d9839e637b7c812d57b335b956f1d674502783c9 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8125/d9839e637b7c812d57b335b956f1d674502783c9/pstats.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 336, 67, 1188, 67, 1098, 12, 2890, 16, 357, 67, 1098, 4672, 1835, 273, 365, 18, 1896, 67, 529, 67, 1897, 309, 365, 18, 7142, 82, 67, 1098, 30, 610, 67, 1098, 273, 365, 18, 7142, 82...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 336, 67, 1188, 67, 1098, 12, 2890, 16, 357, 67, 1098, 4672, 1835, 273, 365, 18, 1896, 67, 529, 67, 1897, 309, 365, 18, 7142, 82, 67, 1098, 30, 610, 67, 1098, 273, 365, 18, 7142, 82...
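The pstats change renames a local called list to stat_list; the original shadowed the built-in, which breaks any later use of list() in the same scope. A short demonstration of the hazard:

def shadowed():
    list = [3, 1, 2]      # local name hides the built-in list()
    return list((4, 5))   # TypeError: 'list' object is not callable

def renamed():
    stat_list = [3, 1, 2]  # descriptive name, built-in stays reachable
    return list((4, 5))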
left = ROI[roi_index]['Left']
right = ROI[roi_index]['Right']
top = ROI[roi_index]['Top']
bottom = ROI[roi_index]['Bottom']
front = ROI[roi_index]['Front']
back = ROI[roi_index]['Back']
left = expanded_ROI['Left']
right = expanded_ROI['Right']
top = expanded_ROI['Top']
bottom = expanded_ROI['Bottom']
front = expanded_ROI['Front']
back = expanded_ROI['Back']
def region_grow(label_image, raw_image, ROI, roi_index, roi_inflate, stop_thresh=0.5, N_connectivity=3): """ region_grow(label_image, raw_image, ROI, roi_index, roi_inflate, stop_thresh) starting from a seed (masks or regions in the label_image) grow the regions based on (1) connectivity (2D or 3D) and (2) raw voxel values > stop threshold criterion. Parameters ---------- label_image : {nd_array} an image with labeled regions from get_blobs() method raw_image : {nd_array} raw image from which texture features get extracted ROI : {dictionary} Region of Interest structure that has blob bounding boxes. The largest 2D target bounding box is extracted. roi_index : {int} the single ROI element to apply region growing to. roi_inflate : {list} the maximum increase in the ROI bounding box. For 3D the tuple is [layers, rows, cols] and for 2D it is [rows, cols]. stop_thresh : {float} this is the percent of the voxel mean that the growing region must be greater than. region growing terminates when the raw_image is below this value. N_connectivity : {int} for growing this indicates how connected in a 3x3 or 3x3x3 window the un-labeled sample is. Make less than full connected for growing boundaries Returns ---------- updated_label_image : {nd_array} the label image with the selected ROi after region growing """ _c_ext_struct = NP.dtype([('Left', 'i'), ('Right', 'i'), ('Top', 'i'), ('Bottom', 'i'), ('Front', 'i'), ('Back', 'i'), ('Label', 'i'), ('Mass', 'i'), ('cX', 'f'), ('cY', 'f'), ('cZ', 'f')] ) expanded_ROI = NP.zeros(1, dtype=_c_ext_struct) dimensions = label_image.ndim if dimensions == 3: z_ext = roi_inflate[0] y_ext = roi_inflate[1] x_ext = roi_inflate[2] [layers, rows, cols] = label_image.shape updated_label_image = NP.zeros(layers*rows*cols, dtype=NP.int16).reshape(layers, rows, cols) updated_label_image = label_image.copy() else: y_ext = roi_inflate[0] x_ext = roi_inflate[1] [rows, cols] = label_image.shape updated_label_image = NP.zeros(rows*cols, dtype=NP.int16).reshape(rows, cols) updated_label_image = label_image.copy() if dimensions == 2: left = ROI[roi_index]['Left']-x_ext right = ROI[roi_index]['Right']+x_ext bottom = ROI[roi_index]['Bottom']-y_ext top = ROI[roi_index]['Top']+y_ext Label = ROI[roi_index]['Label'] cutoff = stop_thresh * ROI[roi_index]['voxelMean'] print 'cutoff = ', cutoff if left < 0: left = 0 if bottom < 0: bottom = 0 if right > cols-1: right = cols-1 if top > rows-1: top = rows-1 expanded_ROI['Left'] = left expanded_ROI['Right'] = right expanded_ROI['Top'] = top expanded_ROI['Bottom'] = bottom expanded_ROI['Label'] = Label rows = top-bottom cols = right-left label = NP.zeros(rows*cols, dtype=NP.int16).reshape(rows, cols) section = NP.zeros(rows*cols, dtype=NP.float64).reshape(rows, cols) label = label_image[bottom:top, left:right].copy() section = (raw_image[bottom:top, left:right].astype(NP.float64)).copy() elif dimensions == 3: left = ROI[roi_index]['Left']-x_ext right = ROI[roi_index]['Right']+x_ext bottom = ROI[roi_index]['Bottom']-y_ext top = ROI[roi_index]['Top']+y_ext front = ROI[roi_index]['Front']-z_ext back = ROI[roi_index]['Back']+z_ext Label = ROI[roi_index]['Label'] cutoff = stop_thresh * ROI[roi_index]['voxelMean'] if left < 0: left = 0 if bottom < 0: bottom = 0 if right > cols-1: right = cols-1 if top > rows-1: top = rows-1 if front < 0: front = 0 if back > layers-1: back = layers-1 expanded_ROI['Left'] = left expanded_ROI['Right'] = right expanded_ROI['Top'] = top expanded_ROI['Bottom'] = bottom expanded_ROI['Back'] = back expanded_ROI['Front'] = front 
expanded_ROI['Label'] = Label rows = top-bottom cols = right-left layers = back-front label = NP.zeros(layers*rows*cols, dtype=NP.int16).reshape(layers, rows, cols) label = label_image[front:back, bottom:top, left:right].copy() section = NP.zeros(layers*rows*cols, dtype=NP.float64).reshape(layers, rows, cols) section = (raw_image[front:back, bottom:top, left:right].astype(NP.float64)).copy() # # this newgrow_ROI gets filled in and the label image is grown # #return label, section newgrow_ROI = NP.zeros(1, dtype=_c_ext_struct) S.region_grow(section, label, expanded_ROI, newgrow_ROI, cutoff, Label, N_connectivity) if dimensions == 2: # adjust for delta window ROI[roi_index]['Left'] = newgrow_ROI['Left'] ROI[roi_index]['Right'] = newgrow_ROI['Right'] ROI[roi_index]['Top'] = newgrow_ROI['Top'] ROI[roi_index]['Bottom'] = newgrow_ROI['Bottom'] left = ROI[roi_index]['Left'] right = ROI[roi_index]['Right'] top = ROI[roi_index]['Top'] bottom = ROI[roi_index]['Bottom'] rows = top-bottom cols = right-left updated_label_image[bottom:top,left:right] = label[0:rows,0:cols] elif dimensions == 3: ROI[roi_index]['Left'] = newgrow_ROI['Left'] ROI[roi_index]['Right'] = newgrow_ROI['Right'] ROI[roi_index]['Top'] = newgrow_ROI['Top'] ROI[roi_index]['Bottom'] = newgrow_ROI['Bottom'] ROI[roi_index]['Front'] = newgrow_ROI['Front'] ROI[roi_index]['Back'] = newgrow_ROI['Back'] left = ROI[roi_index]['Left'] right = ROI[roi_index]['Right'] top = ROI[roi_index]['Top'] bottom = ROI[roi_index]['Bottom'] front = ROI[roi_index]['Front'] back = ROI[roi_index]['Back'] rows = top-bottom cols = right-left layers = back-front updated_label_image[front:back,bottom:top,left:right] = label[0:layers,0:rows,0:cols] #return updated_label_image return label
45295c7a202f419ec5a20ed06086e443dedcc927 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/12971/45295c7a202f419ec5a20ed06086e443dedcc927/_segmenter.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3020, 67, 75, 492, 12, 1925, 67, 2730, 16, 1831, 67, 2730, 16, 6525, 45, 16, 25017, 67, 1615, 16, 25017, 67, 267, 2242, 340, 16, 2132, 67, 19237, 33, 20, 18, 25, 16, 423, 67, 3612,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3020, 67, 75, 492, 12, 1925, 67, 2730, 16, 1831, 67, 2730, 16, 6525, 45, 16, 25017, 67, 1615, 16, 25017, 67, 267, 2242, 340, 16, 2132, 67, 19237, 33, 20, 18, 25, 16, 423, 67, 3612,...
pygame.sprite.spritecollide(sprite, group, dokill, collided = None): return Sprite_list
pygame.sprite.spritecollide(sprite, group, dokill, collided=None): return Sprite_list
def spritecollide(sprite, group, dokill, collided = None): """find Sprites in a Group that intersect another Sprite pygame.sprite.spritecollide(sprite, group, dokill, collided = None): return Sprite_list Return a list containing all Sprites in a Group that intersect with another Sprite. Intersection is determined by comparing the Sprite.rect attribute of each Sprite. The dokill argument is a bool. If set to True, all Sprites that collide will be removed from the Group. The collided argument is a callback function used to calculate if two sprites are colliding. it should take two sprites as values, and return a bool value indicating if they are colliding. If collided is not passed, all sprites must have a "rect" value, which is a rectangle of the sprite area, which will be used to calculate the collision. """ crashed = [] if collided is None: # Special case old behaviour for speed. spritecollide = sprite.rect.colliderect if dokill: for s in group.sprites(): if spritecollide(s.rect): s.kill() crashed.append(s) else: for s in group: if spritecollide(s.rect): crashed.append(s) else: if dokill: for s in group.sprites(): if collided(sprite, s): s.kill() crashed.append(s) else: for s in group: if collided(sprite, s): crashed.append(s) return crashed
d9760f3e4782abb02dd98080337626eefdad67ee /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/1298/d9760f3e4782abb02dd98080337626eefdad67ee/sprite.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1694, 583, 18997, 8130, 12, 1752, 796, 16, 1041, 16, 302, 601, 737, 16, 645, 549, 785, 273, 599, 4672, 3536, 4720, 5878, 24047, 316, 279, 3756, 716, 9136, 4042, 5878, 796, 225, 2395, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1694, 583, 18997, 8130, 12, 1752, 796, 16, 1041, 16, 302, 601, 737, 16, 645, 549, 785, 273, 599, 4672, 3536, 4720, 5878, 24047, 316, 279, 3756, 716, 9136, 4042, 5878, 796, 225, 2395, 1...
def _test(): class BoundedQueue(_Verbose): def __init__(self, limit): _Verbose.__init__(self) self.mon = RLock() self.rc = Condition(self.mon) self.wc = Condition(self.mon) self.limit = limit self.queue = deque() def put(self, item): self.mon.acquire() while len(self.queue) >= self.limit: self._note("put(%s): queue full", item) self.wc.wait() self.queue.append(item) self._note("put(%s): appended, length now %d", item, len(self.queue)) self.rc.notify() self.mon.release() def get(self): self.mon.acquire() while not self.queue: self._note("get(): queue empty") self.rc.wait() item = self.queue.popleft() self._note("get(): got %s, %d left", item, len(self.queue)) self.wc.notify() self.mon.release() return item class ProducerThread(Thread): def __init__(self, queue, quota): Thread.__init__(self, name="Producer") self.queue = queue self.quota = quota def run(self): from random import random counter = 0 while counter < self.quota: counter = counter + 1 self.queue.put("%s.%d" % (self.name, counter)) _sleep(random() * 0.00001) class ConsumerThread(Thread): def __init__(self, queue, count): Thread.__init__(self, name="Consumer") self.queue = queue self.count = count def run(self): while self.count > 0: item = self.queue.get() print(item) self.count = self.count - 1 NP = 3 QL = 4 NI = 5 Q = BoundedQueue(QL) P = [] for i in range(NP): t = ProducerThread(Q, NI) t.name = "Producer-%d" % (i+1) P.append(t) C = ConsumerThread(Q, NI*NP) for t in P: t.start() _sleep(0.000001) C.start() for t in P: t.join() C.join() if __name__ == '__main__': _test()
def _after_fork(): # This function is called by Python/ceval.c:PyEval_ReInitThreads which # is called from PyOS_AfterFork. Here we cleanup threading module state # that should not exist after a fork. # Reset _active_limbo_lock, in case we forked while the lock was held # by another (non-forked) thread. http://bugs.python.org/issue874900 global _active_limbo_lock _active_limbo_lock = _allocate_lock() # fork() only copied the current thread; clear references to others. new_active = {} current = current_thread() with _active_limbo_lock: for thread in _active.values(): if thread is current: # There is only one active thread. We reset the ident to # its new value since it can have changed. ident = _get_ident() thread._ident = ident new_active[ident] = thread else: # All the others are already stopped. # We don't call _Thread__stop() because it tries to acquire # thread._Thread__block which could also have been held while # we forked. thread._stopped = True _limbo.clear() _active.clear() _active.update(new_active) assert len(_active) == 1
148724d39b082d9ea19297262d97984127594f14 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8546/148724d39b082d9ea19297262d97984127594f14/threading.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 5205, 67, 23335, 13332, 468, 1220, 445, 353, 2566, 635, 6600, 19, 311, 1125, 18, 71, 30, 9413, 13904, 67, 426, 2570, 13233, 1492, 468, 353, 2566, 628, 4707, 4618, 67, 4436, 22662, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 5205, 67, 23335, 13332, 468, 1220, 445, 353, 2566, 635, 6600, 19, 311, 1125, 18, 71, 30, 9413, 13904, 67, 426, 2570, 13233, 1492, 468, 353, 2566, 628, 4707, 4618, 67, 4436, 22662, ...
comment.append('[http://www.google.com/search?sourceid=navclient&q=cache:%s Google cache]' % urllib2.quote(short_url(add_item)))
comment.append('[http://www.google.com/search?sourceid=navclient&q=cache:%s Google cache]' % urllib.quote(short_url(add_item)))
def add_in_urllist(url, add_item, engine, cache_url = None): if (engine == 'google' and config.copyright_check_in_source_google) or \ (engine == 'yahoo' and config.copyright_check_in_source_yahoo) or \ (engine == 'msn' and config.copyright_check_in_source_msn): check_in_source = True else: check_in_source = False if check_in_source or config.copyright_show_date or config.copyright_show_length: s = None cache = False # list to store date, length, cache URL comment = list() try: s = WebPage(add_item) except URL_exclusion: pass except NoWebPage: cache = True if s: # Before of add url in result list, perform the check in source if check_in_source: if s.check_in_source(): return if config.copyright_show_date: date = s.lastmodified() if date: if date[:3] != time.localtime()[:3]: comment.append("%s/%s/%s" % (date[2], date[1], date[0])) unit = 'bytes' if config.copyright_show_length: length = s.length() if length: # convert in kilobyte length /= 1024 unit = 'KB' if length > 1024: # convert in megabyte length /= 1024 unit = 'MB' if length > 0: comment.append("%d %s" % (length, unit)) if cache: if cache_url: if engine == 'google': comment.append('[http://www.google.com/search?sourceid=navclient&q=cache:%s Google cache]' % urllib2.quote(short_url(add_item))) elif engine == 'yahoo': #cache = False #comment.append('[%s Yahoo cache]' % re.sub('&appid=[^&]*','', urllib2.unquote(cache_url))) comment.append("''Yahoo cache''") elif engine == 'msn': comment.append('[%s Live cache]' % re.sub('&lang=[^&]*','', cache_url)) else: comment.append('[http://web.archive.org/*/%s archive.org]' % short_url(add_item)) for i in range(len(url)): if add_item in url[i]: if engine not in url[i][1]: if url[i][2]: comment = url[i][2] url[i] = (add_item, url[i][1] + ', ' + engine, comment) return url.append((add_item, engine, comment)) return
98b49ec34c6d4067c0248a230c0b407821cf72a1 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/4404/98b49ec34c6d4067c0248a230c0b407821cf72a1/copyright.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 527, 67, 267, 67, 718, 1098, 12, 718, 16, 527, 67, 1726, 16, 4073, 16, 1247, 67, 718, 273, 599, 4672, 225, 309, 261, 8944, 422, 296, 9536, 11, 471, 642, 18, 29187, 67, 1893, 67, 26...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 527, 67, 267, 67, 718, 1098, 12, 718, 16, 527, 67, 1726, 16, 4073, 16, 1247, 67, 718, 273, 599, 4672, 225, 309, 261, 8944, 422, 296, 9536, 11, 471, 642, 18, 29187, 67, 1893, 67, 26...
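The copyright.py fix swaps urllib2.quote for urllib.quote: in Python 2 the quote function lives in urllib, and urllib2 has no such attribute, so the removed line would raise AttributeError. A sketch of the working call (Python 2 print syntax, matching the file's vintage):

import urllib

cache_query = 'cache:http://example.org/some page'
print urllib.quote(cache_query)  # cache%3Ahttp%3A//example.org/some%20page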
more_attr += col_name + ' ' + attr_dict['sql']
more_attr += col_name + ' ' + col_dict['sql']
def add_column(self, table, col_name, attr_dict, default='NULL'): """ Takes a while, thanks to SQLite... """ # Check input: if not self.__skeleton__.has_key(table): raise ValueError("Database has no table %s."%table) if self.__skeleton__[table].has_key(col_name): raise ValueError("Table %s already has column %s."%(table,col_name)) attr_dict = verify_column(attr_dict) # Get an ordered list: cur_list = skel_to_col_attr_list(self.__skeleton__[table]) # Update the skeleton: self.__skeleton__[table][col_name] = attr_dict original = '' for col in cur_list: original += col[0] +', ' original = original.rstrip(', ') more = original + ', ' + col_name more_attr = '' for col in cur_list: if col[2]: # If primary key: more_attr += col[0] + ' ' + col[1] + ' primary key, ' else: more_attr += col[0] + ' ' + col[1] + ', ' more_attr += col_name + ' ' + attr_dict['sql'] # ROBERT: Look at the new fun way to do this... # executescript runs a begin transaction and commit so this # should speed things up for even large amounts of data # Silly SQLite -- we have to make a temp table to hold info... self.__connection__.executescript(""" create temporary table spam(%s); insert into spam select %s, %s from %s; drop table %s; create table %s (%s); """%(more_attr, original, default, table, table, table, more_attr)) # Update indices in new table new_table_set_col_attr(self.__connection__, table, self.__skeleton__[table]) # Now we can plop our data into the *new* table: self.__connection__.executescript(""" insert into %s select %s from spam; drop table spam; """%(table, more)) self.vacuum()
5ddf79d83938a9c0191ec3af2f60e264a74ecef9 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/9890/5ddf79d83938a9c0191ec3af2f60e264a74ecef9/database.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 527, 67, 2827, 12, 2890, 16, 1014, 16, 645, 67, 529, 16, 1604, 67, 1576, 16, 805, 2218, 8560, 11, 4672, 3536, 23004, 279, 1323, 16, 286, 19965, 358, 16192, 2777, 225, 3536, 468, 2073, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 527, 67, 2827, 12, 2890, 16, 1014, 16, 645, 67, 529, 16, 1604, 67, 1576, 16, 805, 2218, 8560, 11, 4672, 3536, 23004, 279, 1323, 16, 286, 19965, 358, 16192, 2777, 225, 3536, 468, 2073, ...
[1, ex[0], ey[0], 0, 0, 0],
[0, 0, 0, 1, ex[0], ey[0]],
[1, ex[1], ey[1], 0, 0, 0],
[0, 0, 0, 1, ex[1], ey[1]],
[1, ex[2], ey[2], 0, 0, 0],
[0, 0, 0, 1, ex[2], ey[2]]
[ 1, ex[0], ey[0], 0, 0, 0 ],
[ 0, 0, 0, 1, ex[0], ey[0]],
[ 1, ex[1], ey[1], 0, 0, 0 ],
[ 0, 0, 0, 1, ex[1], ey[1]],
[ 1, ex[2], ey[2], 0, 0, 0 ],
[ 0, 0, 0, 1, ex[2], ey[2]]
def plantf(ex,ey,ep,es): """ Compute internal element force vector in a triangular element in plane stress or plane strain. Parameters: ex = [x1,x2,x3] node coordinates ey = [y1,y2,y3] ep = [ptype,t] ptype: analysis type t: thickness es = [[sigx,sigy,[sigz],tauxy] element stress matrix [ ...... ]] one row for each element OUTPUT: fe = [[f1],[f2],...,[f8]] internal force vector """ ptype,t = ep colD = es.shape[1] #--------- plane stress -------------------------------------- if ptype == 1: C = mat([ [1, ex[0], ey[0], 0, 0, 0], [0, 0, 0, 1, ex[0], ey[0]], [1, ex[1], ey[1], 0, 0, 0], [0, 0, 0, 1, ex[1], ey[1]], [1, ex[2], ey[2], 0, 0, 0], [0, 0, 0, 1, ex[2], ey[2]] ]) A = 0.5*linalg.det(mat([ [1, ex[0], ey[0]], [1, ex[1], ey[1]], [1, ex[2], ey[2]] ])) B = mat([ [0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 1, 0, 1, 0] ])*linalg.inv(C) if colD > 3: stress = asmatrix(es[ix_((0,1,3))]) else: stress = asmatrix(es) ef = (A*t*B.T*stress.T).T return reshape(asarray(ef),6) #--------- plane strain -------------------------------------- elif ptype == 2: C = mat([ [1, ex[0], ey[0], 0, 0, 0], [0, 0, 0, 1, ex[0], ey[0]], [1, ex[1], ey[1], 0, 0, 0], [0, 0, 0, 1, ex[1], ey[1]], [1, ex[2], ey[2], 0, 0, 0], [0, 0, 0, 1, ex[2], ey[2]] ]) A = 0.5*linalg.det(mat([ [1, ex[0], ey[0]], [1, ex[1], ey[1]], [1, ex[2], ey[2]] ])) B = mat([ [0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 1, 0, 1, 0] ])*linalg.inv(C) if colD > 3: stress = asmatrix(es[ix_((1,2,4))]) else: stress = asmatrix(es) ef = (A*t*B.T*stress.T).T return reshape(asarray(ef),6) else: print "Error ! Check first argument, ptype=1 or 2 allowed" return None
802fb797c1c505715968b6bf327cad5ed7e0e712 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/1235/802fb797c1c505715968b6bf327cad5ed7e0e712/pycalfem.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 886, 970, 74, 12, 338, 16, 402, 16, 881, 16, 281, 4672, 3536, 8155, 2713, 930, 2944, 3806, 316, 279, 6882, 13077, 930, 316, 11017, 384, 663, 578, 11017, 16853, 18, 225, 7012, 30, 225, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 886, 970, 74, 12, 338, 16, 402, 16, 881, 16, 281, 4672, 3536, 8155, 2713, 930, 2944, 3806, 316, 279, 6882, 13077, 930, 316, 11017, 384, 663, 578, 11017, 16853, 18, 225, 7012, 30, 225, ...
yrestarr.append(self.yresult)
yresarr.append(self.yresult)
def saveresultsinhdf5(self, filename, filemode): """Save simulation results in a HDF5 format file with filename. filename - full path and name of file (should end in hf5 for consistency. filemode - ["w"|"a"]: "w" specifies write to a new file, overwriting existing one "a" specifies append to current file or create if does not exist. """ #Check whether we should store ks and set group name accordingly if self.k is None: grpname = "bgresults" else: grpname = "results" try: rf = tables.openFile(filename, filemode) try: if filemode is "w": #Create groups required resgroup = rf.createGroup(rf.root, grpname, "Results of simulation") tresarr = rf.createArray(resgroup, "tresult", self.tresult) yresarr = rf.createEArray(resgroup, "yresult", tables.Float64Atom(), self.yresult[:,:,0:0].shape) paramstab = rf.createTable(resgroup, "parameters", self.gethf5paramsdict()) #Need to check if results are k dependent if grpname is "results": foystarr = rf.createEArray(resgroup, "foystart", tables.Float64Atom(), self.foystart[:,0:0].shape) fotstarr = rf.createEArray(resgroup, "fotstart", tables.Float64Atom(), self.yresult[:,:,0:0].shape) karr = rf.createEArray(resgroup, "k", tables.Float64Atom(), (0,)) elif filemode is "a": try: resgroup = rf.getNode(rf.root, grpname) paramstab = resgroup.parameters yresarr = resgroup.yresult tres = resgroup.tresult[:] if grpname is "results": foystarr = resgroup.foystart fotstarr = resgroup.fotstart karr = regroup.k except NoSuchNodeError: raise IOError("File is not in correct format! Correct results tables do not exist!") if N.shape(tres) != N.shape(self.tresult): raise IOError("Results file has different size of tresult!") else: raise IOError("Can only write or append to files!") #Now save data #Save parameters paramstabrow = paramstab.row params = self.callingparams() for key in params: paramstabrow[key] = params[key] paramstabrow.append() #Add to table paramstab.flush() #Save yresults yrestarr.append(self.yresult) if grpname is "results": karr.append(self.k) foystarr.append(self.foystart) fotstarr.append(self.fotstart) rf.flush() #Log success self._log.debug("Successfully wrote results to file " + filename) finally: rf.close() except IOError: raise
fbfb3bd10f1e28b79d13aaa1ffa69f964c6b3160 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/7283/fbfb3bd10f1e28b79d13aaa1ffa69f964c6b3160/cosmomodels.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 7864, 502, 281, 406, 21861, 26428, 25, 12, 2890, 16, 1544, 16, 585, 3188, 4672, 3536, 4755, 14754, 1686, 316, 279, 21673, 25, 740, 585, 598, 1544, 18, 1544, 300, 1983, 589, 471, 508, 4...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 7864, 502, 281, 406, 21861, 26428, 25, 12, 2890, 16, 1544, 16, 585, 3188, 4672, 3536, 4755, 14754, 1686, 316, 279, 21673, 25, 740, 585, 598, 1544, 18, 1544, 300, 1983, 589, 471, 508, 4...
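Here the diff repairs a one-character typo: yrestarr was never defined, only yresarr, so the removed line raises NameError the first time that branch runs. A tiny reproduction:

yresarr = []
try:
    yrestarr.append(42)   # the misspelled handle from the removed line
except NameError as e:
    print(e)              # name 'yrestarr' is not defined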
if not self.is_active(): return False
self._screen.get_crtc_by_xid(self.get_crtc()).disable()
if not self.is_active(): return
self._mode = None
self._crtc._outputs.remove(self)
self._crtc = None
self._changes = self._changes | CHANGES_CRTC | CHANGES_MODE
def disable(self):
    """Disables the output"""
    if not self.is_active(): return False
    #FIXME: Check for other outputs that are connected on the same crtc
    self._screen.get_crtc_by_xid(self.get_crtc()).disable()
0e41f2a133d91b605652655ee11819e181c12a3b /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/11047/0e41f2a133d91b605652655ee11819e181c12a3b/xrandr.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4056, 12, 2890, 4672, 3536, 1669, 1538, 326, 876, 8395, 309, 486, 365, 18, 291, 67, 3535, 13332, 327, 1083, 468, 25810, 30, 2073, 364, 1308, 6729, 716, 854, 5840, 603, 326, 1967, 30677, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4056, 12, 2890, 4672, 3536, 1669, 1538, 326, 876, 8395, 309, 486, 365, 18, 291, 67, 3535, 13332, 327, 1083, 468, 25810, 30, 2073, 364, 1308, 6729, 716, 854, 5840, 603, 326, 1967, 30677, ...
if fromitem(item, LOWERREQS):
    for i in range(LAYER_RIGHTHAND, LAYER_MOUNT):
        litem = char.itemonlayer(i)
        if litem:
            litem.resendtooltip()
def onEquip(char, item, layer): changed = 0 # Resend all equipment with requirements if fromitem(item, LOWERREQS): for i in range(LAYER_RIGHTHAND, LAYER_MOUNT): litem = char.itemonlayer(i) if litem: litem.resendtooltip() # Bonus Strength if item.hastag('boni_str'): char.strength = char.strength + int(item.gettag('boni_str')) changed = 1 # Bonus Dex if item.hastag('boni_dex'): char.dexterity = char.dexterity + int(item.gettag('boni_dex')) changed = 1 # Bonus Int if item.hastag('boni_int'): char.intelligence = char.intelligence + int(item.gettag('boni_int')) changed = 1 # Add hitpoint regeneration rate bonus if item.hastag('regenhitpoints'): if char.hastag('regenhitpoints'): regenhitpoints = int(char.gettag('regenhitpoints')) + int(item.gettag('regenhitpoints')) else: regenhitpoints = int(item.gettag('regenhitpoints')) char.settag('regenhitpoints', regenhitpoints) # Add stamina regeneration rate bonus if item.hastag('regenstamina'): if char.hastag('regenstamina'): regenstamina = int(char.gettag('regenstamina')) + int(item.gettag('regenstamina')) else: regenstamina = int(item.gettag('regenstamina')) char.settag('regenstamina', regenstamina) # Update Stats if changed: char.updatestats()
768966737612f42a199c2f6cffe24c72d0fb439e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/2534/768966737612f42a199c2f6cffe24c72d0fb439e/equipment.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 603, 13142, 625, 12, 3001, 16, 761, 16, 3018, 4672, 3550, 273, 374, 225, 468, 1124, 409, 777, 1298, 11568, 598, 8433, 7, 605, 22889, 934, 13038, 309, 761, 18, 76, 689, 346, 2668, 18688...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 603, 13142, 625, 12, 3001, 16, 761, 16, 3018, 4672, 3550, 273, 374, 225, 468, 1124, 409, 777, 1298, 11568, 598, 8433, 7, 605, 22889, 934, 13038, 309, 761, 18, 76, 689, 346, 2668, 18688...
raise NotImplementedException()
raise NotImplementedError()
def getParser(self):
    raise NotImplementedException()
03ad1fcb5b9c995bdcae5e56834a1418f0c71c32 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/2259/03ad1fcb5b9c995bdcae5e56834a1418f0c71c32/converter.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 20804, 12, 2890, 4672, 1002, 10051, 503, 1435, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 20804, 12, 2890, 4672, 1002, 10051, 503, 1435, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, ...
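Python has no built-in named NotImplementedException; the correct exception is NotImplementedError, so the removed line fails with NameError instead of signalling an abstract method. A minimal sketch of the intended idiom (class names are illustrative):

class Converter(object):
    def getParser(self):
        raise NotImplementedError("subclasses must provide a parser")

class HtmlConverter(Converter):
    def getParser(self):
        return "html-parser"  # stand-in for a real parser object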
bp = bdb.Breakpoint.bpbynumber[bpnum]
try:
    bp = bdb.Breakpoint.bpbynumber[bpnum]
except IndexError:
    print >>self.stdout, 'Breakpoint index %r is not valid' % args[0]
    return
def do_condition(self, arg):
    # arg is breakpoint number and condition
    args = arg.split(' ', 1)
    try:
        bpnum = int(args[0].strip())
    except ValueError:
        # something went wrong
        print >>self.stdout, \
            'Breakpoint index %r is not a number' % args[0]
        return
    try:
        cond = args[1]
    except:
        cond = None
    bp = bdb.Breakpoint.bpbynumber[bpnum]
    if bp:
        bp.cond = cond
        if not cond:
            print >>self.stdout, 'Breakpoint', bpnum,
            print >>self.stdout, 'is now unconditional.'
acf441ff19c02e760baf57d83b27e8dfaeee0c56 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/8125/acf441ff19c02e760baf57d83b27e8dfaeee0c56/pdb.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 741, 67, 4175, 12, 2890, 16, 1501, 4672, 468, 1501, 353, 18820, 1300, 471, 2269, 833, 273, 1501, 18, 4939, 2668, 2265, 404, 13, 775, 30, 9107, 2107, 273, 509, 12, 1968, 63, 20, 8009, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 741, 67, 4175, 12, 2890, 16, 1501, 4672, 468, 1501, 353, 18820, 1300, 471, 2269, 833, 273, 1501, 18, 4939, 2668, 2265, 404, 13, 775, 30, 9107, 2107, 273, 509, 12, 1968, 63, 20, 8009, ...
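The pdb fix wraps the breakpoint lookup in try/except so a bad number yields a diagnostic instead of an unhandled IndexError. The same defensive-indexing pattern in isolation (the list here stands in for bdb.Breakpoint.bpbynumber):

breakpoints = ['bp-1', 'bp-2']

def lookup(bpnum):
    try:
        return breakpoints[bpnum]
    except IndexError:
        print('Breakpoint index %r is not valid' % bpnum)

print(lookup(1))    # 'bp-2'
print(lookup(99))   # prints the message, returns None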
class pAdicGeneralExtensionRingLazy(pAdicGeneralExtensionGeneric, pAdicLazyRingGeneric):
    def __init__(self, upoly, epoly, poly, prec, halt, print_mode, names):
        pAdicGeneralExtensionGeneric.__init__(self, upoly, epoly, poly, prec, print_mode, names, pAdicGeneralExtensionLazyElement)
        pAdicLazyRingGeneric.__init__(self, upoly.base_ring().prime(), prec, print_mode, names, halt)
def __init__(self, upoly, epoly, poly, prec, halt, print_mode, names):
    pAdicGeneralExtensionGeneric.__init__(self, upoly, epoly, poly, prec, print_mode, names, pAdicGeneralExtensionCappedRelativeElement)
b9ffa8b1f92ad68de3210909edfe6b8fd5f5e7af /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/9417/b9ffa8b1f92ad68de3210909edfe6b8fd5f5e7af/padic_extension_leaves.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 731, 355, 93, 16, 425, 16353, 16, 7573, 16, 13382, 16, 18389, 16, 1172, 67, 3188, 16, 1257, 4672, 293, 1871, 335, 12580, 3625, 7014, 16186, 2738, 972, 12...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 731, 355, 93, 16, 425, 16353, 16, 7573, 16, 13382, 16, 18389, 16, 1172, 67, 3188, 16, 1257, 4672, 293, 1871, 335, 12580, 3625, 7014, 16186, 2738, 972, 12...
marcher.march( marchBound, marchResolution )
marcher.march( marchBound, marchResolution, -0.000001 )
def testUnion( self ) :
    """ Test implicit surface CSG union """
    sphere1 = SphereImplicitSurfaceFunctionV3ff( V3f(0,0,0), 1 )
    sphere2 = SphereImplicitSurfaceFunctionV3ff( V3f(0,1,0), 1 )
    csgFn = CSGImplicitSurfaceFunctionV3ff( sphere1, sphere2, CSGImplicitSurfaceFunctionV3ff.Mode.Union )
    builder = MeshPrimitiveBuilderf()
    marcher = MarchingCubesf( csgFn, builder )
    marchMin = V3f(-2.5, -2.5, -2.5)
    marchMax = V3f( 2.5, 2.5, 2.5)
    marchBound = Box3f( marchMin, marchMax )
    marchResolution = V3i( 30, 30, 30 )
    marcher.march( marchBound, marchResolution )
    m = builder.mesh()
    # Verified visually
    self.assertEqual( len( m.vertexIds ), 5760 )
8913efd88a1927c6feb385fcacb4a7ef5c95264e /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/9042/8913efd88a1927c6feb385fcacb4a7ef5c95264e/CSGImplicitSurfaceFunction.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 14325, 12, 365, 262, 294, 3536, 7766, 10592, 9034, 6761, 43, 7812, 3536, 20041, 21, 273, 348, 9346, 15787, 11508, 2083, 58, 23, 1403, 12, 776, 23, 74, 12, 20, 16, 20, 16, 20, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 14325, 12, 365, 262, 294, 3536, 7766, 10592, 9034, 6761, 43, 7812, 3536, 20041, 21, 273, 348, 9346, 15787, 11508, 2083, 58, 23, 1403, 12, 776, 23, 74, 12, 20, 16, 20, 16, 20, 3...
subfolder = self._box.add_folder('subfolder')
if not hasattr(os, "stat") or not hasattr(os, "umask"):
    return
orig_umask = os.umask(0)
try:
    subfolder = self._box.add_folder('subfolder')
finally:
    os.umask(orig_umask)
def test_file_perms(self):
    # From bug #3228, we want to verify that the file created inside a Maildir
    # subfolder isn't marked as executable.
    subfolder = self._box.add_folder('subfolder')
    path = os.path.join(subfolder._path, 'maildirfolder')
    st = os.stat(path)
    perms = st.st_mode
    self.assertFalse((perms & 0111)) # Execute bits should all be off.
b92cf32511f6406399e4ad31a75017e9dc97f41e /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/3187/b92cf32511f6406399e4ad31a75017e9dc97f41e/test_mailbox.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 768, 67, 15969, 12, 2890, 4672, 468, 6338, 7934, 468, 1578, 6030, 16, 732, 2545, 358, 3929, 716, 326, 585, 2522, 4832, 279, 490, 69, 545, 481, 468, 31001, 5177, 1404, 9350, 4...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 768, 67, 15969, 12, 2890, 4672, 468, 6338, 7934, 468, 1578, 6030, 16, 732, 2545, 358, 3929, 716, 326, 585, 2522, 4832, 279, 490, 69, 545, 481, 468, 31001, 5177, 1404, 9350, 4...
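The hardened test zeroes the umask before creating the folder, so the asserted permission bits cannot be silently stripped by the environment, and restores it in a finally block. A sketch of that save/restore discipline as a reusable helper (the helper name is made up for illustration):

import os

def run_with_known_umask(action):
    if not hasattr(os, "umask"):
        return  # platform without umask support; skip rather than fail
    orig_umask = os.umask(0)  # clear the mask and remember the old value
    try:
        return action()
    finally:
        os.umask(orig_umask)  # always restore, even if action() raises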
modcomponents = container.name().split('.')
modcomponents = container_name.split('.')
def findUID(name, container, docmap=None): """ Attempt to find the UID for the object that can be accessed with the name C{name} from the module C{module}. @param name: The name used to identify the object. @type name: C{string} @param container: The UID of the class or module containing the object. @type container: L{UID} @param docmap: A documentation map, which is used to check if C{name} is the name of a module variable, class variable, or instance variable. @type docmap: L{objdoc.DocMap} @return: The UID for the object that can be accessed with the name C{name} from the module C{module}; or C{None} if no object was found. @rtype: L{UID} or C{None} """ if name == '': return None if container is None: return None if not (container.is_module() or container.is_class()): raise ValueError('Bad container %r' % container) # Is it the short name for a member of the containing class? if container.is_class(): if _is_variable_in(name, container, docmap): val = None # it may not be a real object return make_uid(val, container, name) elif container.value().__dict__.has_key(name): cls = container.value() obj = cls.__dict__[name] if type(obj) is _FunctionType: return make_uid(new.instancemethod(obj, None, cls), container, name) else: return make_uid(obj, container, name) else: container = container.module() module = container.value() components = name.split('.') # Is it a variable in the containing module? if _is_variable_in(name, container, docmap): val = None # it may not be a real object return make_uid(val, container, name) # Is it an object in the containing module? try: obj = module for component in components: obj_parent = obj obj_name = component try: obj = obj.__dict__[component] except: raise KeyError() try: return make_uid(obj, make_uid(obj_parent), obj_name) except: pass except KeyError: pass # Is it a module name? The module name may be relative to the # containing module, or any of its ancestors. modcomponents = container.name().split('.') for i in range(len(modcomponents)-1, -1, -1): try: modname = '.'.join(modcomponents[:i]+[name]) return(make_uid(import_module(modname))) except: pass # Is it an object in a module? The module part of the name may be # relative to the containing module, or any of its ancestors. modcomponents = container.name().split('.') for i in range(len(modcomponents)-1, -1, -1): for j in range(len(components)-1, 0, -1): try: modname = '.'.join(modcomponents[:i]+components[:j]) objname = '.'.join(components[j:]) mod = import_module(modname) if _is_variable_in(name, make_uid(mod), docmap): val = None # it may not be a real object return make_uid(val, container, name) obj = getattr(import_module(modname), objname) return make_uid(obj, make_uid(mod), objname) except: pass # We couldn't find it; return None. return None
f0b775424bcce0909bd1f7a706c6cfdc3470be31 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/11420/f0b775424bcce0909bd1f7a706c6cfdc3470be31/uid.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1104, 3060, 12, 529, 16, 1478, 16, 997, 1458, 33, 7036, 4672, 3536, 12864, 358, 1104, 326, 10034, 364, 326, 733, 716, 848, 506, 15539, 598, 326, 508, 385, 95, 529, 97, 628, 326, 1605, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1104, 3060, 12, 529, 16, 1478, 16, 997, 1458, 33, 7036, 4672, 3536, 12864, 358, 1104, 326, 10034, 364, 326, 733, 716, 848, 506, 15539, 598, 326, 508, 385, 95, 529, 97, 628, 326, 1605, ...
argument = pvalue.strip('"').replace('\\"', '"')
argument = pvalue.strip('"').replace('\\"', '"').replace('\\\\', '\\')
def revert_commandparams(document): regex = re.compile(r'(\S+)\s+(.+)') i = 0 while 1: i = find_token(document.body, "\\begin_inset LatexCommand", i) if i == -1: break name = document.body[i].split()[2] j = find_end_of_inset(document.body, i + 1) preview_line = "" option1 = "" option2 = "" argument = "" for k in range(i + 1, j): match = re.match(regex, document.body[k]) if match: pname = match.group(1) pvalue = match.group(2) if pname == "preview": preview_line = document.body[k] elif (commandparams_info[name][0] != "" and pname == commandparams_info[name][0]): option1 = pvalue.strip('"').replace('\\"', '"') elif (commandparams_info[name][1] != "" and pname == commandparams_info[name][1]): option2 = pvalue.strip('"').replace('\\"', '"') elif (commandparams_info[name][2] != "" and pname == commandparams_info[name][2]): argument = pvalue.strip('"').replace('\\"', '"') elif document.body[k].strip() != "": document.warning("Ignoring unknown contents `%s' in command inset %s." % (document.body[k], name)) if name == "bibitem": if option1 == "": lines = ["\\bibitem {%s}" % argument] else: lines = ["\\bibitem [%s]{%s}" % (option1, argument)] else: if option1 == "": if option2 == "": lines = ["\\begin_inset LatexCommand \\%s{%s}" % (name, argument)] else: lines = ["\\begin_inset LatexCommand \\%s[][%s]{%s}" % (name, option2, argument)] else: if option2 == "": lines = ["\\begin_inset LatexCommand \\%s[%s]{%s}" % (name, option1, argument)] else: lines = ["\\begin_inset LatexCommand \\%s[%s][%s]{%s}" % (name, option1, option2, argument)] if name != "bibitem": if preview_line != "": lines.append(preview_line) lines.append('') lines.append('\\end_inset') document.body[i:j+1] = lines i = j + 1
7feaac16d224b7adb406eb79810b2b84e4816ca0 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/7514/7feaac16d224b7adb406eb79810b2b84e4816ca0/lyx_1_5.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 15226, 67, 3076, 2010, 12, 5457, 4672, 3936, 273, 283, 18, 11100, 12, 86, 11, 4713, 55, 14456, 87, 15, 21133, 2506, 13, 277, 273, 374, 1323, 404, 30, 277, 273, 1104, 67, 2316, 12, 54...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 15226, 67, 3076, 2010, 12, 5457, 4672, 3936, 273, 283, 18, 11100, 12, 86, 11, 4713, 55, 14456, 87, 15, 21133, 2506, 13, 277, 273, 374, 1323, 404, 30, 277, 273, 1104, 67, 2316, 12, 54...
return self.enclosed_ro.match(data)
return self.enclosed_ro.search(data)
def match_complete (self, pos, tagbuf):
    """
    We know that the tag (and tag attributes) match. Now match
    the enclosing block. Return True on a match.
    """
    if not self.enclosed:
        # no enclosed expression => match
        return True
    # put buf items together for matching
    items = tagbuf[pos:]
    data = wc.filter.html.tagbuf2data(items, StringIO()).getvalue()
    return self.enclosed_ro.match(data)
7dd938cf2887c827895292966733d8e0500470da /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/7dd938cf2887c827895292966733d8e0500470da/HtmlrewriteRule.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 845, 67, 6226, 261, 2890, 16, 949, 16, 1047, 4385, 4672, 3536, 1660, 5055, 716, 326, 1047, 261, 464, 1047, 1677, 13, 845, 18, 4494, 845, 326, 16307, 1203, 18, 2000, 1053, 603, 279, 845...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 845, 67, 6226, 261, 2890, 16, 949, 16, 1047, 4385, 4672, 3536, 1660, 5055, 716, 326, 1047, 261, 464, 1047, 1677, 13, 845, 18, 4494, 845, 326, 16307, 1203, 18, 2000, 1053, 603, 279, 845...
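Replacing enclosed_ro.match with enclosed_ro.search is the whole fix: match() only tests at the start of the string, while search() scans the entire buffer, which is what matching an enclosed block requires. A two-line demonstration:

import re

enclosed_ro = re.compile(r'needle')
data = 'hay hay needle hay'
print(bool(enclosed_ro.match(data)))   # False: match() anchors at position 0
print(bool(enclosed_ro.search(data)))  # True: search() scans the whole buffer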
value.append(any.exportToXml()) value = ''.join(value) attributes[(name, format)].append(value.decode('utf8'))
content.append(any.exportToXml()) content = ''.join(content) attributes[(name, format)].append(content.decode('utf8'))
def sso_after_response(request, login, relay_state = None): s = get_service_provider_settings() if not s: return error_page(request, _('Service provider not configured')) # If there is no inResponseTo: IDP initiated # else, check that the response id is the same irt = None try: irt = login.response.assertion[0].subject.subjectConfirmation.subjectConfirmationData.inResponseTo except: return error_page(request, _('SSO/sso_after_response: No Response ID')) if irt and not check_response_id(login): return error_page(request, _('SSO/sso_after_response: Request and Response ID do not match')) #TODO: Register assertion and check for replay assertion = login.response.assertion[0] if not assertion: return error_page(request, _('SSO/sso_after_response: Assertion missing')) # Check: Check that the url is the same as in the assertion try: if assertion.subject.subjectConfirmation.subjectConfirmationData.recipient != \ request.build_absolute_uri().partition('?')[0]: return error_page(request, _('SSO/sso_after_response: SubjectConfirmation Recipient Mismatch')) except: return error_page(request, _('SSO/sso_after_response: Errot checking SubjectConfirmation Recipient')) # Check: SubjectConfirmation try: if assertion.subject.subjectConfirmation.method != \ 'urn:oasis:names:tc:SAML:2.0:cm:bearer': return error_page(request, _('SSO/sso_after_response: Unknown SubjectConfirmation Method')) except: return error_page(request, _('SSO/sso_after_response: Error checking SubjectConfirmation Method')) # Check: AudienceRestriction try: audience_ok = False for audience_restriction in assertion.conditions.audienceRestriction: if audience_restriction.audience != login.server.providerId: return error_page(request, _('SSO/sso_after_response: Incorrect AudienceRestriction')) audience_ok = True if not audience_ok: return error_page(request, _('SSO/sso_after_response: Incorrect AudienceRestriction')) except: return error_page(request, _('SSO/sso_after_response: Error checking AudienceRestriction')) # Check: notBefore, notOnOrAfter now = datetime.datetime.utcnow() try: not_before = assertion.subject.subjectConfirmation.subjectConfirmationData.notBefore except: return error_page(request, _('SSO/sso_after_response: missing subjectConfirmationData')) not_on_or_after = assertion.subject.subjectConfirmation.subjectConfirmationData.notOnOrAfter if irt: if not_before is not None: return error_page(request, _('SSO/sso_after_response: assertion in response to an AuthnRequest, notBefore MUST not be present in SubjectConfirmationData')) elif not_before is None or not not_before.endswith('Z'): return error_page(request, _('SSO/sso_after_response: invalid notBefore value ' + not_before)) if not_on_or_after is None or not not_on_or_after.endswith('Z'): return error_page(request, _('SSO/sso_after_response: invalid notOnOrAfter value')) try: if not_before and now < datetime.datetime.fromtimestamp(time.mktime(time.strptime(not_before,"%Y-%m-%dT%H:%M:%S"))): return error_page(request, _('SSO/sso_after_response: Assertion received too early')) except: return error_page(request, _('SSO/sso_after_response: invalid notBefore value ' + notBefore)) try: if not_on_or_after and now > iso8601_to_datetime(not_on_or_after): return error_page(request, _('SSO/sso_after_response: Assertion expired')) except: return error_page(request, _('SSO/sso_after_response: invalid notOnOrAfter value')) try: login.acceptSso() except lasso.Error, error: return error_page(request, _('SSO/sso_after_response: %s') %lasso.strError(error[0])) attributes = {} for att_statement 
in login.assertion.attributeStatement: for attribute in att_statement.attribute: try: name, format, nickname = attribute.name.decode('ascii'), attribute.nameFormat.decode('ascii'), \ attribute.friendlyName except UnicodeDecodeError: message = 'SSO/sso_after_response: name or format of an attribute failed to decode as ascii: %r %r' logging.error(message % (attribute.name, attribute.format)) continue try: values = attribute.value if values: attributes[(name, format)] = [] if nickname: attributes[nickname] = attributes[(name, format)] for value in values: value = [] for any in value.any: value.append(any.exportToXml()) value = ''.join(value) attributes[(name, format)].append(value.decode('utf8')) except UnicodeDecodeError: message = 'SSO/sso_after_response: attribute value is not utf8 encoded %r' logging.error(message % value) continue user = request.user if not request.user.is_anonymous(): ''' The user is logged in. The user may be already logged and yet may have performed a SSO. - Either with a transient nameID: To bring a membership credential for instance. - Or with a persistent nameID: idem + to add a new federation. ''' #TODO: If transient nameID and logged user, only logging if login.nameIdentifier.format == \ lasso.SAML2_NAME_IDENTIFIER_FORMAT_TRANSIENT: return error_page(request, _('Transient account policy not yet implemented')) fed = lookup_federation_by_name_identifier(login) if fed: save_session(request, login) save_federation(request, login) maintain_liberty_session_on_service_provider(request, login) return redirect_to_target(request) else: fed = add_federation(user, login) if not fed: return error_page(request, _('Erreur adding new federation for this user')) save_session(request, login) save_federation(request, login) maintain_liberty_session_on_service_provider(request, login) return redirect_to_target(request) else: ''' Else the user is logged out. - Either with a transient nameID: - We create a temporary session. - Or we ask for an authentication. - Or with a persistent nameID: - If already federated: login. - Else: - We ask for an account linking. - Or we create an account with this federation. 
''' if login.nameIdentifier.format == \ lasso.SAML2_NAME_IDENTIFIER_FORMAT_TRANSIENT: if s.handle_transient == 'AUTHSAML2_UNAUTH_TRANSIENT_ASK_AUTH': return error_page(request, _('Transient access policy not yet implemented')) if s.handle_transient == 'AUTHSAML2_UNAUTH_TRANSIENT_OPEN_SESSION': #TODO: Logging from backends import AuthSAML2Backend user = AuthSAML2Backend().create_user(nameId=login.nameIdentifier.content) key = request.session.session_key auth_login(request, user) signals.auth_login.send(sender=None, request=request, attributes=attributes) if request.session.test_cookie_worked(): request.session.delete_test_cookie() save_session(request, login) return redirect_to_target(request) return error_page(request, _('Transient access policy: Configuration error')) if login.nameIdentifier.format == \ lasso.SAML2_NAME_IDENTIFIER_FORMAT_PERSISTENT: from backends import AuthSAML2Backend user = AuthSAML2Backend().authenticate(request,login) if user: key = request.session.session_key auth_login(request, user) signals.auth_login.send(sender=None, request=request, attributes=attributes) if request.session.test_cookie_worked(): request.session.delete_test_cookie() save_session(request, login) save_federation(request, login) maintain_liberty_session_on_service_provider(request, login) return redirect_to_target(request, key) if s.handle_persistent == 'AUTHSAML2_UNAUTH_PERSISTENT_ACCOUNT_LINKING_BY_AUTH': register_federation_in_progress(request,login.nameIdentifier.content) auth_login(request, user) signals.auth_login.send(sender=None, request=request, attributes=attributes) save_session(request, login) save_federation_temp(request, login) maintain_liberty_session_on_service_provider(request, login) return render_to_response('auth/saml2/account_linking.html', context_instance=RequestContext(request)) if s.handle_persistent == 'AUTHSAML2_UNAUTH_PERSISTENT_CREATE_USER_PSEUDONYMOUS': user = AuthSAML2Backend().create_user(nameId=login.nameIdentifier.content) key = request.session.session_key auth_login(request, user) signals.auth_login.send(sender=None, request=request, attributes=attributes) if request.session.test_cookie_worked(): request.session.delete_test_cookie() save_session(request, login) maintain_liberty_session_on_service_provider(request, login) return redirect_to_target(request, key) return error_page(request, _('Persistent Account policy: Configuration error')) return error_page(request, _('Transient access policy: NameId format not supported')) #TODO: Relay state
7cd7e7964660fa014725c985c74ccf06b2834dcf /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11208/7cd7e7964660fa014725c985c74ccf06b2834dcf/saml2_endpoints.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 27250, 67, 5205, 67, 2740, 12, 2293, 16, 3925, 16, 18874, 67, 2019, 273, 599, 4672, 272, 273, 336, 67, 3278, 67, 6778, 67, 4272, 1435, 309, 486, 272, 30, 327, 555, 67, 2433, 12, 2293...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 27250, 67, 5205, 67, 2740, 12, 2293, 16, 3925, 16, 18874, 67, 2019, 273, 599, 4672, 272, 273, 336, 67, 3278, 67, 6778, 67, 4272, 1435, 309, 486, 272, 30, 327, 555, 67, 2433, 12, 2293...
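The rename from value to content matters because the removed code rebound the loop variable (value = [] inside for value in values), destroying the attribute value being decoded. A stripped-down reproduction of the fixed shape (the nested lists stand in for lasso attribute values):

values = [['<a/>', '<b/>'], ['<c/>']]
decoded = []
for value in values:
    content = []                 # the rename; 'value = []' here would clobber the loop item
    for any_part in value:
        content.append(any_part)
    decoded.append(''.join(content))
print(decoded)                   # ['<a/><b/>', '<c/>']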
universal_newlines=True)
universal_newlines=True, silent_ok=True)
def GetBaseFile(self, filename):
    status = self.GetStatus(filename)
    base_content = None
    new_content = None
eb832f5bde044b8ab2c68c9bb98940bdbe9b728b /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/476/eb832f5bde044b8ab2c68c9bb98940bdbe9b728b/upload.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 968, 2171, 812, 12, 2890, 16, 1544, 4672, 1267, 273, 365, 18, 967, 1482, 12, 3459, 13, 1026, 67, 1745, 273, 599, 394, 67, 1745, 273, 599, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 968, 2171, 812, 12, 2890, 16, 1544, 4672, 1267, 273, 365, 18, 967, 1482, 12, 3459, 13, 1026, 67, 1745, 273, 599, 394, 67, 1745, 273, 599, 2, -100, -100, -100, -100, -100, -100, -100, ...
th = contol / 2.0
th = 0.0
def _getDirection(self, approach):
    if hasattr(self, 'direction'):
        return self.direction.copy()
    p = self.p
    contol = p.contol
    maxRes, fname, ind = self.mr_alt(1)
    x = self.x
    if self.isFeas(altLinInEq=True):
        #or (useCurrentBestFeasiblePoint and hasattr(p, 'currentBestFeasiblePoint') and self.f() - p.currentBestFeasiblePoint.f() > self.mr()):
        #if (maxRes <= p.contol and all(isfinite(self.df())) and (p.isNaNInConstraintsAllowed or self.nNaNs() == 0)) :
        self.direction, self.dType = self.df(),'f'
        return self.direction.copy()
    else:
        if approach == 'all active':
7c402b86c7f3b43aff3ac17f09053cdbf8ac261d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/6196/7c402b86c7f3b43aff3ac17f09053cdbf8ac261d/Point.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 588, 8212, 12, 2890, 16, 17504, 4672, 309, 3859, 12, 2890, 16, 296, 9855, 11, 4672, 327, 365, 18, 9855, 18, 3530, 1435, 293, 273, 365, 18, 84, 466, 355, 273, 293, 18, 1213, 355,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 588, 8212, 12, 2890, 16, 17504, 4672, 309, 3859, 12, 2890, 16, 296, 9855, 11, 4672, 327, 365, 18, 9855, 18, 3530, 1435, 293, 273, 365, 18, 84, 466, 355, 273, 293, 18, 1213, 355,...
self.silc.command_call('TOPIC %s %s' % (strip_leading_hash(msg.args[0]), msg.args[1]))
if len(msg.args) > 1:
    self.silc.command_call('TOPIC %s %s' % (strip_leading_hash(msg.args[0]), msg.args[1]))
else:
    self.silc.command_call('TOPIC %s' % strip_leading_hash(msg.args[0]))
def do_TOPIC(self, msg):
    self.silc.command_call('TOPIC %s %s' % (strip_leading_hash(msg.args[0]), msg.args[1]))
1bc8d60672688540dbe014cec953f3b1bd2e9462 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/2179/1bc8d60672688540dbe014cec953f3b1bd2e9462/Silc.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 741, 67, 4296, 20385, 12, 2890, 16, 1234, 4672, 365, 18, 25119, 71, 18, 3076, 67, 1991, 2668, 4296, 20385, 738, 87, 738, 87, 11, 738, 261, 6406, 67, 27200, 67, 2816, 12, 3576, 18, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 741, 67, 4296, 20385, 12, 2890, 16, 1234, 4672, 365, 18, 25119, 71, 18, 3076, 67, 1991, 2668, 4296, 20385, 738, 87, 738, 87, 11, 738, 261, 6406, 67, 27200, 67, 2816, 12, 3576, 18, 19...
new_doc.childNodes[0].appendChild(config) header = new_doc.createElement("header") for f in fields: field = new_doc.createElement("field") field_txt = new_doc.createTextNode('%s' % (f['name'],)) field.appendChild(field_txt) header.appendChild(field) new_doc.childNodes[0].appendChild(header) lines = new_doc.createElement("lines")
new_doc.append(config) header = etree.Element("header") for f in fields: field = etree.Element("field") field.text = f['name'] header.append(field) new_doc.append(header) lines = etree.Element("lines")
def _append_node(name, text): n = new_doc.createElement(name) t = new_doc.createTextNode(text) n.appendChild(t) config.appendChild(n)
ce28b92c9e314579334271a58fcf4c5c65873e36 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/7397/ce28b92c9e314579334271a58fcf4c5c65873e36/custom.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 6923, 67, 2159, 12, 529, 16, 977, 4672, 290, 273, 394, 67, 2434, 18, 2640, 1046, 12, 529, 13, 268, 273, 394, 67, 2434, 18, 2640, 17299, 12, 955, 13, 290, 18, 6923, 1763, 12, 8...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 6923, 67, 2159, 12, 529, 16, 977, 4672, 290, 273, 394, 67, 2434, 18, 2640, 1046, 12, 529, 13, 268, 273, 394, 67, 2434, 18, 2640, 17299, 12, 955, 13, 290, 18, 6923, 1763, 12, 8...
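A minimal self-contained sketch of the minidom-to-ElementTree move recorded above, assuming only the standard library; the element and field names here are hypothetical stand-ins:

from xml.etree import ElementTree as etree

new_doc = etree.Element("document")
header = etree.Element("header")
for name in ("partner", "date"):  # hypothetical field names
    field = etree.Element("field")
    field.text = name             # etree sets text directly, no createTextNode
    header.append(field)
new_doc.append(header)
print etree.tostring(new_doc)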
print "Total Game:" total_game.dump(" ")
def dump(self, header): game_count = float(self.count)
4788ddf5016880c1fe8c5fbca5e01c7157624fa5 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/600/4788ddf5016880c1fe8c5fbca5e01c7157624fa5/process.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4657, 12, 2890, 16, 1446, 4672, 7920, 67, 1883, 273, 1431, 12, 2890, 18, 1883, 13, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4657, 12, 2890, 16, 1446, 4672, 7920, 67, 1883, 273, 1431, 12, 2890, 18, 1883, 13, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100...
self.prop.window, _("Unable to edit song"),
self.prop, _("Unable to edit song"),
def save_song(model, path, iter): song = model[path][0] row = model[path] changed = False for i, h in enumerate(pattern.headers): if row[i + 2]: if not add or h not in song: song[h] = row[i + 2].decode("utf-8") changed = True else: vals = row[i + 2].decode("utf-8") for val in vals.split("\n"): if val not in song[h]: song.add(h, val) changed = True
44deb3636adb3eb180d1cc44ce811a1cb782bb80 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/4764/44deb3636adb3eb180d1cc44ce811a1cb782bb80/widgets.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1923, 67, 816, 75, 12, 2284, 16, 589, 16, 1400, 4672, 17180, 273, 938, 63, 803, 6362, 20, 65, 1027, 273, 938, 63, 803, 65, 3550, 273, 1083, 364, 277, 16, 366, 316, 4241, 12, 4951, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1923, 67, 816, 75, 12, 2284, 16, 589, 16, 1400, 4672, 17180, 273, 938, 63, 803, 6362, 20, 65, 1027, 273, 938, 63, 803, 65, 3550, 273, 1083, 364, 277, 16, 366, 316, 4241, 12, 4951, ...
f = codecs.open(os.path.join(self.libraryDir,self.mediaId + ".pickled"),mode='w',encoding="utf-8") cPickle.dump(self.__posts, f)
f = open(os.path.join(self.libraryDir,self.mediaId + ".pickled"),mode='wb') cPickle.dump(self.__posts, f, True)
def run(self): # Start the region timer so the media dies at the right time. self.p.enqueue('timer',(int(self.duration) * 1000,self.timerElapsed)) # Pointer to the currently displayed post: self.__pointer = -1 # Open previous cache file (if exists) and begin playing out posts # Lock the semaphore as we write to __posts to avoid changing the array as the display thread reads it. try: try: self.__lock.acquire() self.__posts = cPickle.load(file(os.path.join(self.libraryDir,self.mediaId + ".pickled"))) finally: self.__lock.release() except: # Erase any pickle file that may be existing but corrupted try: os.remove(os.path.join(self.libraryDir,self.mediaId + ".pickled")) except: pass
83f2b93054a66c026b568a9031e6f8fd17f9d143 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5464/83f2b93054a66c026b568a9031e6f8fd17f9d143/MicroblogMedia.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1086, 12, 2890, 4672, 468, 3603, 326, 3020, 5441, 1427, 326, 3539, 27890, 622, 326, 2145, 813, 18, 365, 18, 84, 18, 21798, 2668, 12542, 2187, 12, 474, 12, 2890, 18, 8760, 13, 380, 4336...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1086, 12, 2890, 4672, 468, 3603, 326, 3020, 5441, 1427, 326, 3539, 27890, 622, 326, 2145, 813, 18, 365, 18, 84, 18, 21798, 2668, 12542, 2187, 12, 474, 12, 2890, 18, 8760, 13, 380, 4336...
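A round-trip sketch of the pickling fix recorded above, assuming Python 2's cPickle; the filename is hypothetical. Binary mode ('wb'/'rb') matters once a binary protocol is requested:

import cPickle

data = {"posts": [1, 2, 3]}
f = open("posts.pickled", "wb")   # binary mode, as in the fix
cPickle.dump(data, f, True)       # True selects a binary protocol
f.close()
f = open("posts.pickled", "rb")
assert cPickle.load(f) == data
f.close()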
readlist = fileinfo[myfilehandle]['fobj'].readlines(*args)
try: readlist = fileinfo[myfilehandle]['fobj'].readlines(*args) except KeyError: raise ValueError("Invalid file object (probably closed).")
def readlines(self,*args): # prevent TOCTOU race with client changing my filehandle myfilehandle = self.filehandle restrictions.assertisallowed('file.readlines',*args)
4fb5dddefa372ea7ca176eb89dee92b27da7c593 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/7995/4fb5dddefa372ea7ca176eb89dee92b27da7c593/emulfile.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 17546, 12, 2890, 16, 14, 1968, 4672, 468, 5309, 8493, 1268, 26556, 17996, 598, 1004, 12770, 3399, 585, 4110, 3399, 768, 4110, 273, 365, 18, 768, 4110, 17499, 18, 11231, 291, 8151, 2668, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 17546, 12, 2890, 16, 14, 1968, 4672, 468, 5309, 8493, 1268, 26556, 17996, 598, 1004, 12770, 3399, 585, 4110, 3399, 768, 4110, 273, 365, 18, 768, 4110, 17499, 18, 11231, 291, 8151, 2668, ...
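A sketch of the error-translation idiom in the record above: a missing handle raises KeyError internally, and the wrapper resurfaces it as the ValueError a caller of file.readlines() would expect. The registry layout is a hypothetical stand-in:

fileinfo = {}   # hypothetical: handle -> {'fobj': open file}

def readlines_for(handle, *args):
    try:
        return fileinfo[handle]['fobj'].readlines(*args)
    except KeyError:
        raise ValueError("Invalid file object (probably closed).")

try:
    readlines_for(42)
except ValueError:
    pass   # unknown or closed handles fail predictably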
if dy == 0 or dx == 0: return False
if dy == 0 and dx == 0: continue
def HitTest(self, x, y): if not self._lineControlPoints: return False
58c132bbfcbbed7ca67ed6c2c278306b885bee48 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12725/58c132bbfcbbed7ca67ed6c2c278306b885bee48/_lines.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 670, 305, 4709, 12, 2890, 16, 619, 16, 677, 4672, 309, 486, 365, 6315, 1369, 3367, 5636, 30, 327, 1083, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 670, 305, 4709, 12, 2890, 16, 619, 16, 677, 4672, 309, 486, 365, 6315, 1369, 3367, 5636, 30, 327, 1083, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, ...
self.lirc.connect('/dev/lircd')
while 1: try: self.lirc.connect('/dev/lircd') break except: print "Couldn't open /dev/lircd, trying again in 10 seconds..." time.sleep(10)
def __init__(self): self.lirc = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
a0d9f09120e314303c55d1cea6b02d272864db1e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/11399/a0d9f09120e314303c55d1cea6b02d272864db1e/remote.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 4672, 365, 18, 80, 481, 71, 273, 2987, 18, 7814, 12, 7814, 18, 6799, 67, 10377, 60, 16, 2987, 18, 3584, 3507, 67, 13693, 13, 2, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 4672, 365, 18, 80, 481, 71, 273, 2987, 18, 7814, 12, 7814, 18, 6799, 67, 10377, 60, 16, 2987, 18, 3584, 3507, 67, 13693, 13, 2, -100, -100, -100, -100, -10...
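A self-contained variant of the reconnect loop recorded above; narrowing the bare except to socket.error is an editorial assumption, not the original code:

import socket, time

def connect_with_retry(path='/dev/lircd', delay=10):
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    while 1:
        try:
            s.connect(path)
            return s
        except socket.error:
            print "Couldn't open %s, trying again in %d seconds..." % (path, delay)
            time.sleep(delay)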
c = connection.execute ("select distinct OWNER from ALL_TAB_COLUMNS where TABLE_NAME = :table_name", {'table_name':name}) rows = c.fetchall() if not rows : raise exceptions.NoSuchTableError(table.name) else: if table.owner is not None: if table.owner.upper() in [r[0] for r in rows]: owner = table.owner.upper() else: raise exceptions.AssertionError("Specified owner %s does not own table %s"%(table.owner, table.name)) else: if len(rows)==1: owner = rows[0][0] else: raise exceptions.AssertionError("There are multiple tables with name %s in the schema, you must specifie owner"%table.name) c = connection.execute ("select COLUMN_NAME, DATA_TYPE, DATA_LENGTH, DATA_PRECISION, DATA_SCALE, NULLABLE, DATA_DEFAULT from ALL_TAB_COLUMNS where TABLE_NAME = :table_name and OWNER = :owner", {'table_name':name, 'owner':owner})
actual_name, owner, dblink = self._resolve_table_owner(connection, name, table) c = connection.execute ("select COLUMN_NAME, DATA_TYPE, DATA_LENGTH, DATA_PRECISION, DATA_SCALE, NULLABLE, DATA_DEFAULT from ALL_TAB_COLUMNS%(dblink)s where TABLE_NAME = :table_name and OWNER = :owner" % {'dblink':dblink}, {'table_name':actual_name, 'owner':owner})
def reflecttable(self, connection, table): preparer = self.identifier_preparer if not preparer.should_quote(table): name = table.name.upper() else: name = table.name c = connection.execute ("select distinct OWNER from ALL_TAB_COLUMNS where TABLE_NAME = :table_name", {'table_name':name}) rows = c.fetchall() if not rows : raise exceptions.NoSuchTableError(table.name) else: if table.owner is not None: if table.owner.upper() in [r[0] for r in rows]: owner = table.owner.upper() else: raise exceptions.AssertionError("Specified owner %s does not own table %s"%(table.owner, table.name)) else: if len(rows)==1: owner = rows[0][0] else: raise exceptions.AssertionError("There are multiple tables with name %s in the schema, you must specifie owner"%table.name)
3b3d94c9ab566aba411cacbe0a3cd08d6d76b735 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/1074/3b3d94c9ab566aba411cacbe0a3cd08d6d76b735/oracle.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3037, 2121, 12, 2890, 16, 1459, 16, 1014, 4672, 675, 21804, 273, 365, 18, 5644, 67, 1484, 21804, 309, 486, 675, 21804, 18, 13139, 67, 6889, 12, 2121, 4672, 508, 273, 1014, 18, 529, 18,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3037, 2121, 12, 2890, 16, 1459, 16, 1014, 4672, 675, 21804, 273, 365, 18, 5644, 67, 1484, 21804, 309, 486, 675, 21804, 18, 13139, 67, 6889, 12, 2121, 4672, 508, 273, 1014, 18, 529, 18,...
def on_resize_window(self): pass
def on_resize_window(self): # FIXME: Implement this pass
c647948a7fe81768afb7a383769e16ee7aac44bc /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/6502/c647948a7fe81768afb7a383769e16ee7aac44bc/terminal.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 603, 67, 15169, 67, 5668, 12, 2890, 4672, 468, 9852, 30, 10886, 333, 1342, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 603, 67, 15169, 67, 5668, 12, 2890, 4672, 468, 9852, 30, 10886, 333, 1342, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, ...
for line in self.instance.fhandle:
for line in self.fhandle:
def parse(self): """ Run the state machine, parse the file line by line and call process() with the current matched symbol. """ i, lastlen = 1, 0 for line in self.instance.fhandle: line = _strstrip(line) if line == '': i = i+1 continue if line[:3] == '#~ ': line = line[3:] self.entry_obsolete = 1 else: self.entry_obsolete = 0 self.current_token = line if line[:2] == '#:': # we are on a occurences line self.process('OC', i) elif line[:7] == 'msgid "': # we are on a msgid self.process('MI', i) elif line[:8] == 'msgstr "': # we are on a msgstr self.process('MS', i) elif line[:1] == '"': # we are on a continuation line or some metadata self.process('MC', i) elif line[:14] == 'msgid_plural "': # we are on a msgid plural self.process('MP', i) elif line[:7] == 'msgstr[': # we are on a msgstr plural self.process('MX', i) elif line[:3] == '#, ': # we are on a flags line self.process('FL', i) elif line[:2] == '# ' or line == '#': if line == '#': line = line + ' ' # we are on a translator comment line self.process('TC', i) elif line[:2] == '#.': # we are on a generated comment line self.process('GC', i) i = i+1
418b31415acbfe149b6979cf1e06619eed712454 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/9209/418b31415acbfe149b6979cf1e06619eed712454/polib.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1109, 12, 2890, 4672, 3536, 1939, 326, 919, 5228, 16, 1109, 326, 585, 980, 635, 980, 471, 745, 1207, 1435, 598, 326, 783, 4847, 3273, 18, 3536, 277, 16, 1142, 1897, 273, 404, 16, 374, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1109, 12, 2890, 4672, 3536, 1939, 326, 919, 5228, 16, 1109, 326, 585, 980, 635, 980, 471, 745, 1207, 1435, 598, 326, 783, 4847, 3273, 18, 3536, 277, 16, 1142, 1897, 273, 404, 16, 374, ...
def do_body(self):
def do_header(self): stdout.write('/* Generated from ') stdout.write(get_descendant_text(get_by_path(self.spec, 'title'))) version = get_by_path(self.spec, "version")
e562d7cdac8cd00b1ec7c9a7392ec1fc7d3edbed /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/7593/e562d7cdac8cd00b1ec7c9a7392ec1fc7d3edbed/qt4-constants-gen.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 741, 67, 3374, 12, 2890, 4672, 3909, 18, 2626, 2668, 20308, 11025, 628, 8624, 3909, 18, 2626, 12, 588, 67, 26236, 970, 67, 955, 12, 588, 67, 1637, 67, 803, 12, 2890, 18, 2793, 16, 29...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 741, 67, 3374, 12, 2890, 4672, 3909, 18, 2626, 2668, 20308, 11025, 628, 8624, 3909, 18, 2626, 12, 588, 67, 26236, 970, 67, 955, 12, 588, 67, 1637, 67, 803, 12, 2890, 18, 2793, 16, 29...
Possible values are : 'left', 'right'. Default is 'left'. Return the first or last index where the key could be inserted.
Possible values are : 'left', 'right'. Default is 'left'. Return the first or last index where the key could be inserted.
def searchsorted(a, v, side='left'): """Returns indices where keys in v should be inserted to maintain order. *Description* Find the indices into a sorted array such that if the corresponding keys in v were inserted before the indices the order of a would be preserved. If side='left', then the first such index is returned. If side='right', then the last such index is returned. If there is no such index because the key is out of bounds, then the length of a is returned, i.e., the key would need to be appended. The returned index array has the same shape as v. *Parameters*: a : array 1-d array sorted in ascending order. v : array or list type Array of keys to be searched for in a. side : string Possible values are : 'left', 'right'. Default is 'left'. Return the first or last index where the key could be inserted. *Returns*: indices : integer array Array of insertion points with the same shape as v. *SeeAlso*: sort Inplace sort histogram Produce histogram from 1-d data *Notes* The array a must be 1-d and is assumed to be sorted in ascending order. Searchsorted uses binary search to find the required insertion points. """ try: searchsorted = a.searchsorted except AttributeError: return _wrapit(a, 'searchsorted', v, side) return searchsorted(v, side)
fe2f8b811f8f87475f51396eca9a848d2746ab2b /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/14925/fe2f8b811f8f87475f51396eca9a848d2746ab2b/fromnumeric.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1623, 10350, 12, 69, 16, 331, 16, 4889, 2218, 4482, 11, 4672, 3536, 1356, 4295, 1625, 1311, 316, 331, 1410, 506, 9564, 358, 17505, 1353, 18, 225, 380, 3291, 14, 225, 4163, 326, 4295, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1623, 10350, 12, 69, 16, 331, 16, 4889, 2218, 4482, 11, 4672, 3536, 1356, 4295, 1625, 1311, 316, 331, 1410, 506, 9564, 358, 17505, 1353, 18, 225, 380, 3291, 14, 225, 4163, 326, 4295, 1...
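A short usage sketch for the searchsorted behaviour documented above, assuming numpy is available:

import numpy as np

a = np.array([1, 2, 3, 5, 8])
assert np.searchsorted(a, 4) == 3                # between 3 and 5, order kept
assert np.searchsorted(a, 3, side='left') == 2   # before the existing 3
assert np.searchsorted(a, 3, side='right') == 3  # after the existing 3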
m.setCoordinate(elem.name+'_74', coord)
model.setCoordinate(elename+'_74', coord)
def subdivideHex(model, elem, refinenodes): """subdivide a Hex8 element |elem| around the nodes |lnodes| remove |elem| from the model |model| and insert the subelements """ assert(elem.shape.name == 'Hex8') # swap the element corners depending on the refine node / edge swapcorners = [ [0, 1, 2, 3, 4, 5, 6, 7], # u 0 [1, 2, 3, 0, 5, 6, 7, 4], # u 1 [2, 3, 0, 1, 6, 7, 4, 5], # u 2 [3, 0, 1, 2, 7, 4, 5, 6], # u 3 [4, 7, 6, 5, 0, 3, 2, 1], # o 0 [5, 4, 7, 6, 1, 0, 3, 2], # o 1 [6, 5, 4, 7, 2, 1, 0, 3], # o 2 [7, 6, 5, 4, 3, 2, 1, 0], # o 3 [0, 4, 5, 1, 3, 7, 6, 2], # s 0 [1, 5, 6, 2, 0, 4, 7, 3], # s 1 [2, 6, 7, 3, 1, 5, 4, 0], # s 2 [7, 3, 0, 4, 6, 2, 1, 5], # s 3 ] # useful default value refinenode = 0 # one edge needs to be refined if len(refinenodes) == 1: corneridx = [[ 0, 1, 5, 4, 16, 17, 21, 20], [ 1, 3, 15, 5, 17, 51, 63, 21], [ 4, 5, 15, 12, 20, 21, 63, 60], [16, 17, 21, 20, 48, 51, 63, 60] ] import types if type(refinenodes) == types.ListType: refinenode = refinenodes[0] else: refinenode = refinenodes elif len(refinenodes) == 2: corneridx = [ [ 0, 1, 5, 4, 16, 17, 21, 20], [ 1, 2, 6, 5, 17, 18, 22, 21], [ 5, 6, 10, 9, 21, 22, 42, 41], [ 2, 3, 7, 6, 18, 19, 23, 22], [17, 18, 22, 21, 33, 34, 42, 41], [ 4, 5, 9, 12, 20, 21, 41, 60], [ 6, 7, 15, 10, 22, 23, 63, 42], [ 9, 10, 15, 12, 41, 42, 63, 60], [16, 17, 21, 20, 48, 33, 41, 60], [18, 19, 23, 22, 34, 51, 63, 42], [33, 34, 42, 41, 48, 51, 63, 60], ] edgeindex = { (0, 1): 0, (1, 2): 1, (2, 3): 2, (0, 3): 3, (4, 5): 5, (5, 6): 6, (6, 7): 7, (4, 7): 4, (0, 4): 8, (1, 5): 9, (2, 6): 10, (3, 7): 11 } refinenodes.sort() refinenode = edgeindex[tuple(refinenodes)] elif len(refinenodes) == 4: corneridx = [ [ 0, 1, 5, 4, 16, 17, 21, 20], [ 1, 2, 6, 5, 17, 18, 22, 21], [ 2, 3, 7, 6, 18, 19, 23, 22], [ 4, 5, 9, 8, 20, 21, 25, 24], [ 5, 6, 10, 9, 21, 22, 26, 25], [ 6, 7, 11, 10, 22, 23, 27, 26], [ 8, 9, 13, 12, 24, 25, 29, 28], [ 9, 10, 14, 13, 25, 26, 30, 29], [10, 11, 15, 14, 26, 27, 31, 30], [17, 18, 22, 21, 33, 34, 72, 71], [22, 23, 27, 26, 72, 39, 43, 73], [25, 26, 30, 29, 74, 73, 46, 45], [20, 21, 25, 24, 36, 71, 74, 40], [21, 22, 26, 25, 71, 72, 73, 74], [16, 17, 21, 20, 48, 33, 71, 36], [18, 19, 23, 22, 34, 51, 39, 72], [26, 27, 31, 30, 73, 43, 63, 46], [24, 25, 29, 28, 40, 74, 45, 60], [71, 72, 73, 74, 33, 34, 46, 45], [36, 71, 74, 40, 48, 33, 45, 60], [72, 39, 43, 73, 34, 51, 63, 46], [33, 34, 46, 45, 48, 51, 63, 60], ] edgeindex = { (0, 1, 2, 3): 0, # unten (4, 5, 6, 7): 5, # oben (0, 1, 4, 5): 8, # !!!!!!!! 
falsche Transfromation (1, 2, 6, 5): 9, # rechts (2, 3, 6, 7): 10, # hinten (0, 3, 4, 7): 11, # links } refinenodes.sort() refinenode = edgeindex[tuple(refinenodes)] elif len(refinenodes) == 8: corneridx = [ [ 0, 1, 5, 4, 16, 17, 21, 20], [ 1, 2, 6, 5, 17, 18, 22, 21], [ 2, 3, 7, 6, 18, 19, 23, 22], [ 4, 5, 9, 8, 20, 21, 25, 24], [ 5, 6, 10, 9, 21, 22, 26, 25], [ 6, 7, 11, 10, 22, 23, 27, 26], [ 8, 9, 13, 12, 24, 25, 29, 28], [ 9, 10, 14, 13, 25, 26, 30, 29], [10, 11, 15, 14, 26, 27, 31, 30], [16, 17, 21, 20, 32, 33, 37, 36], [17, 18, 22, 21, 33, 34, 38, 37], [18, 19, 23, 22, 34, 35, 39, 38], [20, 21, 25, 24, 36, 37, 41, 40], [21, 22, 26, 25, 37, 38, 42, 41], [22, 23, 27, 26, 38, 39, 43, 42], [24, 25, 29, 28, 40, 41, 45, 44], [25, 26, 30, 29, 41, 42, 46, 45], [26, 27, 31, 30, 42, 43, 47, 46], [32, 33, 37, 36, 48, 49, 53, 52], [33, 34, 38, 37, 49, 50, 54, 53], [34, 35, 39, 38, 50, 51, 55, 54], [36, 37, 41, 40, 52, 53, 57, 56], [37, 38, 42, 41, 53, 54, 58, 57], [38, 39, 43, 42, 54, 55, 59, 58], [40, 41, 45, 44, 56, 57, 61, 60], [41, 42, 46, 45, 57, 58, 62, 61], [42, 43, 47, 46, 58, 59, 63, 62], ] else: print 'not implemented, nonsense' pass # change the order of the corner nodes of the element according to the # refinement nodes # this is no problem since the element gets removed at the end e.nodes = [e.nodes[x] for x in swapcorners[refinenode]] e.nodcoord = [e.nodcoord[x] for x in swapcorners[refinenode]] # additional nodes on a 2.5th level, only for 4 node side if len(refinenodes) == 4: coord = elem.findGlobalCoord(N.array([1., 1., 1.5])/3.*2.-1.) m.setCoordinate(elem.name+'_71', coord) coord = elem.findGlobalCoord(N.array([2., 1., 1.5])/3.*2.-1.) m.setCoordinate(elem.name+'_72', coord) coord = elem.findGlobalCoord(N.array([2., 2., 1.5])/3.*2.-1.) m.setCoordinate(elem.name+'_73', coord) coord = elem.findGlobalCoord(N.array([1., 2., 1.5])/3.*2.-1.) m.setCoordinate(elem.name+'_74', coord) cornerlist = [0,3,15,12,48,51,63,60] for i, corners in enumerate(corneridx): for n in corners: if n < 64 and not n in cornerlist: lcoord = N.array((n%4, n//4 %4, n//16))/3.*2. -1. coord = elem.findGlobalCoord(lcoord) m.setCoordinate(elem.name+'_%d' % n, coord) nodenames = [elem.name+'_%d' % n for n in corners] # the order in the list is crucial! for n, nn in zip(elem.nodes, [elem.name+'_%d' % n for n in cornerlist]): try: nodenames[nodenames.index(nn)] = n except: pass m.setElement(elem.name + '_%s' % i, 'Hex8', nodenames) m.removeElement(e.name)
a06f60abf58eb66adcf5b72de6f94dfa2c242c77 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/2567/a06f60abf58eb66adcf5b72de6f94dfa2c242c77/Adaptive.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 25741, 831, 7037, 12, 2284, 16, 3659, 16, 25994, 275, 1145, 4672, 3536, 1717, 2892, 831, 279, 15734, 28, 930, 571, 10037, 96, 6740, 326, 2199, 571, 2370, 1145, 96, 1206, 571, 10037, 96, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 25741, 831, 7037, 12, 2284, 16, 3659, 16, 25994, 275, 1145, 4672, 3536, 1717, 2892, 831, 279, 15734, 28, 930, 571, 10037, 96, 6740, 326, 2199, 571, 2370, 1145, 96, 1206, 571, 10037, 96, ...
label = self.body[size] text = ''.join(self.body[size+1:])
text = ''.join(self.body[size:])
def depart_citation(self, node): size = self.context.pop() label = self.body[size] text = ''.join(self.body[size+1:]) del self.body[size:] self.bibitems.append([label, text])
b65c48280a2e0b4d11111137d4447a7b98c63f9e /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/7032/b65c48280a2e0b4d11111137d4447a7b98c63f9e/latexwriter.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 26000, 67, 71, 8773, 12, 2890, 16, 756, 4672, 963, 273, 365, 18, 2472, 18, 5120, 1435, 977, 273, 875, 18, 5701, 12, 2890, 18, 3432, 63, 1467, 30, 5717, 1464, 365, 18, 3432, 63, 1467,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 26000, 67, 71, 8773, 12, 2890, 16, 756, 4672, 963, 273, 365, 18, 2472, 18, 5120, 1435, 977, 273, 875, 18, 5701, 12, 2890, 18, 3432, 63, 1467, 30, 5717, 1464, 365, 18, 3432, 63, 1467,...
def edit(uri, http_user, hostname):
def edit(uri, http_user, hostname, configuration=None, machines=None, **kwargs):
def edit(uri, http_user, hostname): """Edit an machine record, based on hostname.""" return (w.HTTP_TYPE_TEXT, "not implemented yet.") title = _("Edit %s's record") % hostname if protected_user(hostname): return w.forgery_error(title) data = w.page_body_start(uri, http_user, ctxtnav, title, False) try: machine = machines.machines[machines.hostname_to_mid(hostname)] try: profile = \ profiles.profiles[ groups.groups[machine['gidNumber']]['name'] ]['name'] except KeyError: profile = _("Standard account") dbl_lists = {} for filter, titles, id in groups_filters_lists_ids: dest = list(machine['groups'].copy()) source = [ groups.groups[gid]['name'] \ for gid in groups.Select(filter) ] for current in dest[:]: try: source.remove(current) except ValueError: dest.remove(current) dest.sort() source.sort() dbl_lists[filter] = w.doubleListBox(titles, id, source, dest) form_name = "user_edit_form" data += '''<div id="edit_form">
5c6e0536d2363f635c0b43a50e8af7ca9d102236 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/5c6e0536d2363f635c0b43a50e8af7ca9d102236/machines.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3874, 12, 1650, 16, 1062, 67, 1355, 16, 5199, 16, 1664, 33, 7036, 16, 15942, 33, 7036, 16, 2826, 4333, 4672, 3536, 4666, 392, 5228, 1409, 16, 2511, 603, 5199, 12123, 225, 327, 261, 91,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3874, 12, 1650, 16, 1062, 67, 1355, 16, 5199, 16, 1664, 33, 7036, 16, 15942, 33, 7036, 16, 2826, 4333, 4672, 3536, 4666, 392, 5228, 1409, 16, 2511, 603, 5199, 12123, 225, 327, 261, 91,...
def fl_get_slider_increment(ob, l, r): """ fl_get_slider_increment(ob, l, r) """ _fl_get_slider_increment(ob, l, r)
def fl_get_slider_increment(pObject, l, r): """ fl_get_slider_increment(pObject, l, r) """ _fl_get_slider_increment(pObject, l, r)
def fl_get_slider_increment(ob, l, r): """ fl_get_slider_increment(ob, l, r) """ _fl_get_slider_increment(ob, l, r)
9942dac8ce2b35a1e43615a26fd8e7054ef805d3 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/2429/9942dac8ce2b35a1e43615a26fd8e7054ef805d3/xformslib.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1183, 67, 588, 67, 28372, 67, 15016, 12, 84, 921, 16, 328, 16, 436, 4672, 3536, 1183, 67, 588, 67, 28372, 67, 15016, 12, 84, 921, 16, 328, 16, 436, 13, 3536, 225, 389, 2242, 67, 58...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1183, 67, 588, 67, 28372, 67, 15016, 12, 84, 921, 16, 328, 16, 436, 4672, 3536, 1183, 67, 588, 67, 28372, 67, 15016, 12, 84, 921, 16, 328, 16, 436, 13, 3536, 225, 389, 2242, 67, 58...
s.addIndex('EMA-20', EMA, s, 20) s.addIndex('EMA-40', EMA, s, 40) v = s.addIndex('KAMA-10', KAMA, s, 10) v.addIndex('EMA-5', EMA, v, 5)
if EMA and KAMA: s.addIndex('EMA-20', EMA, s, 20) s.addIndex('EMA-40', EMA, s, 40) v = s.addIndex('KAMA-10', KAMA, s, 10) v.addIndex('EMA-5', EMA, v, 5)
def series(self, tickerId, field): s = Series() s.addIndex('EMA-20', EMA, s, 20) s.addIndex('EMA-40', EMA, s, 40) v = s.addIndex('KAMA-10', KAMA, s, 10) v.addIndex('EMA-5', EMA, v, 5) return s
77586db5172791426fe8f5c7ab4b1aaae6746641 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/3979/77586db5172791426fe8f5c7ab4b1aaae6746641/__init__.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4166, 12, 2890, 16, 14063, 548, 16, 652, 4672, 272, 273, 9225, 1435, 309, 512, 5535, 471, 1475, 2192, 37, 30, 272, 18, 1289, 1016, 2668, 3375, 37, 17, 3462, 2187, 512, 5535, 16, 272, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4166, 12, 2890, 16, 14063, 548, 16, 652, 4672, 272, 273, 9225, 1435, 309, 512, 5535, 471, 1475, 2192, 37, 30, 272, 18, 1289, 1016, 2668, 3375, 37, 17, 3462, 2187, 512, 5535, 16, 272, ...
user = user_pool.browse(cr, uid, uid, uid)
user = user_pool.browse(cr, uid, uid, context)
def _get_company(self, cr, uid, ids, context={}): user_pool = self.pool.get('res.users') company_pool = self.pool.get('res.company') user = user_pool.browse(cr, uid, uid, uid) company_id = user.company_id and user.company_id.id if not company_id: company_id = company_pool.search(cr, uid, [])[0]
4fd67b69c6b0fc3ce35920df8e8fdc664e1fbc7b /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/4fd67b69c6b0fc3ce35920df8e8fdc664e1fbc7b/account_cash_statement.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 588, 67, 16840, 12, 2890, 16, 4422, 16, 4555, 16, 3258, 16, 819, 12938, 4672, 729, 67, 6011, 273, 365, 18, 6011, 18, 588, 2668, 455, 18, 5577, 6134, 9395, 67, 6011, 273, 365, 18...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 588, 67, 16840, 12, 2890, 16, 4422, 16, 4555, 16, 3258, 16, 819, 12938, 4672, 729, 67, 6011, 273, 365, 18, 6011, 18, 588, 2668, 455, 18, 5577, 6134, 9395, 67, 6011, 273, 365, 18...
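A minimal sketch of the positional-argument hazard fixed in the record above: passing uid where context belongs runs without error but corrupts the call. The function body is a hypothetical reduction:

def browse(cr, uid, ids, context=None):
    return context

assert browse('cr', 1, [1], 1) == 1             # the bug: uid lands in context
assert browse('cr', 1, [1], context={}) == {}   # keyword form avoids the mix-up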
s += write(")")
r += cls.write(")")
def closing(self, node): s = u'' if node.parent.type == "loop": s += write(")")
01a99d0db0499777506f7fbf9ecf7a7e70c7a9e2 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/5718/01a99d0db0499777506f7fbf9ecf7a7e70c7a9e2/Packer.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 7647, 12, 2890, 16, 756, 4672, 272, 273, 582, 6309, 309, 756, 18, 2938, 18, 723, 422, 315, 6498, 6877, 272, 1011, 1045, 2932, 2225, 13, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 7647, 12, 2890, 16, 756, 4672, 272, 273, 582, 6309, 309, 756, 18, 2938, 18, 723, 422, 315, 6498, 6877, 272, 1011, 1045, 2932, 2225, 13, 2, -100, -100, -100, -100, -100, -100, -100, -10...
"""Return value for form inpurt."""
"""Return value for form input."""
def getValue(request, argument): """Return value for form inpurt.""" values = request.args.get(argument.name, None) if values: try: return argument.coerce(values[0]) except formmethod.InputError: return values[0] return argument.default
4fcf3974af001e6becd57d1555de5170b6788478 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12595/4fcf3974af001e6becd57d1555de5170b6788478/form.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2366, 12, 2293, 16, 1237, 4672, 3536, 990, 460, 364, 646, 810, 12123, 924, 273, 590, 18, 1968, 18, 588, 12, 3446, 18, 529, 16, 599, 13, 309, 924, 30, 775, 30, 327, 1237, 18, 2894, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2366, 12, 2293, 16, 1237, 4672, 3536, 990, 460, 364, 646, 810, 12123, 924, 273, 590, 18, 1968, 18, 588, 12, 3446, 18, 529, 16, 599, 13, 309, 924, 30, 775, 30, 327, 1237, 18, 2894, ...
self.session.delete(crecs[0]) self.session.delete(c) self.session.flush()
self.objectstore.delete(crecs[0]) self.objectstore.delete(c) self.objectstore.flush()
def test_duplicate_password_reset(self): """Try to reset a password twice.
6464a88622de6213e0c72b0c4204ad3f2f8d3651 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12856/6464a88622de6213e0c72b0c4204ad3f2f8d3651/test_account.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 17342, 67, 3664, 67, 6208, 12, 2890, 4672, 3536, 7833, 358, 2715, 279, 2201, 13605, 18, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 17342, 67, 3664, 67, 6208, 12, 2890, 4672, 3536, 7833, 358, 2715, 279, 2201, 13605, 18, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -...
self.lock.acquire()
def readline(self): #check data for a full line (terminated by \n or EOF) #if the line is there, extract it and return #if not a complete line, set a request for input from the client # with a hint containing the current data in the line # then wait on a mutex ## TODO: Could optionally raise a keyboard interrupt while 1: self.lock.acquire() ind=self.data.find('\n') if ind>0: line=self.data[:ind] self.data=self.data[ind+1:] self.lock.release() return line self.inputrequest=True self.lock.wait() self.inputrequest=False
8bda109ca69a3384eb0e570c4fc302015e431e46 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/2579/8bda109ca69a3384eb0e570c4fc302015e431e46/interp.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 12023, 12, 2890, 4672, 468, 1893, 501, 364, 279, 1983, 980, 261, 29133, 635, 521, 82, 578, 6431, 13, 468, 430, 326, 980, 353, 1915, 16, 2608, 518, 471, 327, 468, 430, 486, 279, 3912, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 12023, 12, 2890, 4672, 468, 1893, 501, 364, 279, 1983, 980, 261, 29133, 635, 521, 82, 578, 6431, 13, 468, 430, 326, 980, 353, 1915, 16, 2608, 518, 471, 327, 468, 430, 486, 279, 3912, ...
if action.startswith("no"):
if action.startswith("un"):
def filter_message(msg, mgr, all_actions=True): config = mgr.config.filter prob = mgr.score(msg) prob_perc = prob * 100 if prob_perc >= config.spam_threshold: disposition = "Yes" attr_prefix = "spam" elif prob_perc >= config.unsure_threshold: disposition = "Unsure" attr_prefix = "unsure" else: disposition = "No" attr_prefix = None try: msg.SetField(mgr.config.field_score_name, prob) msg.Save() if all_actions and attr_prefix is not None: folder_id = getattr(config, attr_prefix + "_folder_id") action = getattr(config, attr_prefix + "_action").lower() if action.startswith("no"): pass elif action.startswith("co"): dest_folder = mgr.message_store.GetFolder(folder_id) msg.CopyTo(dest_folder) elif action.startswith("mo"): dest_folder = mgr.message_store.GetFolder(folder_id) msg.MoveTo(dest_folder) else: raise RuntimeError, "Eeek - bad action '%r'" % (action,) return disposition except: print "Failed filtering message!", msg import traceback traceback.print_exc() return "Failed"
1f9cf1101e0b2ee8dbd377e8413887769d5bdc80 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/6126/1f9cf1101e0b2ee8dbd377e8413887769d5bdc80/filter.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1034, 67, 2150, 12, 3576, 16, 13333, 16, 777, 67, 4905, 33, 5510, 4672, 642, 273, 13333, 18, 1425, 18, 2188, 3137, 273, 13333, 18, 6355, 12, 3576, 13, 3137, 67, 457, 71, 273, 3137, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1034, 67, 2150, 12, 3576, 16, 13333, 16, 777, 67, 4905, 33, 5510, 4672, 642, 273, 13333, 18, 1425, 18, 2188, 3137, 273, 13333, 18, 6355, 12, 3576, 13, 3137, 67, 457, 71, 273, 3137, 3...
self.supports_cast = (self.dbapi is None or vers(self.dbapi.sqlite_version) >= vers("3.2.3"))
def vers(num): return tuple([int(x) for x in num.split('.')])
448b3a22bedd02f9605b542940de26aa796ae564 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/1074/448b3a22bedd02f9605b542940de26aa796ae564/sqlite.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 14690, 12, 2107, 4672, 327, 3193, 3816, 474, 12, 92, 13, 364, 619, 316, 818, 18, 4939, 2668, 1093, 13, 5717, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 14690, 12, 2107, 4672, 327, 3193, 3816, 474, 12, 92, 13, 364, 619, 316, 818, 18, 4939, 2668, 1093, 13, 5717, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, ...
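A usage sketch for the vers() helper above: tuple comparison orders version strings numerically, where plain string comparison fails:

def vers(num):
    return tuple([int(x) for x in num.split('.')])

assert vers("3.10.1") > vers("3.2.3")   # numeric: 10 > 2
assert not ("3.10.1" > "3.2.3")         # lexicographic comparison gets it wrong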
if not context:
if context is None:
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False): """ Search for record/s with or without domain
3ec15857cea8a0a5417e2549350ab7f13b2efc04 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12853/3ec15857cea8a0a5417e2549350ab7f13b2efc04/orm.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1623, 12, 2890, 16, 4422, 16, 729, 16, 833, 16, 1384, 33, 20, 16, 1800, 33, 7036, 16, 1353, 33, 7036, 16, 819, 33, 7036, 16, 1056, 33, 8381, 4672, 3536, 5167, 364, 1409, 19, 87, 59...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1623, 12, 2890, 16, 4422, 16, 729, 16, 833, 16, 1384, 33, 20, 16, 1800, 33, 7036, 16, 1353, 33, 7036, 16, 819, 33, 7036, 16, 1056, 33, 8381, 4672, 3536, 5167, 364, 1409, 19, 87, 59...
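A minimal sketch of why the fix above tests `context is None` instead of `not context`: an explicitly passed empty dict should be kept, not replaced. The function is a hypothetical reduction of the record's search():

def search(args, context=None):
    if context is None:
        context = {}
    return context

shared = {}
assert search([], shared) is shared   # `not context` would have discarded it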
flush_header_t = ('header',language["route_flush"])
flush_header_t = ('header', language["route_flush"])
def __init__(self,body,pos,ui,dbus=None): global daemon, wireless, wired
4fa245b79a41a22da17ae3febbf053c5dd751a5f /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/12280/4fa245b79a41a22da17ae3febbf053c5dd751a5f/prefs_curses.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 3432, 16, 917, 16, 4881, 16, 1966, 407, 33, 7036, 4672, 2552, 8131, 16, 6636, 2656, 16, 341, 2921, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 3432, 16, 917, 16, 4881, 16, 1966, 407, 33, 7036, 4672, 2552, 8131, 16, 6636, 2656, 16, 341, 2921, 2, -100, -100, -100, -100, -100, -100, -100, -100, -10...
new = planet()
new = Planet()
def setup(): "Prepare to play, set up cosmos." w = coord() # Decide how many of everything if choose(): return # frozen game # Prepare the Enterprise game.alldone = game.gamewon = game.shldchg = game.shldup = False game.ship = 'E' game.state.crew = FULLCREW game.energy = game.inenrg = 5000.0 game.shield = game.inshld = 2500.0 game.inlsr = 4.0 game.lsupres = 4.0 game.quadrant = randplace(GALSIZE) game.sector = randplace(QUADSIZE) game.torps = game.intorps = 10 game.nprobes = randrange(2, 5) game.warpfac = 5.0 for i in range(NDEVICES): game.damage[i] = 0.0 # Set up assorted game parameters game.battle = coord() game.state.date = game.indate = 100.0 * randreal(20, 51) game.nkinks = game.nhelp = game.casual = game.abandoned = 0 game.iscate = game.resting = game.imine = game.icrystl = game.icraft = False game.isatb = game.state.nplankl = 0 game.state.starkl = game.state.basekl = 0 game.iscraft = "onship" game.landed = False game.alive = True # Starchart is functional but we've never seen it game.lastchart = FOREVER # Put stars in the galaxy game.instar = 0 for i in range(GALSIZE): for j in range(GALSIZE): k = randrange(1, QUADSIZE**2/10+1) game.instar += k game.state.galaxy[i][j].stars = k # Locate star bases in galaxy for i in range(game.inbase): while True: while True: w = randplace(GALSIZE) if not game.state.galaxy[w.i][w.j].starbase: break contflag = False # C version: for (j = i-1; j > 0; j--) # so it did them in the opposite order. for j in range(1, i): # Improved placement algorithm to spread out bases distq = (w - game.state.baseq[j]).distance() if distq < 6.0*(BASEMAX+1-game.inbase) and withprob(0.75): contflag = True if game.idebug: prout("=== Abandoning base #%d at %s" % (i, w)) break elif distq < 6.0 * (BASEMAX+1-game.inbase): if game.idebug: prout("=== Saving base #%d, close to #%d" % (i, j)) if not contflag: break game.state.baseq.append(w) game.state.galaxy[w.i][w.j].starbase = game.state.chart[w.i][w.j].starbase = True # Position ordinary Klingon Battle Cruisers krem = game.inkling klumper = 0.25*game.skill*(9.0-game.length)+1.0 if klumper > MAXKLQUAD: klumper = MAXKLQUAD while True: r = randreal() klump = (1.0 - r*r)*klumper if klump > krem: klump = krem krem -= klump while True: w = randplace(GALSIZE) if not game.state.galaxy[w.i][w.j].supernova and \ game.state.galaxy[w.i][w.j].klingons + klump <= MAXKLQUAD: break game.state.galaxy[w.i][w.j].klingons += int(klump) if krem <= 0: break # Position Klingon Commander Ships for i in range(game.incom): while True: w = randplace(GALSIZE) if not welcoming(w) or w in game.state.kcmdr: continue if (game.state.galaxy[w.i][w.j].klingons or withprob(0.25)): break game.state.galaxy[w.i][w.j].klingons += 1 game.state.kcmdr.append(w) # Locate planets in galaxy for i in range(game.inplan): while True: w = randplace(GALSIZE) if game.state.galaxy[w.i][w.j].planet == None: break new = planet() new.quadrant = w new.crystals = "absent" if (game.options & OPTION_WORLDS) and i < NINHAB: new.pclass = "M" # All inhabited planets are class M new.crystals = "absent" new.known = "known" new.name = systnames[i] new.inhabited = True else: new.pclass = ("M", "N", "O")[randrange(0, 3)] if withprob(0.33): new.crystals = "present" new.known = "unknown" new.inhabited = False game.state.galaxy[w.i][w.j].planet = new game.state.planets.append(new) # Locate Romulans for i in range(game.state.nromrem): w = randplace(GALSIZE) game.state.galaxy[w.i][w.j].romulans += 1 # Place the Super-Commander if needed if game.state.nscrem > 0: while True: w = randplace(GALSIZE) if 
welcoming(w): break game.state.kscmdr = w game.state.galaxy[w.i][w.j].klingons += 1 # Initialize times for extraneous events schedule(FSNOVA, expran(0.5 * game.intime)) schedule(FTBEAM, expran(1.5 * (game.intime / len(game.state.kcmdr)))) schedule(FSNAP, randreal(1.0, 2.0)) # Force an early snapshot schedule(FBATTAK, expran(0.3*game.intime)) unschedule(FCDBAS) if game.state.nscrem: schedule(FSCMOVE, 0.2777) else: unschedule(FSCMOVE) unschedule(FSCDBAS) unschedule(FDSPROB) if (game.options & OPTION_WORLDS) and game.skill >= SKILL_GOOD: schedule(FDISTR, expran(1.0 + game.intime)) else: unschedule(FDISTR) unschedule(FENSLV) unschedule(FREPRO) # Place thing (in tournament game, we don't want one!) # New in SST2K: never place the Thing near a starbase. # This makes sense and avoids a special case in the old code. global thing if game.tourn is None: while True: thing = randplace(GALSIZE) if thing not in game.state.baseq: break skip(2) game.state.snap = False if game.skill == SKILL_NOVICE: prout(_("It is stardate %d. The Federation is being attacked by") % int(game.state.date)) prout(_("a deadly Klingon invasion force. As captain of the United")) prout(_("Starship U.S.S. Enterprise, it is your mission to seek out")) prout(_("and destroy this invasion force of %d battle cruisers.") % ((game.inkling + game.incom + game.inscom))) prout(_("You have an initial allotment of %d stardates to complete") % int(game.intime)) prout(_("your mission. As you proceed you may be given more time.")) skip(1) prout(_("You will have %d supporting starbases.") % (game.inbase)) proutn(_("Starbase locations- ")) else: prout(_("Stardate %d.") % int(game.state.date)) skip(1) prout(_("%d Klingons.") % (game.inkling + game.incom + game.inscom)) prout(_("An unknown number of Romulans.")) if game.state.nscrem: prout(_("And one (GULP) Super-Commander.")) prout(_("%d stardates.") % int(game.intime)) proutn(_("%d starbases in ") % game.inbase) for i in range(game.inbase): proutn(`game.state.baseq[i]`) proutn(" ") skip(2) proutn(_("The Enterprise is currently in Quadrant %s") % game.quadrant) proutn(_(" Sector %s") % game.sector) skip(2) prout(_("Good Luck!")) if game.state.nscrem: prout(_(" YOU'LL NEED IT.")) waitfor() newqad() if len(game.enemies) - (thing == game.quadrant) - (game.tholian != None): game.shldup = True if game.neutz: # bad luck to start in a Romulan Neutral Zone attack(torps_ok=False)
e571d502c19abfb5a3320300c347793da7b43d05 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3176/e571d502c19abfb5a3320300c347793da7b43d05/sst.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3875, 13332, 315, 7543, 358, 6599, 16, 444, 731, 4987, 26719, 1199, 341, 273, 2745, 1435, 468, 225, 3416, 831, 3661, 4906, 434, 7756, 309, 9876, 13332, 327, 468, 12810, 7920, 468, 7730, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3875, 13332, 315, 7543, 358, 6599, 16, 444, 731, 4987, 26719, 1199, 341, 273, 2745, 1435, 468, 225, 3416, 831, 3661, 4906, 434, 7756, 309, 9876, 13332, 327, 468, 12810, 7920, 468, 7730, ...
sql = "INSERT INTO %s (%s) VALUES (%s)" %(self.name, ",".join(ks),",".join(vals))
s1 = ",".join(ks) s2 = ",".join(vals) sql = "INSERT INTO %s (%s) VALUES (%s)" %(self.name,s1,s2)
def insert(self,*args,**kw): """Insert a record in the database Parameters can be positional or keyword arguments. If positional they must be in the same order as in the create() method If some of the fields are missing the value is set to None Returns the record identifier """ if args: kw = dict([(f,arg) for f,arg in zip(self.all_fields[2:],args)]) kw["__version__"] = 0
1e2c6f6403a0572c5afe29309a56e11888cd03bf /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/6310/1e2c6f6403a0572c5afe29309a56e11888cd03bf/SQLite.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2243, 12, 2890, 16, 14, 1968, 16, 636, 9987, 4672, 3536, 4600, 279, 1409, 316, 326, 2063, 7012, 848, 506, 16780, 578, 4932, 1775, 18, 971, 16780, 2898, 1297, 506, 316, 326, 1967, 1353, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2243, 12, 2890, 16, 14, 1968, 16, 636, 9987, 4672, 3536, 4600, 279, 1409, 316, 326, 2063, 7012, 848, 506, 16780, 578, 4932, 1775, 18, 971, 16780, 2898, 1297, 506, 316, 326, 1967, 1353, ...
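An aside on the INSERT construction above: joining column names into the SQL text is unavoidable, but the values are safer as placeholders. A parameterized sqlite3 sketch with a hypothetical table:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE t (a, b)")
ks, vals = ["a", "b"], [1, 2]
sql = "INSERT INTO t (%s) VALUES (%s)" % (",".join(ks), ",".join("?" * len(vals)))
conn.execute(sql, vals)
assert conn.execute("SELECT a, b FROM t").fetchone() == (1, 2)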
tool.Inventory(bundle)
tool.Inventory([bundle])
def Install(self): '''Install all entries''' self.DispatchInstallCalls(self.whitelist) if self.modified: # Handle Bundle interdeps mods = self.modified mbundles = [struct for struct in self.config if struct.tag == 'Bundle' and \ [mod for mod in mods if mod in struct]] if mbundles: self.logger.info("The Following Bundles have been modifed:") self.logger.info([mbun.get('name') for mbun in mbundles]) self.logger.info("") tbm = [(t, b) for t in self.tools for b in mbundles] for tool, bundle in tbm: try: tool.Inventory(bundle) except: self.logger.error("%s.Inventory() call failed:" % tool.__name__, exc_info=1) clobbered = [entry for bundle in mbundles for entry in bundle \ if not self.states[entry]] if not self.setup['interactive']: self.DispatchInstallCalls(clobbered) for tool, bundle in tbm: try: tool.BundleUpdated(bundle) except: self.logger.error("%s.BundleUpdated() call failed:" % (tool.__name__), exc_info=1)
e916dbf04f641631dabc5b48d3269748c099a4b7 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/11867/e916dbf04f641631dabc5b48d3269748c099a4b7/Frame.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 10284, 12, 2890, 4672, 9163, 6410, 777, 3222, 26418, 365, 18, 5325, 6410, 10125, 12, 2890, 18, 20409, 13, 309, 365, 18, 7342, 30, 468, 5004, 8539, 1554, 14877, 15546, 273, 365, 18, 7342,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 10284, 12, 2890, 4672, 9163, 6410, 777, 3222, 26418, 365, 18, 5325, 6410, 10125, 12, 2890, 18, 20409, 13, 309, 365, 18, 7342, 30, 468, 5004, 8539, 1554, 14877, 15546, 273, 365, 18, 7342,...
for i in xrange(40): all += Object('toto%5d' % i, 'toto.c') all+= Command( 'broken', 'toto.c', explode)
all.extend(Command( 'broken', 'toto.c', explode))
def explode(env, target, source): os.killpg(0, signal.SIGINT)
b70b67a9c21cad81241307236bedee1f0256fcd8 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/12817/b70b67a9c21cad81241307236bedee1f0256fcd8/KeyboardInterrupt.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3172, 12, 3074, 16, 1018, 16, 1084, 4672, 1140, 18, 16418, 8365, 12, 20, 16, 4277, 18, 18513, 3217, 13, 225, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3172, 12, 3074, 16, 1018, 16, 1084, 4672, 1140, 18, 16418, 8365, 12, 20, 16, 4277, 18, 18513, 3217, 13, 225, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, ...
SimpleActionState('detect_handle', DoorAction, goal_slots = ['door'], result_slots = ['door']),
SimpleActionState('detect_handle', DoorAction, goal_slots = ['door'],
def detect_door_result_cb(ud, status, result): if status == GoalStatus.SUCCEEDED: if result.door.latch_state == Door.UNLATCHED: return 'unlatched' else: return 'closed' return 'aborted'
22bf1c631140e70f42912797a2f1dfea01fa1768 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9841/22bf1c631140e70f42912797a2f1dfea01fa1768/executive.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 5966, 67, 2896, 280, 67, 2088, 67, 7358, 12, 1100, 16, 1267, 16, 563, 4672, 309, 1267, 422, 4220, 287, 1482, 18, 6639, 39, 26031, 30, 309, 563, 18, 2896, 280, 18, 80, 505, 67, 2019, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 5966, 67, 2896, 280, 67, 2088, 67, 7358, 12, 1100, 16, 1267, 16, 563, 4672, 309, 1267, 422, 4220, 287, 1482, 18, 6639, 39, 26031, 30, 309, 563, 18, 2896, 280, 18, 80, 505, 67, 2019, ...
self._AttemptRebase(upstream_branch, files, verbose=options.verbose,
self._AttemptRebase(upstream_branch, files, options,
def update(self, options, args, file_list): """Runs git to update or transparently checkout the working copy.
e603c688da503d6a6db9690684aa49a1b2a54706 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/6076/e603c688da503d6a6db9690684aa49a1b2a54706/gclient_scm.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1089, 12, 2890, 16, 702, 16, 833, 16, 585, 67, 1098, 4672, 3536, 9361, 5071, 358, 1089, 578, 17270, 715, 13926, 326, 5960, 1610, 18, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1089, 12, 2890, 16, 702, 16, 833, 16, 585, 67, 1098, 4672, 3536, 9361, 5071, 358, 1089, 578, 17270, 715, 13926, 326, 5960, 1610, 18, 2, -100, -100, -100, -100, -100, -100, -100, -100, ...
% ('0.3.3', __version__)
% ('0.3.9', __version__)
def render(self, req, mimetype, content, filename=None, rev=None): try: from docutils import nodes from docutils.core import publish_string from docutils.parsers import rst from docutils import __version__ except ImportError: raise TracError, 'Docutils not found' if StrictVersion(__version__) < StrictVersion('0.3.3'): raise TracError, 'Docutils version >= %s required, %s found' \ % ('0.3.3', __version__)
001bc5b2e57f5bd8d7a85bcdfbfe3d8e58ea8bf4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/9317/001bc5b2e57f5bd8d7a85bcdfbfe3d8e58ea8bf4/rst.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1743, 12, 2890, 16, 1111, 16, 12595, 16, 913, 16, 1544, 33, 7036, 16, 5588, 33, 7036, 4672, 775, 30, 628, 997, 5471, 1930, 2199, 628, 997, 5471, 18, 3644, 1930, 3808, 67, 1080, 628, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1743, 12, 2890, 16, 1111, 16, 12595, 16, 913, 16, 1544, 33, 7036, 16, 5588, 33, 7036, 4672, 775, 30, 628, 997, 5471, 1930, 2199, 628, 997, 5471, 18, 3644, 1930, 3808, 67, 1080, 628, ...
SyntaxError: augmented assignment to generator expression not possible (<doctest test.test_syntax[30]>, line 1)
SyntaxError: augmented assignment to generator expression not possible (<doctest test.test_syntax[31]>, line 1)
>>> def f(it, *varargs):
7602160c4ba3a0e106e33e34ec1ceef4ae274418 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8125/7602160c4ba3a0e106e33e34ec1ceef4ae274418/test_syntax.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 4080, 1652, 284, 12, 305, 16, 380, 1401, 1968, 4672, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 4080, 1652, 284, 12, 305, 16, 380, 1401, 1968, 4672, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, ...
slistbox ScrolledListBox tick Button } cross Button } present if created with the fancy option"""
slistbox ScrolledListBox tick Button cross Button : present if created with the fancy option"""
def invoke(self, name): if self.subwidget_list.has_key(name): self.tk.call(self._w, 'invoke', name)
01c920b9b56baabd2e1dd945affd1c17c1669b20 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/01c920b9b56baabd2e1dd945affd1c17c1669b20/Tix.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4356, 12, 2890, 16, 508, 4672, 309, 365, 18, 1717, 6587, 67, 1098, 18, 5332, 67, 856, 12, 529, 4672, 365, 18, 16099, 18, 1991, 12, 2890, 6315, 91, 16, 296, 14407, 2187, 508, 13, 2, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4356, 12, 2890, 16, 508, 4672, 309, 365, 18, 1717, 6587, 67, 1098, 18, 5332, 67, 856, 12, 529, 4672, 365, 18, 16099, 18, 1991, 12, 2890, 6315, 91, 16, 296, 14407, 2187, 508, 13, 2, ...
varname = DottedName(name, var_doc.name)
def _assign_canonical_names(self, val_doc, name, score=0): """ Assign a canonical name to C{val_doc} (if it doesn't have one already), and (recursively) to each variable in C{val_doc}. In particular, C{val_doc} will be assigned the canonical name C{name} iff either: - C{val_doc}'s canonical name is C{UNKNOWN}; or - C{val_doc}'s current canonical name was assigned by this method; but the score of the new name (C{score}) is higher than the score of the current name (C{score_dict[val_doc]}). Note that canonical names will even be assigned to values like integers and C{None}; but these should be harmless.
379b704fe69b6fec075d81a9962a3ce010f19e8e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3512/379b704fe69b6fec075d81a9962a3ce010f19e8e/docindexer.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 6145, 67, 18288, 67, 1973, 12, 2890, 16, 1244, 67, 2434, 16, 508, 16, 4462, 33, 20, 4672, 3536, 12093, 279, 7378, 508, 358, 385, 95, 1125, 67, 2434, 97, 261, 430, 518, 3302, 140...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 6145, 67, 18288, 67, 1973, 12, 2890, 16, 1244, 67, 2434, 16, 508, 16, 4462, 33, 20, 4672, 3536, 12093, 279, 7378, 508, 358, 385, 95, 1125, 67, 2434, 97, 261, 430, 518, 3302, 140...
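Aside (not a dataset record): the docstring in the record above describes score-based canonical naming. A loose sketch of that policy under invented names (assign_name, names, scores); epydoc's real bookkeeping differs:

UNKNOWN = object()
names = {}   # value id -> canonical name
scores = {}  # value id -> score of the winning name

def assign_name(val_id, name, score=0):
    # Keep the current name unless none exists or the candidate scores higher.
    if names.get(val_id, UNKNOWN) is UNKNOWN or score > scores[val_id]:
        names[val_id] = name
        scores[val_id] = score

assign_name("v1", "pkg.mod.x", score=1)
assign_name("v1", "pkg.other.x", score=0)   # lower score: ignored
assert names["v1"] == "pkg.mod.x"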
if (i % 2): table += '<tr style="background-color: rgb(153, 255, 255);">'
if grb.has_data: coldef = '153, 255, 255'
def add_linked_value(table, value, ref): if value>0: if ref>0: table = add(table, '<a href="http://gcn.gsfc.nasa.gov/gcn3/%d.gcn3">%.2f</a>' % (ref, value)) else: table = add(table, '%.2f' % value) else: table = add(table, '&mdash') return table
49433c72fbd74772bdd0097fb2679094b1f8a56f /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/3592/49433c72fbd74772bdd0097fb2679094b1f8a56f/pylal_exttrig_llutils.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 527, 67, 17738, 67, 1132, 12, 2121, 16, 460, 16, 1278, 4672, 309, 460, 34, 20, 30, 309, 1278, 34, 20, 30, 1014, 273, 527, 12, 2121, 16, 2368, 69, 3897, 1546, 2505, 2207, 75, 10305, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 527, 67, 17738, 67, 1132, 12, 2121, 16, 460, 16, 1278, 4672, 309, 460, 34, 20, 30, 309, 1278, 34, 20, 30, 1014, 273, 527, 12, 2121, 16, 2368, 69, 3897, 1546, 2505, 2207, 75, 10305, ...
SqlCommand = """select max(ID) from %s""" % self.Table
SqlCommand = """select max(ID) from %s""" % self.Table
def GetId(self): SqlCommand = """select max(ID) from %s""" % self.Table Record = self.Cur.execute(SqlCommand).fetchall() Id = Record[0][0] if Id == None: Id = self.IdBase return Id
3592477b478c77d2b83738ccc27f41d76c723448 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/914/3592477b478c77d2b83738ccc27f41d76c723448/MetaDataTable.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 31857, 12, 2890, 4672, 8855, 2189, 273, 3536, 4025, 943, 12, 734, 13, 628, 738, 87, 8395, 738, 365, 18, 1388, 5059, 273, 365, 18, 2408, 18, 8837, 12, 5101, 2189, 2934, 5754, 454, 1435,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 31857, 12, 2890, 4672, 8855, 2189, 273, 3536, 4025, 943, 12, 734, 13, 628, 738, 87, 8395, 738, 365, 18, 1388, 5059, 273, 365, 18, 2408, 18, 8837, 12, 5101, 2189, 2934, 5754, 454, 1435,...
SeleniumServer.__inst = SeleniumServer.__impl(config)
SeleniumServer.__inst = SeleniumServer.__impl(config, simulate, logger)
def __init__(self, config={}): # Check whether we already have an instance if SeleniumServer.__inst is None: SeleniumServer.__inst = SeleniumServer.__impl(config)
67e32a34117a00a342fb153c2d5b24bda915d7b4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5718/67e32a34117a00a342fb153c2d5b24bda915d7b4/seleniumserver.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 642, 12938, 4672, 468, 2073, 2856, 732, 1818, 1240, 392, 791, 309, 4352, 17327, 2081, 16186, 8591, 353, 599, 30, 4352, 17327, 2081, 16186, 8591, 273, 4352, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 642, 12938, 4672, 468, 2073, 2856, 732, 1818, 1240, 392, 791, 309, 4352, 17327, 2081, 16186, 8591, 353, 599, 30, 4352, 17327, 2081, 16186, 8591, 273, 4352, ...
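Aside (not a dataset record): the diff above threads two extra constructor arguments through a classic lazy-singleton __init__. A generic sketch of the idiom with placeholder names, not the project's real API:

class Server:
    class _Impl:
        def __init__(self, config, simulate, logger):
            self.config, self.simulate, self.logger = config, simulate, logger

    _inst = None   # the one shared implementation instance

    def __init__(self, config=None, simulate=False, logger=None):
        # Only the first construction builds _Impl; the fix above forwards
        # the extra arguments to it in the same way.
        if Server._inst is None:
            Server._inst = Server._Impl(config or {}, simulate, logger)

a, b = Server(), Server()
assert a._inst is b._inst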
(ph.freeze(), instanceId,
(ph.freeze(), instanceId,
def migrate(self): from conary import trove cu = self.cu logMe(3, "Fixing NULL path hashes...") cu.execute("SELECT instanceId FROM TroveInfo " "WHERE data IS NULL and infotype = ?", trove._TROVEINFO_TAG_PATH_HASHES) cu2 = self.db.cursor() for instanceId, in cu: cu2.execute("SELECT path FROM TroveFiles WHERE instanceId=?", instanceId) ph = trove.PathHashes() for path, in cu2: ph.addPath(path) cu2.execute("UPDATE TroveInfo SET data=? " "WHERE instanceId=? and infotype=?", (ph.freeze(), instanceId,
08b788b735e532a2e9fade2921e956446ed9ccea /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8747/08b788b735e532a2e9fade2921e956446ed9ccea/schema.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 13187, 12, 2890, 4672, 628, 225, 356, 814, 1930, 23432, 537, 15985, 273, 365, 18, 6319, 613, 4667, 12, 23, 16, 315, 8585, 310, 3206, 589, 9869, 7070, 13, 15985, 18, 8837, 2932, 4803, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 13187, 12, 2890, 4672, 628, 225, 356, 814, 1930, 23432, 537, 15985, 273, 365, 18, 6319, 613, 4667, 12, 23, 16, 315, 8585, 310, 3206, 589, 9869, 7070, 13, 15985, 18, 8837, 2932, 4803, 1...
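Aside (not a dataset record): the migrate() context above scans rows with one cursor and writes fixes through a second. A self-contained sqlite3 sketch of that two-cursor pattern; the table and values are invented, not conary's schema:

import sqlite3

db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE info (id INTEGER, data BLOB)")
db.executemany("INSERT INTO info VALUES (?, NULL)", [(1,), (2,)])

cu = db.cursor()
cu2 = db.cursor()
rows = cu.execute("SELECT id FROM info WHERE data IS NULL").fetchall()
for (row_id,) in rows:
    # Recompute the missing payload, then write it back through the second
    # cursor so the outer scan stays untouched.
    cu2.execute("UPDATE info SET data=? WHERE id=?", (b"fixed", row_id))
db.commit()
assert db.execute("SELECT COUNT(*) FROM info WHERE data IS NULL").fetchone()[0] == 0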
val = neg + mode + val
val = mode + val
def from_string(cls, env, string, **kw): filters = string.split('&') kw_strs = ['order', 'group', 'page', 'max'] kw_arys = ['rows'] kw_bools = ['desc', 'groupdesc', 'verbose'] synonyms = TicketSystem(env).get_field_synonyms() constraints = {} cols = [] for filter_ in filters: filter_ = filter_.split('=') if len(filter_) != 2: raise QuerySyntaxError(_('Query filter requires field and ' 'constraints separated by a "="')) field, values = filter_ if not field: raise QuerySyntaxError(_('Query filter requires field name')) # from last char of `field`, get the mode of comparison mode, neg = '', '' if field[-1] in ('~', '^', '$') \ and not field in cls.substitutions: mode = field[-1] field = field[:-1] if field[-1] == '!': neg = '!' field = field[:-1] processed_values = [] for val in values.split('|'): val = neg + mode + val # add mode of comparison processed_values.append(val) try: if isinstance(field, unicode): field = field.encode('utf-8') if field in kw_strs: kw[field] = processed_values[0] elif field in kw_arys: kw[field] = processed_values elif field in kw_bools: kw[field] = True elif field == 'col': cols.extend(synonyms.get(value, value) for value in processed_values) else: constraints.setdefault(synonyms.get(field, field), []).extend(processed_values) except UnicodeError: pass # field must be a str, see `get_href()` report = constraints.pop('report', None) report = kw.pop('report', report) return cls(env, report, constraints=constraints, cols=cols, **kw)
be54a367c165cd6e5e36d04dd2522e0a86862c55 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/9317/be54a367c165cd6e5e36d04dd2522e0a86862c55/query.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 628, 67, 1080, 12, 6429, 16, 1550, 16, 533, 16, 2826, 9987, 4672, 3415, 273, 533, 18, 4939, 2668, 10, 6134, 5323, 67, 24432, 273, 10228, 1019, 2187, 296, 1655, 2187, 296, 2433, 2187, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 628, 67, 1080, 12, 6429, 16, 1550, 16, 533, 16, 2826, 9987, 4672, 3415, 273, 533, 18, 4939, 2668, 10, 6134, 5323, 67, 24432, 273, 10228, 1019, 2187, 296, 1655, 2187, 296, 2433, 2187, 2...
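Aside (not a dataset record): the from_string() context above parses a comparison mode ('~', '^', '$') and an optional negation ('!') off the end of a field name before composing each value. A hypothetical helper showing just that suffix handling:

def split_field(field):
    mode = neg = ''
    if field and field[-1] in ('~', '^', '$'):
        mode, field = field[-1], field[:-1]
    if field and field[-1] == '!':
        neg, field = '!', field[:-1]
    return field, mode, neg

assert split_field('summary~') == ('summary', '~', '')
assert split_field('owner!^') == ('owner', '^', '!')
assert split_field('status') == ('status', '', '')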
"""Format a Decimal class according to the given specifier.
"""Format a Decimal instance according to the given specifier.
def __format__(self, specifier, context=None): """Format a Decimal class according to the given specifier.
f4da77765f8581e31620e9f49c68028e57b0ae85 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/8546/f4da77765f8581e31620e9f49c68028e57b0ae85/decimal.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2139, 972, 12, 2890, 16, 24562, 16, 819, 33, 7036, 4672, 3536, 1630, 279, 11322, 667, 4888, 358, 326, 864, 24562, 18, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2139, 972, 12, 2890, 16, 24562, 16, 819, 33, 7036, 4672, 3536, 1630, 279, 11322, 667, 4888, 358, 326, 864, 24562, 18, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100,...
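Aside (not a dataset record): the docstring fix above ("class" to "instance") concerns Decimal.__format__, which formats a Decimal instance with a standard format specifier:

from decimal import Decimal

assert format(Decimal("1234.5"), ",.2f") == "1,234.50"
assert format(Decimal("0.125"), ".1%") == "12.5%"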
If the command needs input, it can be defined with the `stdin` argument. It is not possible to give input to the command later.
If the command needs input, it can be defined with the `stdin` argument. It is not possible to give input to the command later.
def start_process(self, command, stdin='', alias=None): """Starts the given command as a background process. Starts the process in the background and sets this process as the current process. The following calls of the keywords `Read Process Output` and `Stop Process` affect this process, unless the keyword `Switch Process` is used. Then these keywords affect the selected process. If the command needs input, it can be defined with the `stdin` argument. It is not possible to give input to the command later. Returns the index of this process. The indexing starts from 1, and it can be used to switch between the processes with the `Switch Process` keyword. To end all processes and reset indexing, the `Stop All Processes` keyword must be used. The optional `alias` is a name for this process that may be used with `Switch Process` instead of the returned index. Example: | Start Process | longlasting.sh | | Do Something | | | ${stdout}= | Read Process Output | | Should Contain | ${stdout} | Expected text | | [Teardown] | Stop All Processes | """ process = _Process(self._process_command(command), stdin) return PROCESSES.register(process, alias)
6f8bd1e5c5b0db8d92d174d89dc27677542321fb /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/7408/6f8bd1e5c5b0db8d92d174d89dc27677542321fb/OperatingSystem.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 787, 67, 2567, 12, 2890, 16, 1296, 16, 8801, 2218, 2187, 2308, 33, 7036, 4672, 3536, 11203, 326, 864, 1296, 487, 279, 5412, 1207, 18, 225, 30620, 326, 1207, 316, 326, 5412, 471, 1678, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 787, 67, 2567, 12, 2890, 16, 1296, 16, 8801, 2218, 2187, 2308, 33, 7036, 4672, 3536, 11203, 326, 864, 1296, 487, 279, 5412, 1207, 18, 225, 30620, 326, 1207, 316, 326, 5412, 471, 1678, ...
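Aside (not a dataset record): the start_process docstring above describes a 1-based process registry with optional aliases. A stripped-down sketch under hypothetical names, not the library's real implementation:

class ProcessRegistry:
    def __init__(self):
        self._procs = []
        self._aliases = {}

    def register(self, proc, alias=None):
        self._procs.append(proc)
        index = len(self._procs)   # indexing starts from 1
        if alias:
            self._aliases[alias] = index
        return index

    def switch(self, key):
        # `key` may be the returned index or a registered alias.
        index = self._aliases.get(key, key)
        return self._procs[int(index) - 1]

reg = ProcessRegistry()
idx = reg.register("longlasting.sh", alias="bg")
assert reg.switch("bg") == reg.switch(idx) == "longlasting.sh"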
NSApp = rt.NSApplication.sharedApplication()
def main(): pool = rt.NSAutoreleasePool() # Load Application Framework: rt.NSBundle.bundleWithPath_( '/System/Library/Frameworks/AppKit.framework').load() NSApp = rt.NSApplication.sharedApplication() win = rt.NSWindow.alloc() frame = ((200.0, 300.0), (250.0, 100.0)) win.initWithContentRect_styleMask_backing_defer_ (frame, 15, 2, 0) win.setTitle_ ('HelloWorld') win.setLevel_ (3) # floating window hel = rt.NSButton.alloc().initWithFrame_ (((10.0, 10.0), (80.0, 80.0))) win.contentView().addSubview_ (hel) hel.setBezelStyle_( 4 ) hel.setTitle_( 'Hello!' ) beep = rt.NSSound.alloc() beep.initWithContentsOfFile_byReference_( '/System/Library/Sounds/tink.aiff', 1 ) hel.setSound_( beep ) bye = rt.NSButton.alloc().initWithFrame_ (((100.0, 10.0), (80.0, 80.0))) win.contentView().addSubview_ (bye) bye.setBezelStyle_( 4 ) bye.setTarget_ (NSApp) bye.setAction_ ('stop:') bye.setEnabled_ ( 1 ) bye.setTitle_( 'Goobye!' ) adios = rt.NSSound.alloc() adios.initWithContentsOfFile_byReference_( '/System/Library/Sounds/Basso.aiff', 1 ) bye.setSound_( adios ) win.display()
bfc21dbdbfe71c52cb450ccf5573e49738f63807 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/97/bfc21dbdbfe71c52cb450ccf5573e49738f63807/HelloWorld.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2774, 13332, 225, 2845, 273, 8253, 18, 3156, 7150, 479, 2508, 2864, 1435, 225, 468, 4444, 4257, 13472, 30, 8253, 18, 3156, 3405, 18, 9991, 1190, 743, 67, 12, 1173, 3163, 19, 9313, 19, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2774, 13332, 225, 2845, 273, 8253, 18, 3156, 7150, 479, 2508, 2864, 1435, 225, 468, 4444, 4257, 13472, 30, 8253, 18, 3156, 3405, 18, 9991, 1190, 743, 67, 12, 1173, 3163, 19, 9313, 19, ...
if len(output) > MAX_OUTPUT:
if len(output) > MAX_OUTPUT or output.count('\n') > MAX_OUTPUT_LINES:
def set_output_text(self, output, html, sage=None): if output.count('<?__SAGE__TEXT>') > 1: html = '<h3><font color="red">WARNING: multiple @interacts in one cell disabled (not yet implemented).</font></h3>' output = ''
1f91af200ecd331f4bd5314a6dfe9593a9d0244d /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/9890/1f91af200ecd331f4bd5314a6dfe9593a9d0244d/cell.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 444, 67, 2844, 67, 955, 12, 2890, 16, 876, 16, 1729, 16, 272, 410, 33, 7036, 4672, 309, 876, 18, 1883, 2668, 12880, 972, 55, 2833, 972, 5151, 1870, 13, 405, 404, 30, 1729, 273, 2368,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 444, 67, 2844, 67, 955, 12, 2890, 16, 876, 16, 1729, 16, 272, 410, 33, 7036, 4672, 309, 876, 18, 1883, 2668, 12880, 972, 55, 2833, 972, 5151, 1870, 13, 405, 404, 30, 1729, 273, 2368,...
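Aside (not a dataset record): the diff above extends an output-size guard to also cap the number of lines. A minimal sketch of that check; the limits are placeholders, not Sage's real values:

MAX_OUTPUT = 32 * 1024
MAX_OUTPUT_LINES = 500

def needs_truncation(output):
    # Clip once the output exceeds either the character or the line budget.
    return len(output) > MAX_OUTPUT or output.count('\n') > MAX_OUTPUT_LINES

assert needs_truncation("x" * (MAX_OUTPUT + 1))
assert needs_truncation("line\n" * (MAX_OUTPUT_LINES + 1))
assert not needs_truncation("short output\n")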
return dragatoms, baggage, dragchunks
assert len(fastatoms) == len(dragatoms) + sum([len(chunk.atoms) for chunk in dragchunks]) return dragatoms, slowatoms.values(), dragchunks
def doit(at): mol = at.molecule atoms = atomsets.setdefault(id(mol), {}) # dragged atoms which are in this mol, so far, as atom.key -> atom atoms[at.key] = at # atoms serves later to count them, to let us make fragments, and to identify the source mol
a4a3d79e9974a332eb95fe63b46876035a4c80ae /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/11221/a4a3d79e9974a332eb95fe63b46876035a4c80ae/selectMode.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 741, 305, 12, 270, 4672, 12629, 273, 622, 18, 81, 10545, 9006, 273, 3179, 4424, 18, 542, 1886, 12, 350, 12, 21260, 3631, 2618, 13, 468, 8823, 2423, 9006, 1492, 854, 316, 333, 12629, 16...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 741, 305, 12, 270, 4672, 12629, 273, 622, 18, 81, 10545, 9006, 273, 3179, 4424, 18, 542, 1886, 12, 350, 12, 21260, 3631, 2618, 13, 468, 8823, 2423, 9006, 1492, 854, 316, 333, 12629, 16...
self.doLaterList.add(task)
index = self.doLaterList.add(task)
def __spawnDoLater(self, task): assert(TaskManager.notify.debug('spawning doLater: %s' % (task))) # Add this task to the nameDict nameList = self.nameDict.setdefault(task.name, []) nameList.append(task) # be sure to ask the globalClock for the current frame time # rather than use a cached value; globalClock's frame time may # have been synced since the start of this frame currentTime = globalClock.getFrameTime() task.setStartTimeFrame(currentTime, self.currentFrame) # Cache the time we should wake up for easier sorting task.wakeTime = task.starttime + task.delayTime self.doLaterList.add(task) if self.fVerbose: # Alert the world, a new task is born! messenger.send('TaskManager-spawnDoLater', sentArgs = [task, task.name, index]) return task
9e78047e75d44864814284b16739c72c82e0d98d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8543/9e78047e75d44864814284b16739c72c82e0d98d/Task.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 1752, 9533, 3244, 20607, 12, 2890, 16, 1562, 4672, 1815, 12, 2174, 1318, 18, 12336, 18, 4148, 2668, 1752, 2219, 2093, 741, 20607, 30, 738, 87, 11, 738, 261, 4146, 20349, 468, 1436,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 1752, 9533, 3244, 20607, 12, 2890, 16, 1562, 4672, 1815, 12, 2174, 1318, 18, 12336, 18, 4148, 2668, 1752, 2219, 2093, 741, 20607, 30, 738, 87, 11, 738, 261, 4146, 20349, 468, 1436,...
'res_model': self.ressource[0], 'res_id': self.ressource[1],
'res_model': self.resource[0], 'res_id': self.resource[1],
def _sig_link(self, widget): filename = common.file_selection(_('Select file'), parent=self.win) if not filename: return try: if filename: fname = os.path.basename(filename) args = ('object', 'execute', 'ir.attachment', 'create', { 'name': fname, 'res_model': self.ressource[0], 'res_id': self.ressource[1], 'link': filename, }) try: obj_id = rpc.execute(*args) except Exception, exception: obj_id = rpc.process_exception(exception, self.win, *args) if not obj_id: return self.reload(preview=False) self.preview(int(obj_id)) except IOError: common.message(_('Can not open file!'), self.win, gtk.MESSAGE_ERROR)
bac16d4eaf047074802febdef71309d975cf8061 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/9151/bac16d4eaf047074802febdef71309d975cf8061/attachment.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 7340, 67, 1232, 12, 2890, 16, 3604, 4672, 1544, 273, 2975, 18, 768, 67, 10705, 24899, 2668, 3391, 585, 19899, 982, 33, 2890, 18, 8082, 13, 309, 486, 1544, 30, 327, 775, 30, 309, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 7340, 67, 1232, 12, 2890, 16, 3604, 4672, 1544, 273, 2975, 18, 768, 67, 10705, 24899, 2668, 3391, 585, 19899, 982, 33, 2890, 18, 8082, 13, 309, 486, 1544, 30, 327, 775, 30, 309, ...
self.queueStringCommand(command).addErrback(self.fail)
self.queueStringCommand(command, public=0).addErrback(self.fail)
def queueLogin(self): """Initialise the connection.
10f1a9e0ff16a1ec1371e9a9174eb2e0fcc81c0b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12595/10f1a9e0ff16a1ec1371e9a9174eb2e0fcc81c0b/ftp.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2389, 5358, 12, 2890, 4672, 3536, 4435, 784, 326, 1459, 18, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2389, 5358, 12, 2890, 4672, 3536, 4435, 784, 326, 1459, 18, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100,...
var_list = [v for v in var_list if v.is_imported is True]
var_list = [v for v in var_list if v.is_inherited is True]
def select_variables(self, group=None, value_type=None, inherited=None, public=None, imported=None): """ Return a specified subset of this class's L{sorted_variables} list. If C{value_type} is given, then only return variables whose values have the specified type. If C{group} is given, then only return variables that belong to the specified group. If C{inherited} is True, then only return inherited variables; if C{inherited} is False, then only return local variables.
a31498ac27063c18b41430e7c078ea8b94bb718e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/11420/a31498ac27063c18b41430e7c078ea8b94bb718e/apidoc.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2027, 67, 7528, 12, 2890, 16, 1041, 33, 7036, 16, 460, 67, 723, 33, 7036, 16, 12078, 33, 7036, 16, 1071, 33, 7036, 16, 9101, 33, 7036, 4672, 3536, 2000, 279, 1269, 7931, 434, 333, 66...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2027, 67, 7528, 12, 2890, 16, 1041, 33, 7036, 16, 460, 67, 723, 33, 7036, 16, 12078, 33, 7036, 16, 1071, 33, 7036, 16, 9101, 33, 7036, 4672, 3536, 2000, 279, 1269, 7931, 434, 333, 66...
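Aside (not a dataset record): the diff above corrects the inherited-variables filter to test is_inherited rather than is_imported. A generic sketch of that attribute-based filtering with a hypothetical Var class, not epydoc's apidoc types:

class Var:
    def __init__(self, name, is_imported=False, is_inherited=False):
        self.name = name
        self.is_imported = is_imported
        self.is_inherited = is_inherited

def select(var_list, imported=None, inherited=None):
    # Each filter keeps only variables whose flag matches the request.
    if imported is not None:
        var_list = [v for v in var_list if v.is_imported is imported]
    if inherited is not None:
        var_list = [v for v in var_list if v.is_inherited is inherited]
    return var_list

vs = [Var("a", is_imported=True), Var("b", is_inherited=True)]
assert [v.name for v in select(vs, imported=True)] == ["a"]
assert [v.name for v in select(vs, inherited=True)] == ["b"]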
'\rRegion (%+03.0f/%+03.0f S/N %+04.0f/%+04.0f W/E): %d empty cells.\n' % (tuple(box) + (n_empty_cells,)))
'\rRegion (%+03.0f/%+03.0f S/N %+04.0f/%+04.0f W/E): %d empty cell%s.\n' % (tuple(box) + (n_empty_cells,plural_suffix)))
def iter_subbox_grid(station_records, max_months, first_year, radius): """Convert the input *station_records*, into a gridded anomaly dataset which is returned as an iterator. *max_months* is the maximum number of months in any station record. *first_year* is the first year in the dataset. *radius* is the combining radius in kilometres. """ station_records = list(station_records) log = sys.stdout # Critical radius as an angle of arc arc = radius / earth.radius arcdeg = arc * 180 / math.pi regions = list(eqarea.gridsub()) for region in regions: box, subboxes = region[0], list(region[1]) # Extend box, by half a box east and west and by arc north # and south. extent = [box[0] - arcdeg, box[1] + arcdeg, box[2] - 0.5 * (box[3] - box[2]), box[3] + 0.5 * (box[3] - box[2])] if box[0] <= -90 or box[1] >= 90: # polar extent[2] = -180.0 extent[3] = +180.0 region_records = list(inbox(station_records, *extent)) # Descending sort by number of good records # TODO: Switch to using Python's sort method here, although it # will change the results. sort(region_records, lambda x,y: y.good_count - x.good_count) # Count how many cells are empty n_empty_cells = 0 # Used to generate the "subbox at" rows in the log. lastcentre = (None, None) for subbox in subboxes: # Select and weight stations centre = eqarea.centre(subbox) log.write("\rsubbox at %+05.1f%+06.1f (%d empty)" % ( centre + (n_empty_cells,))) log.flush() lastcentre = centre # Of possible station records for this region, filter for those # from stations within radius of subbox centre. incircle_records = list(incircle(region_records, arc, *centre)) # Combine data. subbox_series = [MISSING] * max_months if len(incircle_records) == 0: box_obj = giss_data.SubboxRecord(subbox_series, box=list(subbox), stations=0, station_months=0, d=MISSING) n_empty_cells += 1 yield box_obj continue # Initialise data with first station record = incircle_records[0] total_good_months = record.good_count total_stations = 1 max_weight = record.weight offset = record.rel_first_month - 1 a = record.series # just a temporary subbox_series[offset:offset + len(a)] = a weight = [0.0] * max_months for i in range(len(a)): if valid(a[i]): weight[i + offset] = record.weight # Add in the remaining stations for record in incircle_records[1:]: # TODO: A StationMethod method to produce a padded data series # would be good here. Hence we could just do: # new = record.padded_series(max_months) new = [MISSING] * max_months aa, bb = record.rel_first_month, record.rel_last_month new[aa - 1:bb] = record.series station_months = series.combine( subbox_series, weight, new, record.weight, record.rel_first_year, record.rel_last_year + 1, parameters.gridding_min_overlap) total_good_months += station_months if station_months == 0: continue total_stations += 1 if max_weight < record.weight: max_weight = record.weight series.anomalize(subbox_series, parameters.gridding_reference_period, first_year) box_obj = giss_data.SubboxRecord(subbox_series, n=max_months, box=list(subbox), stations=total_stations, station_months=total_good_months, d=radius*(1-max_weight)) yield box_obj log.write( '\rRegion (%+03.0f/%+03.0f S/N %+04.0f/%+04.0f W/E): %d empty cells.\n' % (tuple(box) + (n_empty_cells,))) log.write("\n")
203866b6b82b8ed20bc7ad44b7213be5111f7dd2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/6890/203866b6b82b8ed20bc7ad44b7213be5111f7dd2/step3.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1400, 67, 1717, 2147, 67, 5222, 12, 17894, 67, 7094, 16, 943, 67, 27584, 16, 1122, 67, 6874, 16, 5725, 4672, 3536, 2723, 326, 810, 380, 17894, 67, 7094, 14, 16, 1368, 279, 3068, 785, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1400, 67, 1717, 2147, 67, 5222, 12, 17894, 67, 7094, 16, 943, 67, 27584, 16, 1122, 67, 6874, 16, 5725, 4672, 3536, 2723, 326, 810, 380, 17894, 67, 7094, 14, 16, 1368, 279, 3068, 785, ...
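Aside (not a dataset record): the diff above adds a plural suffix to the "empty cells" log line. The whole fix in miniature:

def empty_cells_message(n_empty_cells):
    plural_suffix = '' if n_empty_cells == 1 else 's'
    return '%d empty cell%s.' % (n_empty_cells, plural_suffix)

assert empty_cells_message(1) == '1 empty cell.'
assert empty_cells_message(2) == '2 empty cells.'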
this = apply(_quickfix.new_StrikeMultiplier, args)
this = _quickfix.new_StrikeMultiplier(*args)
def __init__(self, *args): this = apply(_quickfix.new_StrikeMultiplier, args) try: self.this.append(this) except: self.this = this
7e632099fd421880c8c65fb0cf610d338d115ee9 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8819/7e632099fd421880c8c65fb0cf610d338d115ee9/quickfix.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 380, 1968, 4672, 333, 273, 389, 19525, 904, 18, 2704, 67, 1585, 2547, 23365, 30857, 1968, 13, 775, 30, 365, 18, 2211, 18, 6923, 12, 2211, 13, 1335, 30, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 380, 1968, 4672, 333, 273, 389, 19525, 904, 18, 2704, 67, 1585, 2547, 23365, 30857, 1968, 13, 775, 30, 365, 18, 2211, 18, 6923, 12, 2211, 13, 1335, 30, ...
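Aside (not a dataset record): the diff above swaps the long-deprecated builtin apply() for argument unpacking; the two spellings are equivalent, and only the second survives in Python 3:

def ctor(a, b):
    return (a, b)

args = (1, 2)
# Python 2 only: this = apply(ctor, args)
this = ctor(*args)   # modern, equivalent call
assert this == (1, 2)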
mn.pluralequation ='(n != 1)'
mn.pluralequation = '(n != 1)'
def create_default_languages(): from pootle_app.core import Language af = Language(code="af") af.fullname = u"Afrikaans" af.specialchars = u"ëïêôûáéíóúý" af.nplurals = '2' af.pluralequation = "(n != 1)" af.save()
705dd9e8bba1245496893550d0b060f0049a0893 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/11388/705dd9e8bba1245496893550d0b060f0049a0893/initdb.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 752, 67, 1886, 67, 14045, 13332, 628, 293, 1632, 298, 67, 2910, 18, 3644, 1930, 9889, 225, 10073, 273, 9889, 12, 710, 1546, 1727, 7923, 10073, 18, 21885, 273, 582, 6, 12664, 566, 7282, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 752, 67, 1886, 67, 14045, 13332, 628, 293, 1632, 298, 67, 2910, 18, 3644, 1930, 9889, 225, 10073, 273, 9889, 12, 710, 1546, 1727, 7923, 10073, 18, 21885, 273, 582, 6, 12664, 566, 7282, ...
s = s + '<a href="/channel/?ref=' + ref + '">' + name + '</a>'
s = s + '<a href="/channel?ref=' + ref + '">' + name + '</a>'
def render(self, req): s = '<br/>'
03455b02a8d76c3dcd90a8fa9bb3fe4f47b04f11 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12172/03455b02a8d76c3dcd90a8fa9bb3fe4f47b04f11/plugin.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1743, 12, 2890, 16, 1111, 4672, 272, 273, 2368, 2848, 18280, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1743, 12, 2890, 16, 1111, 4672, 272, 273, 2368, 2848, 18280, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100...
if isinstance(filename, str): filename = filename.encode(filesystemencoding)
assert isinstance(filename, str)
def askopenfile(self): dir, base = self.defaultfilename("open") if not self.opendialog: self.opendialog = tkFileDialog.Open(master=self.text, filetypes=self.filetypes) filename = self.opendialog.show(initialdir=dir, initialfile=base) if isinstance(filename, str): filename = filename.encode(filesystemencoding) return filename
64663ca9ca89d89a4f520f3d755de4fbbb0ec79c /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/8125/64663ca9ca89d89a4f520f3d755de4fbbb0ec79c/IOBinding.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 6827, 3190, 768, 12, 2890, 4672, 1577, 16, 1026, 273, 365, 18, 1886, 3459, 2932, 3190, 7923, 309, 486, 365, 18, 556, 409, 3529, 30, 365, 18, 556, 409, 3529, 273, 13030, 812, 6353, 18, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 6827, 3190, 768, 12, 2890, 4672, 1577, 16, 1026, 273, 365, 18, 1886, 3459, 2932, 3190, 7923, 309, 486, 365, 18, 556, 409, 3529, 30, 365, 18, 556, 409, 3529, 273, 13030, 812, 6353, 18, ...
sage: maxima.gcd._sage_doc_()
sage: maxima.gcd._sage_doc_()
def _sage_doc_(self): """ EXAMPLES: sage: maxima.gcd._sage_doc_() #not tested -- Function: gcd (<p_1>, <p_2>, <x_1>, ...) ... """ M = self._parent return M.help(self._name)
327892a197ad345f40545c760b7d91fabbe80a24 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/9890/327892a197ad345f40545c760b7d91fabbe80a24/maxima.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 87, 410, 67, 2434, 67, 12, 2890, 4672, 3536, 5675, 8900, 11386, 30, 272, 410, 30, 943, 13888, 18, 75, 4315, 6315, 87, 410, 67, 2434, 67, 1435, 468, 902, 18432, 1493, 4284, 30, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 87, 410, 67, 2434, 67, 12, 2890, 4672, 3536, 5675, 8900, 11386, 30, 272, 410, 30, 943, 13888, 18, 75, 4315, 6315, 87, 410, 67, 2434, 67, 1435, 468, 902, 18432, 1493, 4284, 30, 1...
"""
"""Controls the view access to parts of the repository.
def __str__(self): return self.action
0091f73d9ddb2e28de9da1a025ba0c0cfaf2b458 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/9317/0091f73d9ddb2e28de9da1a025ba0c0cfaf2b458/api.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 701, 972, 12, 2890, 4672, 327, 365, 18, 1128, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 701, 972, 12, 2890, 4672, 327, 365, 18, 1128, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, ...
if (OMIT.count("HELIX")==0): WARNINGS.append("HELIX is currently nonoperational") WARNINGS.append("I have automatically added this command-line option: --no-helix") OMIT.append("HELIX") if (os.path.isdir(os.path.join(THIRDPARTY, "win-libs-vc7", "miles"))==0): if (OMIT.count("MILES")==0): WARNINGS.append("You do not have a copy of MILES sound system") WARNINGS.append("I have automatically added this command-line option: --no-miles") OMIT.append("MILES") for x in PACKAGES: if (OMIT.count(x)==0): if (DTOOLCONFIG.has_key("HAVE_"+x)): DTOOLCONFIG["HAVE_"+x] = '1' DTOOLCONFIG["HAVE_NET"] = DTOOLCONFIG["HAVE_NSPR"] if (OMIT.count("NVIDIACG")==0): DTOOLCONFIG["HAVE_CG"] = '1' DTOOLCONFIG["HAVE_CGGL"] = '1' if (OPTIMIZE <= 3): if (DTOOLCONFIG["HAVE_NET"] != 'UNDEF'): DTOOLCONFIG["DO_PSTATS"] = '1' if (OPTIMIZE <= 3): DTOOLCONFIG["DO_COLLISION_RECORDING"] = '1' if (OPTIMIZE <= 3): DTOOLCONFIG["DO_MEMORY_USAGE"] = '1' if (OPTIMIZE <= 3): DTOOLCONFIG["NOTIFY_DEBUG"] = '1' if (sys.platform != "win32"): BUILTLIB = os.path.abspath(PREFIX+"/lib") try: LDPATH = [] f = file("/etc/ld.so.conf","r") for line in f: LDPATH.append(line.rstrip()) f.close() except: LDPATH = [] if (os.environ.has_key("LD_LIBRARY_PATH")): LDPATH = LDPATH + os.environ["LD_LIBRARY_PATH"].split(":") if (LDPATH.count(BUILTLIB)==0): WARNINGS.append("Caution: the "+PREFIX+"/lib directory is not in LD_LIBRARY_PATH") WARNINGS.append("or /etc/ld.so.conf. You must add it before using panda.") if (os.environ.has_key("LD_LIBRARY_PATH")): os.environ["LD_LIBRARY_PATH"] = BUILTLIB + ":" + os.environ["LD_LIBRARY_PATH"] else: os.environ["LD_LIBRARY_PATH"] = BUILTLIB def printStatus(header,warnings): global VERBOSE if VERBOSE >= -2: print "" print "-------------------------------------------------------------------" print header tkeep = "" tomit = "" for x in PACKAGES: if (OMIT.count(x)==0): tkeep = tkeep + x + " " else: tomit = tomit + x + " " print "Makepanda: Prefix Directory:",PREFIX print "Makepanda: Compiler:",COMPILER print "Makepanda: Optimize:",OPTIMIZE print "Makepanda: Keep Pkg:",tkeep print "Makepanda: Omit Pkg:",tomit print "Makepanda: Thirdparty dir:",THIRDPARTY print "Makepanda: DirectX SDK dir:",DIRECTXSDK print "Makepanda: Verbose vs. 
Quiet Level:",VERBOSE if (GENMAN): print "Makepanda: Generate API reference manual" else : print "Makepanda: Don't generate API reference manual" if (sys.platform == "win32"): if INSTALLER: print "Makepanda: Build installer, using",COMPRESSOR else : print "Makepanda: Don't build installer" print "Makepanda: Version ID: "+VERSION for x in warnings: print "Makepanda: "+x print "-------------------------------------------------------------------" print "" sys.stdout.flush() printStatus("Makepanda Initial Status Report", WARNINGS) MakeDirectory(PREFIX) MakeDirectory(PREFIX+"/bin") MakeDirectory(PREFIX+"/lib") MakeDirectory(PREFIX+"/etc") MakeDirectory(PREFIX+"/plugins") MakeDirectory(PREFIX+"/include") MakeDirectory(PREFIX+"/include/parser-inc") MakeDirectory(PREFIX+"/include/parser-inc/openssl") MakeDirectory(PREFIX+"/include/parser-inc/Cg") MakeDirectory(PREFIX+"/include/openssl") MakeDirectory(PREFIX+"/tmp") if (OMIT.count("PYTHON")==0): MakeDirectory(PREFIX+"/direct") MakeDirectory(PREFIX+"/pandac") MakeDirectory(PREFIX+"/pandac/input") def PkgSelected(pkglist, pkg): if (pkglist.count(pkg)==0): return 0 if (OMIT.count(pkg)): return 0 return 1 ALLIN=[] global CxxIncludeCache CxxIncludeCache = {} iCachePath=PREFIX+"/tmp/makepanda-icache" try: icache = open(iCachePath,'rb') except: icache = 0 if (icache!=0): CxxIncludeCache = cPickle.load(icache) icache.close() global CxxIncludeRegex CxxIncludeRegex = re.compile('^[ \t]*[ def CxxGetIncludes(path): date = filedate(path) if (CxxIncludeCache.has_key(path)): cached = CxxIncludeCache[path] if (cached[0]==date): return cached[1] try: sfile = open(path, 'rb') except: exit("Cannot open source file \""+path+"\" for reading.") include = [] for line in sfile: match = CxxIncludeRegex.match(line,0) if (match): incname = match.group(1) include.append(incname) sfile.close() CxxIncludeCache[path] = [date, include] return include def CxxFindSource(name, ipath): for dir in ipath: if (dir == "."): full = name else: full = dir + "/" + name if filedate(full) > 0: return full return 0 def CxxFindHeader(srcfile, incfile, ipath): if (incfile[:1]=="."): last = srcfile.rfind("/") if (last < 0): exit("CxxFindHeader cannot handle this case srcdir = srcfile[:last+1] while (incfile[:1]=="."): if (incfile[:2]=="./"): incfile = incfile[2:] elif (incfile[:3]=="../"): incfile = incfile[3:] last = srcdir[:-1].rfind("/") if (last < 0): exit("CxxFindHeader cannot handle this case srcdir = srcdir[:last+1] else: exit("CxxFindHeader cannot handle this case full = srcdir + incfile if filedate(full) > 0: return full return 0 else: return CxxFindSource(incfile, ipath) global CxxIgnoreHeader global CxxDependencyCache CxxIgnoreHeader = {} CxxDependencyCache = {} def CxxCalcDependencies(srcfile, ipath, ignore): if (CxxDependencyCache.has_key(srcfile)): return CxxDependencyCache[srcfile] if (ignore.count(srcfile)): return [] dep = {} dep[srcfile] = 1 includes = CxxGetIncludes(srcfile) for include in includes: if (CxxIgnoreHeader.has_key(include)==0): header = CxxFindHeader(srcfile, include, ipath) if (header!=0): if (ignore.count(header)==0): hdeps = CxxCalcDependencies(header, ipath, [srcfile]+ignore) for x in hdeps: dep[x] = 1 else: print "CAUTION: header file "+include+" cannot be found in "+srcfile+" IPATH="+str(ipath) result = dep.keys() CxxDependencyCache[srcfile] = result return result def CxxCalcDependenciesAll(srcfiles, ipath): dep = {} for srcfile in srcfiles: for x in CxxCalcDependencies(srcfile, ipath, []): dep[x] = 1 return dep.keys() def ReadCvsEntries(dir): try: if 
(os.path.isfile(dir+"/CVS-Entries")): srchandle = open(dir+"/CVS-Entries", "r") else: srchandle = open(dir+"/CVS/Entries", "r") files = [] for line in srchandle: if (line[0]=="/"): s = line.split("/",2) if (len(s)==3): files.append(s[1]) srchandle.close() files.sort() return files except: return 0 def CopyFile(dstfile,srcfile): if (dstfile[-1]=='/'): dstdir = dstfile fnl = srcfile.rfind("/") if (fnl < 0): fn = srcfile else: fn = srcfile[fnl+1:] dstfile = dstdir + fn if (older([dstfile],srcfile)): global VERBOSE if VERBOSE >= 1: print "Copying \"%s\" --> \"%s\""%(srcfile, dstfile) WriteFile(dstfile,ReadFile(srcfile)) def CopyAllFiles(dstdir, srcdir, suffix=""): suflen = len(suffix) files = os.listdir(srcdir) for x in files: if (os.path.isfile(srcdir+x)): if (suflen==0) or (x[-suflen:]==suffix): CopyFile(dstdir+x, srcdir+x) def CopyAllHeaders(dir, skip=[]): dirlist = os.listdir(dir) dirlist.sort() files = fnmatch.filter(dirlist,"*.h")+fnmatch.filter(dirlist,"*.I")+fnmatch.filter(dirlist,"*.T") copied = [] if (skip!="ALL"): for filename in files: if (skip.count(filename)==0): srcfile = dir + "/" + filename dstfile = PREFIX + "/include/" + filename if (older([dstfile],srcfile)): copied.append(filename) WriteFile(dstfile,ReadFile(srcfile)) cvsentries = ReadCvsEntries(dir) if (cvsentries != 0): cvsheaders = fnmatch.filter(cvsentries,"*.h")+fnmatch.filter(cvsentries,"*.I")+fnmatch.filter(cvsentries,"*.T") for x in SetDifference(files, cvsheaders): if ((skip=="ALL") or (skip.count(x)==0)): msg = "WARNING: header file %s is in your directory, but not in CVS"%(dir+"/"+x) print msg WARNINGS.append(msg) for x in SetDifference(cvsheaders, files): if ((skip=="ALL") or (skip.count(x)==0)): msg = "WARNING: header file %s is CVS, but not in your directory"%(dir+"/"+x) print msg WARNINGS.append(msg) def CopyTree(dstdir,srcdir): if (os.path.isdir(dstdir)): return 0 if (COMPILER=="MSVC7"): cmd = 'xcopy.exe /I/Y/E/Q "' + srcdir + '" "' + dstdir + '"' if (COMPILER=="LINUXA"): cmd = 'cp --recursive --force ' + srcdir + ' ' + dstdir oscmd(cmd) updatefiledate(dstdir) def BuildWorker(taskqueue, donequeue): while (1): task = taskqueue.get() sys.stdout.flush() if (task == 0): return try: apply(task[0], task[1]) except: exit("Makepanda contains a bug. 
Re-run makepanda single-threaded to find it.") donequeue.put(task) def AllSourcesReady(task, pending): sources = task[3] for x in sources: if (pending.has_key(x)): return 0 return 1 def ParDependencyQueue(tasklist): global BUILTANYTHING donequeue=Queue.Queue() taskqueue=Queue.Queue() pending = {} for task in tasklist: for target in task[2]: pending[target] = 1 for i in range(WORKERCOUNT): th = threading.Thread(target=BuildWorker, args=[taskqueue,donequeue]) th.setDaemon(1) th.start() tasksqueued = 0 while (1): print "tasks queued: "+str(tasksqueued) if (tasksqueued < WORKERCOUNT*2): extras = [] for task in tasklist: if (tasksqueued < WORKERCOUNT*5) & (AllSourcesReady(task, pending)): if (older(task[2], task[3])): tasksqueued += 1 BUILTANYTHING=1 taskqueue.put(task) else: for target in task[2]: del pending[target] else: extras.append(task) tasklist = extras sys.stdout.flush() if (tasksqueued == 0): break donetask = donequeue.get() sys.stdout.flush() tasksqueued -= 1 for target in donetask[2]: updatefiledate(target) del pending[target] for i in range(WORKERCOUNT): taskqueue.put(0) if (len(tasklist)>0): exit("Dependency problem - task unsatisfied: "+str(tasklist[0][2])) def SeqDependencyQueue(tasklist): global BUILTANYTHING for task in tasklist: if (older(task[2], task[3])): BUILTANYTHING=1 apply(task[0], task[1]) for target in task[2]: updatefiledate(target) def RunDependencyQueue(tasklist): if (WORKERCOUNT>1): ParDependencyQueue(tasklist) else: SeqDependencyQueue(tasklist) def DependencyBuild(fn, args, targets, sources): DEPENDENCYQUEUE.append([fn,args,targets,sources]) def CompileCxxMSVC7(wobj,fullsrc,ipath,opts): cmd = "cl.exe /Fo" + wobj + " /nologo /c" if (OMIT.count("PYTHON")==0): cmd = cmd + " /I" + PREFIX + "/python/include" if (opts.count("DXSDK")): cmd = cmd + ' /I"' + DIRECTXSDK + '/include"' for ver in ["MAYA5","MAYA6","MAYA65"]: if (opts.count(ver)): cmd = cmd + ' /I"' + MAYASDK[ver] + '/include"' for max in ["MAX5","MAX6","MAX7"]: if (PkgSelected(opts,max)): cmd = cmd + ' /I"' + MAXSDK[max] + '/include" /I"' + MAXSDKCS[max] + '" /D' + max for pkg in PACKAGES: if (pkg[:4] != "MAYA") and PkgSelected(opts,pkg): cmd = cmd + " /I" + THIRDPARTY + "/win-libs-vc7/" + pkg.lower() + "/include" for x in ipath: cmd = cmd + " /I" + x if (opts.count('NOFLOATWARN')): cmd = cmd + ' /wd4244 /wd4305' if (opts.count("WITHINPANDA")): cmd = cmd + ' /DWITHIN_PANDA' if (opts.count("MSFORSCOPE")==0): cmd = cmd + ' /Zc:forScope' optlevel = getoptlevel(opts,OPTIMIZE) if (optlevel==1): cmd = cmd + " /MD /Zi /RTCs /GS" if (optlevel==2): cmd = cmd + " /MD /Zi " if (optlevel==3): cmd = cmd + " /MD /Zi /O2 /Ob2 /DFORCE_INLINING " if (optlevel==4): cmd = cmd + " /MD /Zi /Ox /Ob2 /DFORCE_INLINING /GL " cmd = cmd + " /Fd" + wobj[:-4] + ".pdb" building = getbuilding(opts) if (building): cmd = cmd + " /DBUILDING_" + building cmd = cmd + " /EHsc /Zm300 /DWIN32_VC /DWIN32 /W3 " + fullsrc oscmd(cmd) def CompileCxxLINUXA(wobj,fullsrc,ipath,opts): if (fullsrc[-2:]==".c"): cmd = 'gcc -c -o ' + wobj else: cmd = 'g++ -ftemplate-depth-30 -c -o ' + wobj if (OMIT.count("PYTHON")==0): cmd = cmd + ' -I"' + PYTHONSDK + '"' if (PkgSelected(opts,"VRPN")): cmd = cmd + ' -I' + THIRDPARTY + '/linux-libs-a/vrpn/include' if (PkgSelected(opts,"FFTW")): cmd = cmd + ' -I' + THIRDPARTY + '/linux-libs-a/fftw/include' if (PkgSelected(opts,"FMOD")): cmd = cmd + ' -I' + THIRDPARTY + '/linux-libs-a/fmod/include' if (PkgSelected(opts,"NVIDIACG")): cmd = cmd + ' -I' + THIRDPARTY + '/linux-libs-a/nvidiacg/include' if (PkgSelected(opts,"NSPR")): 
cmd = cmd + ' -I' + THIRDPARTY + '/linux-libs-a/nspr/include' if (PkgSelected(opts,"FREETYPE")): cmd = cmd + ' -I/usr/include/freetype2' for x in ipath: cmd = cmd + ' -I' + x if (opts.count("WITHINPANDA")): cmd = cmd + ' -DWITHIN_PANDA' optlevel = getoptlevel(opts,OPTIMIZE) if (optlevel==1): cmd = cmd + " -g" if (optlevel==2): cmd = cmd + " -O1" if (optlevel==3): cmd = cmd + " -O2" if (optlevel==4): cmd = cmd + " -O2" building = getbuilding(opts) if (building): cmd = cmd + " -DBUILDING_" + building cmd = cmd + ' ' + fullsrc oscmd(cmd) def EnqueueCxx(obj=0,src=0,ipath=[],opts=[],xdep=[]): if ((obj==0)|(src==0)): exit("syntax error in EnqueueCxx directive") ipath = [PREFIX+"/tmp"] + ipath + [PREFIX+"/include"] fullsrc = CxxFindSource(src, ipath) if (fullsrc == 0): exit("Cannot find source file "+src) dep = CxxCalcDependencies(fullsrc, ipath, []) + xdep if (COMPILER=="MSVC7"): wobj = PREFIX+"/tmp/"+obj DependencyBuild(CompileCxxMSVC7, [wobj,fullsrc,ipath,opts], [wobj], dep) if (COMPILER=="LINUXA"): wobj = PREFIX+"/tmp/" + obj[:-4] + ".o" DependencyBuild(CompileCxxLINUXA, [wobj,fullsrc,ipath,opts], [wobj], dep) def CompileBisonMSVC7(pre, dsth, dstc, wobj, ipath, opts, src): fn = os.path.basename(src) CopyFile(PREFIX+"/tmp/", src) CopyFile(PREFIX+"/tmp/", "thirdparty/win-util/bison.simple") bisonFullPath=os.path.abspath("thirdparty/win-util/bison.exe") cmd = "cd "+PREFIX+"/tmp & "+bisonFullPath cmd = cmd.replace("/","\\") oscmd(cmd + " -y -d -p " + pre + " " + fn) CopyFile(dstc, PREFIX+"/tmp/y_tab.c") CopyFile(dsth, PREFIX+"/tmp/y_tab.h") CompileCxxMSVC7(wobj,dstc,ipath,opts) def CompileBisonLINUXA(pre, dsth, dstc, wobj, ipath, opts, src): fn = os.path.basename(src) CopyFile(PREFIX+"/tmp/", src) oscmd("cd "+PREFIX+"/tmp ; bison -y -d -p "+pre+" "+fn) CopyFile(dstc, PREFIX+"/tmp/y.tab.c") CopyFile(dsth, PREFIX+"/tmp/y.tab.h") CompileCxxLINUXA(wobj,dstc,ipath,opts) def EnqueueBison(ipath=0,opts=0,pre=0,obj=0,dsth=0,src=0): if ((ipath==0)|(opts==0)|(pre==0)|(obj==0)|(dsth==0)|(src==0)): exit("syntax error in EnqueueBison directive") dstc=obj[:-4]+".cxx" if (OMIT.count("BISON")): dir = os.path.dirname(src) CopyFile(PREFIX+"/tmp/"+dstc, dir+"/"+dstc+".prebuilt") CopyFile(PREFIX+"/tmp/"+dsth, dir+"/"+dsth+".prebuilt") EnqueueCxx(ipath=ipath,opts=opts,obj=obj,src=dstc) return() ipath = [PREFIX+"/tmp"] + ipath + [PREFIX+"/include"] fullsrc = CxxFindSource(src, ipath) if (fullsrc == 0): exit("Cannot find source file "+src) dstc=PREFIX+"/tmp/"+dstc dsth=PREFIX+"/tmp/"+dsth if (COMPILER=="MSVC7"): wobj=PREFIX+"/tmp/"+obj DependencyBuild(CompileBisonMSVC7, [pre,dsth,dstc,wobj,ipath,opts,fullsrc], [dsth, wobj], [fullsrc]) if (COMPILER=="LINUXA"): wobj=PREFIX+"/tmp/"+obj[:-4]+".o" DependencyBuild(CompileBisonLINUXA, [pre,dsth,dstc,wobj,ipath,opts,fullsrc], [dsth, wobj], [fullsrc]) def CompileFlexMSVC7(pre,dst,src,wobj,ipath,opts,dashi): sys.stdout.flush() fn = os.path.basename(src) CopyFile(PREFIX+"/tmp/", src) flexFullPath=os.path.abspath("thirdparty/win-util/flex.exe") flexFullPath=flexFullPath.replace("/","\\") cmd = "cd "+PREFIX+"/tmp & "+flexFullPath cmd = cmd.replace("/","\\") if (dashi): oscmd(cmd +" -i -P" + pre + " -olex.yy.c " + fn) else: oscmd(cmd +" -P" + pre + " -olex.yy.c " + fn) replaceInFile(PREFIX+'/tmp/lex.yy.c', dst, ' CompileCxxMSVC7(wobj,dst,ipath,opts) def CompileFlexLINUXA(pre,dst,src,wobj,ipath,opts,dashi): fn = os.path.basename(src) CopyFile(PREFIX+"/tmp/", src) if (dashi): oscmd("cd "+PREFIX+"/tmp ; flex -i -P" + pre + " -olex.yy.c " + fn) else: oscmd("cd "+PREFIX+"/tmp ; flex -P" + 
pre + " -olex.yy.c " + fn) oscmd('cp '+PREFIX+'/tmp/lex.yy.c '+dst) CompileCxxLINUXA(wobj,dst,ipath,opts) def EnqueueFlex(ipath=0,opts=0,pre=0,obj=0,src=0,dashi=0): if ((ipath==0)|(opts==0)|(pre==0)|(obj==0)|(src==0)): exit("syntax error in EnqueueFlex directive") dst=obj[:-4]+".cxx" if (OMIT.count("FLEX")): dir = os.path.dirname(src) CopyFile(PREFIX+"/tmp/"+dst, dir+"/"+dst+".prebuilt") EnqueueCxx(ipath=IPATH, opts=OPTS, obj=obj, src=dst) return() ipath = [PREFIX+"/tmp"] + ipath + [PREFIX+"/include"] fullsrc = CxxFindSource(src, ipath) if (fullsrc == 0): exit("Cannot find source file "+src) if (COMPILER=="MSVC7"): wobj=PREFIX+"/tmp/"+dst[:-4]+".obj" dst=PREFIX+"/tmp/"+dst DependencyBuild(CompileFlexMSVC7, [pre,dst,fullsrc,wobj,ipath,opts,dashi], [wobj], [fullsrc]) if (COMPILER=="LINUX"): wobj=PREFIX+"/tmp/"+dst[:-4]+".o" dst=PREFIX+"/tmp/"+dst DependencyBuild(CompileFlexLINUXA, [pre,dst,fullsrc,wobj,ipath,opts,dashi], [wobj], [fullsrc]) def CompileIgateMSVC7(ipath,opts,outd,outc,wobj,src,module,library,files): if (OMIT.count("PYTHON")): WriteFile(outc,"") else: dotdots = "" for i in range(0,src.count("/")+1): dotdots = dotdots + "../" cmd = "cd "+src+" & "+dotdots + PREFIX + "/bin/interrogate.exe" cmd = cmd.replace("/","\\") cmd = cmd + ' -DCPPPARSER -D__STDC__=1 -D__cplusplus -longlong __int64 -D_X86_ -DWIN32_VC -D_WIN32' cmd = cmd + ' -D"_declspec(param)=" -D_near -D_far -D__near -D__far -D__stdcall' optlevel=getoptlevel(opts,OPTIMIZE) if (optlevel==1): cmd = cmd + ' ' if (optlevel==2): cmd = cmd + ' ' if (optlevel==3): cmd = cmd + ' -DFORCE_INLINING' if (optlevel==4): cmd = cmd + ' -DFORCE_INLINING' cmd = cmd + ' -S' + dotdots + PREFIX + '/include/parser-inc' cmd = cmd + ' -I' + dotdots + PREFIX + '/python/include' for pkg in PACKAGES: if (PkgSelected(opts,pkg)): cmd = cmd + ' -I' + dotdots + THIRDPARTY + "/win-libs-vc7/" + pkg.lower() + "/include" cmd = cmd + ' -oc ' + dotdots + outc + ' -od ' + dotdots + outd cmd = cmd + ' -fnames -string -refcount -assert -python-native' for x in ipath: cmd = cmd + ' -I' + dotdots + x building = getbuilding(opts) if (building): cmd = cmd + " -DBUILDING_"+building if (opts.count("WITHINPANDA")): cmd = cmd + " -DWITHIN_PANDA" cmd = cmd + ' -module ' + module + ' -library ' + library if ((COMPILER=="MSVC7") and opts.count("DXSDK")): cmd = cmd + ' -I"' + DIRECTXSDK + '/include"' for ver in ["MAYA5","MAYA6","MAYA65"]: if ((COMPILER=="MSVC7") and opts.count(ver)): cmd = cmd + ' -I"' + MAYASDK[ver] + '/include"' for x in files: cmd = cmd + ' ' + x oscmd(cmd) CompileCxxMSVC7(wobj,outc,ipath,opts) def CompileIgateLINUXA(ipath,opts,outd,outc,wobj,src,module,library,files): if (OMIT.count("PYTHON")): WriteFile(outc,"") else: dotdots = "" for i in range(0,src.count("/")+1): dotdots = dotdots + "../" cmd = "cd "+src+" ; "+dotdots + PREFIX + '/bin/interrogate' cmd = cmd + ' -DCPPPARSER -D__STDC__=1 -D__cplusplus -D__i386__ -D__const=const' optlevel = getoptlevel(opts,OPTIMIZE) if (optlevel==1): cmd = cmd + ' ' if (optlevel==2): cmd = cmd + ' ' if (optlevel==3): cmd = cmd + ' ' if (optlevel==4): cmd = cmd + ' ' cmd = cmd + ' -S' + dotdots + PREFIX + '/include/parser-inc -S/usr/include' cmd = cmd + ' -I' + dotdots + PREFIX + '/python/include' for pkg in PACKAGES: if (PkgSelected(opts,pkg)): cmd = cmd + ' -I' + dotdots + THIRDPARTY + "/linux-libs-a/" + pkg.lower() + "/include" cmd = cmd + ' -oc ' + dotdots + outc + ' -od ' + dotdots + outd cmd = cmd + ' -fnames -string -refcount -assert -python-native' for x in ipath: cmd = cmd + ' -I' + dotdots + x building = 
getbuilding(opts) if (building): cmd = cmd + " -DBUILDING_"+building if (opts.count("WITHINPANDA")): cmd = cmd + " -DWITHIN_PANDA" cmd = cmd + ' -module ' + module + ' -library ' + library if (opts.count("DXSDK")): cmd = cmd + ' -I"' + DIRECTXSDK + '/include"' for ver in ["MAYA5","MAYA6","MAYA65"]: if (opts.count(ver)): cmd = cmd + ' -I"' + MAYASDK[ver] + '/include"' for x in files: cmd = cmd + ' ' + x oscmd(cmd) CompileCxxLINUXA(wobj,outc,ipath,opts) def EnqueueIgate(ipath=0, opts=0, outd=0, obj=0, src=0, module=0, library=0, also=0, skip=0): if ((ipath==0)|(opts==0)|(outd==0)|(obj==0)|(src==0)|(module==0)|(library==0)|(also==0)|(skip==0)): exit("syntax error in EnqueueIgate directive") ALLIN.append(outd) outd = PREFIX+"/pandac/input/"+outd dirlisting = os.listdir(src) files = fnmatch.filter(dirlisting,"*.h") if (skip=='ALL'): files=[] else: files.sort() for x in skip: if (files.count(x)!=0): files.remove(x) for x in also: files.append(x) ipath = [PREFIX+"/tmp"] + ipath + [PREFIX+"/include"] dep = CxxCalcDependenciesAll(xpaths(src+"/",files,""), ipath) dep.append(PREFIX+"/tmp/dtool_have_python.dat") if (COMPILER=="MSVC7"): dep.append(PREFIX+"/bin/interrogate.exe") wobj = PREFIX+"/tmp/"+obj outc = obj[:-4]+".cxx" DependencyBuild(CompileIgateMSVC7, [ipath,opts,outd,outc,wobj,src,module,library,files], [outd, wobj], dep) if (COMPILER=="LINUXA"): dep.append(PREFIX+"/bin/interrogate") wobj = PREFIX+"/tmp/"+outc[:-4]+".o" outc = obj[:-2]+".cxx" DependencyBuild(CompileIgateLINUXA, [ipath,opts,outd,outc,wobj,src,module,library,files], [outd, wobj], dep) def CompileImodMSVC7(outc, wobj, module, library, ipath, opts, files): if (OMIT.count("PYTHON")): WriteFile(outc,"") else: cmd = PREFIX + '/bin/interrogate_module.exe ' cmd = cmd.replace("/","\\") cmd = cmd + ' -oc ' + outc + ' -module ' + module + ' -library ' + library + ' -python-native ' for x in files: cmd = cmd + ' ' + x oscmd(cmd) CompileCxxMSVC7(wobj,outc,ipath,opts) def CompileImodLINUXA(outc, wobj, module, library, ipath, opts, files): if (OMIT.count("PYTHON")): WriteFile(outc,"") else: cmd = PREFIX + '/bin/interrogate_module ' cmd = cmd + ' -oc ' + outc + ' -module ' + module + ' -library ' + library + ' -python-native ' for x in files: cmd = cmd + ' ' + x oscmd(cmd) CompileCxxLINUXA(wobj,outc,ipath,opts) def EnqueueImod(ipath=0, opts=0, obj=0, module=0, library=0, files=0): if ((ipath==0)|(opts==0)|(obj==0)|(module==0)|(library==0)|(files==0)): exit("syntax error in EnqueueImod directive") ipath = [PREFIX+"/tmp"] + ipath + [PREFIX+"/include"] outc = PREFIX+"/tmp/"+obj[:-4]+".cxx" files = xpaths(PREFIX+"/pandac/input/",files,"") dep = files + [PREFIX+"/tmp/dtool_have_python.dat"] if (COMPILER=="MSVC7"): wobj = PREFIX+"/tmp/"+obj[:-4]+".obj" DependencyBuild(CompileImodMSVC7, [outc, wobj, module, library, ipath, opts, files], [wobj], dep) if (COMPILER=="LINUXA"): wobj = PREFIX+"/tmp/"+obj[:-4]+".o" DependencyBuild(CompileImodLINUXA, [outc, wobj, module, library, ipath, opts, files], [wobj], dep) def CompileLibMSVC7(wlib, wobj, opts): cmd = 'link.exe /lib /nologo /OUT:' + wlib optlevel = getoptlevel(opts,OPTIMIZE) if (optlevel==4): cmd = cmd + " /LTCG " for x in wobj: cmd = cmd + ' ' + x oscmd(cmd) def CompileLibLINUXA(wlib, wobj, opts): cmd = 'ar cru ' + wlib for x in wobj: cmd=cmd + ' ' + x oscmd(cmd) def EnqueueLib(lib=0, obj=[], opts=[]): if (lib==0): exit("syntax error in EnqueueLib directive") if (COMPILER=="MSVC7"): if (lib[-4:]==".ilb"): wlib = PREFIX+"/tmp/" + lib[:-4] + ".lib" else: wlib = PREFIX+"/lib/" + lib[:-4] + ".lib" wobj 
= xpaths(PREFIX+"/tmp/",obj,"") DependencyBuild(CompileLibMSVC7, [wlib, wobj, opts], [wlib], wobj) if (COMPILER=="LINUXA"): if (lib[-4:]==".ilb"): wlib = PREFIX+"/tmp/" + lib[:-4] + ".a" else: wlib = PREFIX+"/lib/" + lib[:-4] + ".a" wobj = [] for x in obj: wobj.append(PREFIX + "/tmp/" + x[:-4] + ".o") DependencyBuild(CompileLibLINUXA, [wlib, wobj, opts], [wlib], wobj) def CompileLinkMSVC7(wdll, wlib, wobj, opts, ldef): cmd = 'link.exe /nologo /NODEFAULTLIB:LIBCI.LIB /NODEFAULTLIB:MSVCRTD.LIB /DEBUG ' if (wdll[-4:]!=".exe"): cmd = cmd + " /DLL" optlevel = getoptlevel(opts,OPTIMIZE) if (optlevel==1): cmd = cmd + " /MAP /MAPINFO:LINES /MAPINFO:EXPORTS" if (optlevel==2): cmd = cmd + " /MAP:NUL " if (optlevel==3): cmd = cmd + " /MAP:NUL " if (optlevel==4): cmd = cmd + " /MAP:NUL /LTCG " cmd = cmd + " /FIXED:NO /OPT:REF /STACK:4194304 /INCREMENTAL:NO " if (ldef!=0): cmd = cmd + ' /DEF:"' + ldef + '"' cmd = cmd + ' /OUT:' + wdll if (wlib != 0): cmd = cmd + ' /IMPLIB:' + wlib if (OMIT.count("PYTHON")==0): cmd = cmd + ' /LIBPATH:' + PREFIX + '/python/libs ' for x in wobj: cmd = cmd + ' ' + x if (wdll[-4:]==".exe"): cmd = cmd + ' panda/src/configfiles/pandaIcon.obj' if (opts.count("D3D8") or opts.count("D3D9") or opts.count("DXDRAW") or opts.count("DXSOUND") or opts.count("DXGUID")): cmd = cmd + ' /LIBPATH:"' + DIRECTXSDK + '/lib/x86"' cmd = cmd + ' /LIBPATH:"' + DIRECTXSDK + '/lib"' if (opts.count("D3D8")): cmd = cmd + ' d3d8.lib d3dx8.lib dxerr8.lib' if (opts.count("D3D9")): cmd = cmd + ' d3d9.lib d3dx9.lib dxerr9.lib' if (opts.count("DXDRAW")): cmd = cmd + ' ddraw.lib' if (opts.count("DXSOUND")): cmd = cmd + ' dsound.lib' if (opts.count("DXGUID")): cmd = cmd + ' dxguid.lib' if (opts.count("WINSOCK")): cmd = cmd + " wsock32.lib" if (opts.count("WINSOCK2")): cmd = cmd + " wsock32.lib ws2_32.lib" if (opts.count("WINCOMCTL")): cmd = cmd + ' comctl32.lib' if (opts.count("WINCOMDLG")): cmd = cmd + ' comdlg32.lib' if (opts.count("WINUSER")): cmd = cmd + " user32.lib" if (opts.count("WINMM")): cmd = cmd + " winmm.lib" if (opts.count("WINIMM")): cmd = cmd + " imm32.lib" if (opts.count("WINKERNEL")): cmd = cmd + " kernel32.lib" if (opts.count("WINOLDNAMES")): cmd = cmd + " oldnames.lib" if (opts.count("WINGDI")): cmd = cmd + " gdi32.lib" if (opts.count("ADVAPI")): cmd = cmd + " advapi32.lib" if (opts.count("GLUT")): cmd = cmd + " opengl32.lib glu32.lib" if (PkgSelected(opts,"ZLIB")): cmd = cmd + ' ' + THIRDPARTY + '/win-libs-vc7/zlib/lib/libpandazlib1.lib' if (PkgSelected(opts,"PNG")): cmd = cmd + ' ' + THIRDPARTY + '/win-libs-vc7/png/lib/libpandapng13.lib' if (PkgSelected(opts,"JPEG")): cmd = cmd + ' ' + THIRDPARTY + '/win-libs-vc7/jpeg/lib/libpandajpeg.lib' if (PkgSelected(opts,"TIFF")): cmd = cmd + ' ' + THIRDPARTY + '/win-libs-vc7/tiff/lib/libpandatiff.lib' if (PkgSelected(opts,"VRPN")): cmd = cmd + ' ' + THIRDPARTY + '/win-libs-vc7/vrpn/lib/vrpn.lib' cmd = cmd + ' ' + THIRDPARTY + '/win-libs-vc7/vrpn/lib/quat.lib' if (PkgSelected(opts,"FMOD")): cmd = cmd + ' ' + THIRDPARTY + '/win-libs-vc7/fmod/lib/fmod.lib' if (PkgSelected(opts,"MILES")): cmd = cmd + ' ' + THIRDPARTY + '/win-libs-vc7/miles/lib/mss32.lib' if (PkgSelected(opts,"NVIDIACG")): if (opts.count("CGGL")): cmd = cmd + ' ' + THIRDPARTY + '/win-libs-vc7/nvidiacg/lib/cgGL.lib' cmd = cmd + ' ' + THIRDPARTY + '/win-libs-vc7/nvidiacg/lib/cg.lib' if (PkgSelected(opts,"HELIX")): cmd = cmd + ' ' + THIRDPARTY + '/win-libs-vc7/helix/lib/runtlib.lib' cmd = cmd + ' ' + THIRDPARTY + '/win-libs-vc7/helix/lib/syslib.lib' cmd = cmd + ' ' + THIRDPARTY + 
'/win-libs-vc7/helix/lib/contlib.lib' cmd = cmd + ' ' + THIRDPARTY + '/win-libs-vc7/helix/lib/debuglib.lib' cmd = cmd + ' ' + THIRDPARTY + '/win-libs-vc7/helix/lib/utillib.lib' cmd = cmd + ' ' + THIRDPARTY + '/win-libs-vc7/helix/lib/stlport_vc7.lib' if (PkgSelected(opts,"NSPR")): cmd = cmd + ' ' + THIRDPARTY + '/win-libs-vc7/nspr/lib/nspr4.lib' if (PkgSelected(opts,"OPENSSL")): cmd = cmd + ' ' + THIRDPARTY + '/win-libs-vc7/openssl/lib/libpandassl.lib' cmd = cmd + ' ' + THIRDPARTY + '/win-libs-vc7/openssl/lib/libpandaeay.lib' if (PkgSelected(opts,"FREETYPE")): cmd = cmd + ' ' + THIRDPARTY + '/win-libs-vc7/freetype/lib/freetype.lib' if (PkgSelected(opts,"FFTW")): cmd = cmd + ' ' + THIRDPARTY + '/win-libs-vc7/fftw/lib/rfftw.lib' cmd = cmd + ' ' + THIRDPARTY + '/win-libs-vc7/fftw/lib/fftw.lib' for maya in ["MAYA5","MAYA6","MAYA65"]: if (PkgSelected(opts,maya)): cmd = cmd + ' "' + MAYASDK[maya] + '/lib/Foundation.lib"' cmd = cmd + ' "' + MAYASDK[maya] + '/lib/OpenMaya.lib"' cmd = cmd + ' "' + MAYASDK[maya] + '/lib/OpenMayaAnim.lib"' for max in ["MAX5","MAX6","MAX7"]: if PkgSelected(opts,max): cmd = cmd + ' "' + MAXSDK[max] + '/lib/core.lib"' cmd = cmd + ' "' + MAXSDK[max] + '/lib/edmodel.lib"' cmd = cmd + ' "' + MAXSDK[max] + '/lib/gfx.lib"' cmd = cmd + ' "' + MAXSDK[max] + '/lib/geom.lib"' cmd = cmd + ' "' + MAXSDK[max] + '/lib/mesh.lib"' cmd = cmd + ' "' + MAXSDK[max] + '/lib/maxutil.lib"' cmd = cmd + ' "' + MAXSDK[max] + '/lib/paramblk2.lib"' oscmd(cmd) def CompileLinkLINUXA(wdll, wobj, opts, ldef): if (wdll[-3:]!=".so"): cmd = 'g++ -o ' + wdll + ' -L' + PREFIX + '/lib -L/usr/X11R6/lib' else: cmd = 'g++ -shared -o ' + wdll + ' -L' + PREFIX + '/lib -L/usr/X11R6/lib' for x in wobj: cmd = cmd + ' ' + x if (PkgSelected(opts,"FMOD")): cmd = cmd + ' -L' + THIRDPARTY + '/linux-libs-a/fmod/lib -lfmod-3.74' if (PkgSelected(opts,"NVIDIACG")): cmd = cmd + ' -L' + THIRDPARTY + '/linux-libs-a/nvidiacg/lib' if (opts.count("CGGL")): cmd = cmd + " -lCgGL" cmd = cmd + " -lCg" if (PkgSelected(opts,"NSPR")): cmd = cmd + ' -L' + THIRDPARTY + '/linux-libs-a/nspr/lib -lpandanspr4' if (PkgSelected(opts,"ZLIB")): cmd = cmd + " -lz" if (PkgSelected(opts,"PNG")): cmd = cmd + " -lpng" if (PkgSelected(opts,"JPEG")): cmd = cmd + " -ljpeg" if (PkgSelected(opts,"TIFF")): cmd = cmd + " -ltiff" if (PkgSelected(opts,"OPENSSL")): cmd = cmd + " -lssl" if (PkgSelected(opts,"FREETYPE")): cmd = cmd + " -lfreetype" if (PkgSelected(opts,"VRPN")): cmd = cmd + ' -L' + THIRDPARTY + '/linux-libs-a/vrpn/lib -lvrpn -lquat' if (PkgSelected(opts,"FFTW")): cmd = cmd + ' -L' + THIRDPARTY + '/linux-libs-a/fftw/lib -lrfftw -lfftw' if (opts.count("GLUT")): cmd = cmd + " -lGL -lGLU" oscmd(cmd) def EnqueueLink(dll=0, obj=[], opts=[], xdep=[], ldef=0): if (dll==0): exit("syntax error in EnqueueLink directive") if (COMPILER=="MSVC7"): wobj = [] for x in obj: suffix = x[-4:] if (suffix==".obj"): wobj.append(PREFIX+"/tmp/"+x) elif (suffix==".dll"): wobj.append(PREFIX+"/lib/"+x[:-4]+".lib") elif (suffix==".lib"): wobj.append(PREFIX+"/lib/"+x) elif (suffix==".ilb"): wobj.append(PREFIX+"/tmp/"+x[:-4]+".lib") else: exit("unknown suffix in object list.") if (dll[-4:]==".exe"): wdll = PREFIX+"/bin/"+dll DependencyBuild(CompileLinkMSVC7, [wdll, 0, wobj, opts, ldef], [wdll], wobj) elif (dll[-4:]==".dll"): wdll 
= PREFIX+"/bin/"+dll wlib = PREFIX+"/lib/"+dll[:-4]+".lib" DependencyBuild(CompileLinkMSVC7, [wdll, wlib, wobj, opts, ldef], [wdll, wlib], wobj) else: wdll = PREFIX+"/plugins/"+dll DependencyBuild(CompileLinkMSVC7, [wdll, 0, wobj, opts, ldef], [wdll], wobj) if (COMPILER=="LINUXA"): if (dll[-4:]==".exe"): wdll = PREFIX+"/bin/"+dll[:-4] else: wdll = PREFIX+"/lib/"+dll[:-4]+".so" wobj = [] for x in obj: suffix = x[-4:] if (suffix==".obj"): wobj.append(PREFIX+"/tmp/"+x[:-4]+".o") elif (suffix==".dll"): wobj.append(PREFIX+"/lib/"+x[:-4]+".so") elif (suffix==".lib"): wobj.append(PREFIX+"/lib/"+x[:-4]+".a") elif (suffix==".ilb"): wobj.append(PREFIX+"/tmp/"+x[:-4]+".a") else: exit("unknown suffix in object list.") DependencyBuild(CompileLinkLINUXA, [wdll, wobj, opts, ldef], [wdll], wobj) CxxIgnoreHeader["Python.h"] = 1 CxxIgnoreHeader["Python/Python.h"] = 1 CxxIgnoreHeader["Cg/cg.h"] = 1 CxxIgnoreHeader["Cg/cgGL.h"] = 1 CxxIgnoreHeader["alloc.h"] = 1 CxxIgnoreHeader["ctype.h"] = 1 CxxIgnoreHeader["stdlib.h"] = 1 CxxIgnoreHeader["ipc_thread.h"] = 1 CxxIgnoreHeader["platform/symbian/symbian_print.h"] = 1 CxxIgnoreHeader["hxtypes.h"] = 1 CxxIgnoreHeader["hxcom.h"] = 1 CxxIgnoreHeader["hxiids.h"] = 1 CxxIgnoreHeader["hxpiids.h"] = 1 CxxIgnoreHeader["dsound.h"] = 1 CxxIgnoreHeader["hlxosstr.h"] = 1 CxxIgnoreHeader["ddraw.h"] = 1 CxxIgnoreHeader["mss.h"] = 1 CxxIgnoreHeader["MacSocket.h"] = 1 CxxIgnoreHeader["textureTransition.h"] = 1 CxxIgnoreHeader["transformTransition.h"] = 1 CxxIgnoreHeader["billboardTransition.h"] = 1 CxxIgnoreHeader["transformTransition.h"] = 1 CxxIgnoreHeader["transparencyTransition.h"] = 1 CxxIgnoreHeader["allTransitionsWrapper.h"] = 1 CxxIgnoreHeader["allTransitionsWrapper.h"] = 1 CxxIgnoreHeader["namedNode.h"] = 1 CxxIgnoreHeader["renderRelation.h"] = 1 CxxIgnoreHeader["renderTraverser.h"] = 1 CxxIgnoreHeader["get_rel_pos.h"] = 1 CxxIgnoreHeader["windows.h"] = 1 CxxIgnoreHeader["windef.h"] = 1 CxxIgnoreHeader["afxres.h"] = 1 CxxIgnoreHeader["Max.h"] = 1 CxxIgnoreHeader["iparamb2.h"] = 1 CxxIgnoreHeader["iparamm2.h"] = 1 CxxIgnoreHeader["istdplug.h"] = 1 CxxIgnoreHeader["iskin.h"] = 1 CxxIgnoreHeader["stdmat.h"] = 1 CxxIgnoreHeader["phyexp.h"] = 1 CxxIgnoreHeader["bipexp.h"] = 1 CxxIgnoreHeader["modstack.h"] = 1 CxxIgnoreHeader["decomp.h"] = 1 CxxIgnoreHeader["shape.h"] = 1 CxxIgnoreHeader["simpobj.h"] = 1 CxxIgnoreHeader["surf_api.h"] = 1 CxxIgnoreHeader["openssl/evp.h"] = 1 CxxIgnoreHeader["openssl/rand.h"] = 1 CxxIgnoreHeader["openssl/md5.h"] = 1 CxxIgnoreHeader["openssl/err.h"] = 1 CxxIgnoreHeader["openssl/ssl.h"] = 1 CxxIgnoreHeader["openssl/pem.h"] = 1 CxxIgnoreHeader["openssl/rsa.h"] = 1 CxxIgnoreHeader["openssl/bio.h"] = 1 CxxIgnoreHeader["openssl/x509.h"] = 1 CxxIgnoreHeader["map"] = 1 CxxIgnoreHeader["vector"] = 1 CxxIgnoreHeader["set"] = 1 CxxIgnoreHeader["algorithm"] = 1 VERSION1=int(VERSION.split(".")[0]) VERSION2=int(VERSION.split(".")[1]) VERSION3=int(VERSION.split(".")[2]) NVERSION=VERSION1*1000000+VERSION2*1000+VERSION3 conf="""
PANDAVERSION_H="""
def LocateVisualStudio(): # Try to use the Visual Toolkit 2003 if (os.environ.has_key("VCTOOLKITINSTALLDIR")): vcdir = os.environ["VCTOOLKITINSTALLDIR"] platsdk=GetRegistryKey("SOFTWARE\\Microsoft\\MicrosoftSDK\\InstalledSDKs\\8F9E5EF3-A9A5-491B-A889-C58EFFECE8B3", "Install Dir") if (platsdk == 0): exit("Found VC Toolkit, but cannot locate MS Platform SDK") WARNINGS.append("Using visual toolkit: "+vcdir) WARNINGS.append("Using MS Platform SDK: "+platsdk) AddToVisualStudioPath("PATH", vcdir + "\\bin") AddToVisualStudioPath("INCLUDE", platsdk + "\\include") AddToVisualStudioPath("INCLUDE", vcdir + "\\include") AddToVisualStudioPath("INCLUDE", DIRECTXSDK + "\\include") AddToVisualStudioPath("LIB", platsdk + "\\lib") AddToVisualStudioPath("LIB", vcdir + "\\lib") AddToVisualStudioPath("LIB", THIRDPARTY + "\\win-libs-vc7\\extras\\lib") AddToVisualStudioPath("LIB", DIRECTXSDK + "\\lib") return # Try to use Visual Studio vcdir = GetRegistryKey("SOFTWARE\\Microsoft\\VisualStudio\\7.1", "InstallDir") if (vcdir == 0): vcdir = GetRegistryKey("SOFTWARE\\Microsoft\\VisualStudio\\7.0", "InstallDir") if (vcdir != 0) and (vcdir[-13:] == "\\Common7\\IDE\\"): vcdir = vcdir[:-12] WARNINGS.append("Using visual studio: "+vcdir) AddToVisualStudioPath("PATH", vcdir + "vc7\\bin") AddToVisualStudioPath("PATH", vcdir + "Common7\\IDE") AddToVisualStudioPath("PATH", vcdir + "Common7\\Tools") AddToVisualStudioPath("PATH", vcdir + "Common7\\Tools\\bin\\prerelease") AddToVisualStudioPath("PATH", vcdir + "Common7\\Tools\\bin") AddToVisualStudioPath("INCLUDE", vcdir + "vc7\\ATLMFC\\INCLUDE") AddToVisualStudioPath("INCLUDE", vcdir + "vc7\\include") AddToVisualStudioPath("INCLUDE", vcdir + "vc7\\PlatformSDK\\include\\prerelease") AddToVisualStudioPath("INCLUDE", vcdir + "vc7\\PlatformSDK\\include") AddToVisualStudioPath("LIB", vcdir + "vc7\\ATLMFC\\LIB") AddToVisualStudioPath("LIB", vcdir + "vc7\\LIB") AddToVisualStudioPath("LIB", vcdir + "vc7\\PlatformSDK\\lib\\prerelease") AddToVisualStudioPath("LIB", vcdir + "vc7\\PlatformSDK\\lib") return # Give up exit("Cannot locate Microsoft Visual Studio 7.0, 7.1, or the Visual Toolkit 2003")
8f86705296f973090fc3f2bedce1bf0e6e54e9f6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/7242/8f86705296f973090fc3f2bedce1bf0e6e54e9f6/makepanda.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2851, 340, 25780, 510, 4484, 13332, 225, 468, 6161, 358, 999, 326, 26832, 13288, 8691, 4044, 23, 309, 261, 538, 18, 28684, 18, 5332, 67, 856, 2932, 58, 1268, 51, 1741, 47, 1285, 28865, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2851, 340, 25780, 510, 4484, 13332, 225, 468, 6161, 358, 999, 326, 26832, 13288, 8691, 4044, 23, 309, 261, 538, 18, 28684, 18, 5332, 67, 856, 2932, 58, 1268, 51, 1741, 47, 1285, 28865, ...
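The makepanda row above funnels every Enqueue*/Compile* pair through a DependencyBuild(builder, args, targets, deps) call, which is evidently expected to invoke the builder only when some target is missing or out of date relative to its dependencies. The helper itself is not part of this row, so the following is a minimal sketch of that freshness check, assuming it compares file modification times; the names older and dependency_build are illustrative, not the actual makepanda identifiers.

import os

def older(targets, deps):
    # A target set is stale if any target is missing or if any dependency
    # carries a newer modification time than that target (sketch only).
    for t in targets:
        if not os.path.exists(t):
            return True
        tstamp = os.path.getmtime(t)
        for d in deps:
            if os.path.getmtime(d) > tstamp:
                return True
    return False

def dependency_build(builder, args, targets, deps):
    # Hypothetical wrapper: run the builder only when rebuilding is needed.
    if older(targets, deps):
        builder(*args)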
args_gv = [v3, v4, rgenop.genconst(1)]
args_gv = [v3, v4, rgenop.genconst(True)]
#def dummyfn(counter, a, b):
b92bda44ed691d512251458db498d89907ee3cbc /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/6934/b92bda44ed691d512251458db498d89907ee3cbc/rgenop_tests.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 468, 536, 9609, 4293, 12, 7476, 16, 279, 16, 324, 4672, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 468, 536, 9609, 4293, 12, 7476, 16, 279, 16, 324, 4672, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -...
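The rgenop_tests row swaps rgenop.genconst(1) for rgenop.genconst(True). The point is that Python's bool is a subclass of int, so 1 and True compare equal while carrying different types; a code generator that infers a constant's low-level type from the Python value it is handed needs the genuine bool. A quick self-contained demonstration of that distinction:

# bool is an int subclass: the values compare equal, the types differ.
assert True == 1
assert isinstance(True, int)
assert type(True) is not type(1)  # so genconst(1) and genconst(True) can
                                  # legitimately yield differently typed
                                  # constants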
@identity.require(identity.in_group('cvsextras', 'cvsadmin'))
@identity.require(identity.in_any_group('cvsextras', 'cvsadmin'))
def toggle_owner(self, containerId): # Check that the pkgid is orphaned pkg = model.PackageListing.get_by(id=containerId) if not pkg: return dict(status=False, message='No such package %s' % containerId) ### FIXME: We want to allow "admin" users to set orphan status as well. if pkg.owner == identity.current.user.user_id: # Release ownership pkg.owner = ORPHAN_ID ownerName = 'Orphaned Package (orphan)' logMessage = 'Package %s in %s %s was orphaned by %s (%s)' % ( pkg.package.name, pkg.collection.name, pkg.collection.version, identity.current.user.display_name, identity.current.user_name) status = model.StatusTranslation.get_by(statusname='Orphaned') elif pkg.owner == ORPHAN_ID: # Take ownership pkg.owner = identity.current.user.user_id ownerName = '%s (%s)' % (identity.current.user.display_name, identity.current.user_name) logMessage = 'Package %s in %s %s is now owned by %s' % ( pkg.package.name, pkg.collection.name, pkg.collection.version, ownerName) status = model.StatusTranslation.get_by(statusname='Owned') else: return dict(status=False, message= 'Package %s not available for taking' % containerId)
5c83052d47da4dbdd7d70545008dd7d6ee9475e9 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/9953/5c83052d47da4dbdd7d70545008dd7d6ee9475e9/controllers.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 10486, 67, 8443, 12, 2890, 16, 27142, 4672, 225, 468, 2073, 716, 326, 3475, 350, 353, 31124, 3475, 273, 938, 18, 2261, 19081, 18, 588, 67, 1637, 12, 350, 33, 3782, 548, 13, 309, 486, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 10486, 67, 8443, 12, 2890, 16, 27142, 4672, 225, 468, 2073, 716, 326, 3475, 350, 353, 31124, 3475, 273, 938, 18, 2261, 19081, 18, 588, 67, 1637, 12, 350, 33, 3782, 548, 13, 309, 486, ...
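The controllers.py row replaces identity.in_group('cvsextras', 'cvsadmin') with identity.in_any_group('cvsextras', 'cvsadmin'): membership in any one of the listed groups should grant access, whereas in_group expresses membership in a single named group. A minimal sketch of the two predicate semantics, using plain functions rather than the TurboGears identity API:

def in_group(user_groups, group):
    # Access requires membership in the single named group.
    return group in user_groups

def in_any_group(user_groups, *groups):
    # Access requires membership in at least one of the named groups.
    return bool(set(groups) & set(user_groups))

assert in_any_group({'cvsextras'}, 'cvsextras', 'cvsadmin')
assert not in_group({'cvsextras'}, 'cvsadmin')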
sender, text = Utils.SnarfMessage(msg)
sender = msg.GetSender() filename = 'heldmsg-%s-%d.txt' % (self.internal_name(), id) omask = os.umask(002) try: fp = open(os.path.join(mm_cfg.DATA_DIR, filename), 'w') fp.write(str(msg)) fp.close() finally: os.umask(omask)
def HoldMessage(self, msg, reason): # ensure that the database is open for writing self.__opendb() # get the next unique id id = self.__request_id() assert not self.__db.has_key(id) # flatten the message and suck out the sender address sender, text = Utils.SnarfMessage(msg) # save the information to the request database. For held-message # entries, each record in the database will be of the following # format: # # the time the message was received # the sender of the message # the message's subject # a string description of the problem # the full text of the message # msgsubject = msg.get('subject', '(no subject)') data = time.time(), sender, msgsubject, reason, text self.__db[id] = (mm_cfg.HELDMSG, data)
af1fd3ec170fe88a3989e311b6a1288555faaeb3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/2120/af1fd3ec170fe88a3989e311b6a1288555faaeb3/ListAdmin.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 670, 1673, 1079, 12, 2890, 16, 1234, 16, 3971, 4672, 468, 1551, 594, 716, 326, 2063, 353, 1696, 364, 7410, 365, 16186, 556, 409, 70, 1435, 468, 336, 326, 1024, 3089, 612, 612, 273, 365...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 670, 1673, 1079, 12, 2890, 16, 1234, 16, 3971, 4672, 468, 1551, 594, 716, 326, 2063, 353, 1696, 364, 7410, 365, 16186, 556, 409, 70, 1435, 468, 336, 326, 1024, 3089, 612, 612, 273, 365...
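The ListAdmin row's added code writes each held message to its own heldmsg-*.txt file, temporarily setting the umask to 002 so the file is created group-writable, and restoring it in a finally block even if the write fails. A small sketch of that pattern in isolation; the function name and signature are illustrative:

import os

def save_held_message(path, text):
    # umask 002 clears only the other-write bit, so a file created with the
    # default 0666 request comes out 0664 (group-writable).
    omask = os.umask(0o002)
    try:
        with open(path, 'w') as fp:
            fp.write(text)
    finally:
        # Restore the caller's umask no matter what happened above.
        os.umask(omask)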