rem
stringlengths
2
226k
add
stringlengths
0
227k
context
stringlengths
8
228k
meta
stringlengths
156
215
input_ids
list
attention_mask
list
labels
list
def dummy_write(self, *args, **kwargs): print "PageEditor can't save a page if Abort is returned from PreSave event handlers" assert False
pagename = u'AutoCreatedMoinMoinTemporaryTestPageFortestSave' testtext = u'ThisIsSomeStupidTestPageText!'
def dummy_write(self, *args, **kwargs): print "PageEditor can't save a page if Abort is returned from PreSave event handlers" assert False
05913ee2f82a4d9e79d238f6103bc7bf3cbd120a /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/888/05913ee2f82a4d9e79d238f6103bc7bf3cbd120a/test_PageEditor.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 9609, 67, 2626, 12, 2890, 16, 380, 1968, 16, 2826, 4333, 4672, 1172, 315, 1964, 6946, 848, 1404, 1923, 279, 1363, 309, 14263, 353, 2106, 628, 2962, 4755, 871, 4919, 6, 1815, 1083, 2, 0...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 9609, 67, 2626, 12, 2890, 16, 380, 1968, 16, 2826, 4333, 4672, 1172, 315, 1964, 6946, 848, 1404, 1923, 279, 1363, 309, 14263, 353, 2106, 628, 2962, 4755, 871, 4919, 6, 1815, 1083, 2, -...
return srs.reverse(val[pos+1:-1]) except: continue
end = val.find('>',pos+4) return srs.reverse(val[pos+1:end]) except: pass
def findsrs(fp): lastln = None for ln in fp: if lastln: if ln[0].isspace() and ln[0] != '\n': lastln += ln continue try: name,val = lastln.rstrip().split(None,1) pos = val.find('<SRS') if pos >= 0: return srs.reverse(val[pos+1:-1]) except: continue lnl = ln.lower() if lnl.startswith('action:'): if lnl.split()[-1] != 'failed': break for k in ('message-id:','x-mailer:','sender:','references:'): if lnl.startswith(k): lastln = ln break
b2d8e838a2b42e59269f346107235e0ad465cf17 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/139/b2d8e838a2b42e59269f346107235e0ad465cf17/bms.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 13094, 5453, 12, 7944, 4672, 1142, 2370, 273, 599, 364, 7211, 316, 4253, 30, 309, 1142, 2370, 30, 309, 7211, 63, 20, 8009, 1054, 909, 1435, 471, 7211, 63, 20, 65, 480, 2337, 82, 4278, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 13094, 5453, 12, 7944, 4672, 1142, 2370, 273, 599, 364, 7211, 316, 4253, 30, 309, 1142, 2370, 30, 309, 7211, 63, 20, 8009, 1054, 909, 1435, 471, 7211, 63, 20, 65, 480, 2337, 82, 4278, ...
text, self.signature(msgid),
self.toencoded(text), self.toencoded(self.signature(msgid)),
def sendMailTo(self, recipients, text, REQUEST, subjectSuffix='', subject='', message_id=None, in_reply_to=None, to=None, exclude_address=None, ): """ Send a mail-out containing text to a list of email addresses.
bc610418ea59041ca844bb810fddfd1c7ad8fcb0 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/5225/bc610418ea59041ca844bb810fddfd1c7ad8fcb0/Mail.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1366, 6759, 774, 12, 2890, 16, 12045, 16, 977, 16, 12492, 16, 3221, 5791, 2218, 2187, 3221, 2218, 2187, 883, 67, 350, 33, 7036, 16, 316, 67, 10629, 67, 869, 33, 7036, 16, 358, 33, 70...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1366, 6759, 774, 12, 2890, 16, 12045, 16, 977, 16, 12492, 16, 3221, 5791, 2218, 2187, 3221, 2218, 2187, 883, 67, 350, 33, 7036, 16, 316, 67, 10629, 67, 869, 33, 7036, 16, 358, 33, 70...
if(macro_cycle == 1 or macro_cycle == 3 and bss is not None):
if((macro_cycle == 1 or macro_cycle == 3) and bss is not None):
def __init__(self, fmodel, selections = None, refine_r = True, refine_t = True, r_initial = None, t_initial = None, nref_min = 1000, max_iterations = 50, convergence_test = True, convergence_delta = 0.00001, use_only_low_resolution = False, bss = None, log = None): if(log is None): log = sys.stdout if(selections is None): selections = [] selections.append(flex.bool(fmodel.xray_structure.scatterers().size(), True)) else: assert len(selections) > 0 self.total_rotation = [] self.total_translation = [] for item in selections: self.total_rotation.append(flex.double(3,0)) self.total_translation.append(flex.double(3,0)) if(r_initial is None): r_initial = [] for item in selections: r_initial.append(flex.double(3,0)) if(t_initial is None): t_initial = [] for item in selections: t_initial.append(flex.double(3,0)) fmodel_copy = fmodel.deep_copy() if(fmodel_copy.mask_params is not None): fmodel_copy.mask_params.verbose = -1 d_mins = setup_search_range(f = fmodel_copy.f_obs_w(), nref_min = nref_min) if(use_only_low_resolution): if(len(d_mins) > 1): d_mins = d_mins[:-1] print >> log, "High resolution cutoffs for mz-protocol: ", \ [str("%.3f"%i) for i in d_mins] for res in d_mins: print >> log xrs = fmodel_copy.xray_structure.deep_copy_scatterers() fmodel_copy = fmodel.resolution_filter(d_min = res) fmodel_copy.update_xray_structure(xray_structure = xrs, update_f_calc = True) rworks = flex.double() for macro_cycle in range(1, min(int(res),4)+1): if(macro_cycle == 1 or macro_cycle == 3 and bss is not None): print_statistics.make_sub_header( "Bulk solvent correction (and anisotropic scaling)",out=log) if(fmodel_copy.f_obs.d_min() > 3.0): save_bss_anisotropic_scaling = bss.anisotropic_scaling bss.anisotropic_scaling=False fmodel_copy.update_solvent_and_scale(params = bss, out = log) if(fmodel_copy.f_obs.d_min() > 3.0): bss.anisotropic_scaling=save_bss_anisotropic_scaling minimized = rigid_body_minimizer(fmodel = fmodel_copy, selections = selections, r_initial = r_initial, t_initial 
= t_initial, refine_r = refine_r, refine_t = refine_t, max_iterations = max_iterations) rotation_matrices = [] translation_vectors = [] for i in xrange(len(selections)): self.total_rotation[i] += flex.double(minimized.r_min[i]) self.total_translation[i] += flex.double(minimized.t_min[i]) rot_obj = rb_mat(phi = minimized.r_min[i][0], psi = minimized.r_min[i][1], the = minimized.r_min[i][2]) rotation_matrices.append(rot_obj.rot_mat()) translation_vectors.append(minimized.t_min[i]) new_xrs = apply_transformation( xray_structure = minimized.fmodel.xray_structure, rotation_matrices = rotation_matrices, translation_vectors = translation_vectors, selections = selections) fmodel_copy.update_xray_structure(xray_structure = new_xrs, update_f_calc = True, update_f_mask = True, out = log) rwork = minimized.fmodel.r_work() rfree = minimized.fmodel.r_free() assert approx_equal(rwork, fmodel_copy.r_work()) self.show(f = fmodel_copy.f_obs_w(), r_mat = self.total_rotation, t_vec = self.total_translation, rw = rwork, rf = rfree, tw = minimized.fmodel.target_w(), mc = macro_cycle, it = minimized.counter, ct = convergence_test, out = log) if(convergence_test): rworks.append(rwork) if(rworks.size() > 1): size = rworks.size() - 1 if(abs(rworks[size]-rworks[size-1])<convergence_delta): break fmodel.update_xray_structure(xray_structure = fmodel_copy.xray_structure, update_f_calc = True) self.fmodel = fmodel
8ccd7bddeaef857253843a4ad2aa16dc8e994d2a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/696/8ccd7bddeaef857253843a4ad2aa16dc8e994d2a/rigid_body.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 284, 2284, 16, 21738, 2868, 273, 599, 16, 30446, 67, 86, 7734, 273, 1053, 16, 30446, 67, 88, 7734, 273, 1053, 16, 436, 67, 6769, 9079, 273, 599, 16, 26...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 284, 2284, 16, 21738, 2868, 273, 599, 16, 30446, 67, 86, 7734, 273, 1053, 16, 30446, 67, 88, 7734, 273, 1053, 16, 436, 67, 6769, 9079, 273, 599, 16, 26...
def released(keyval):
def released(hardware_keycode):
def released(keyval): #gdk.keyboard_ungrab(event.time) action = self.pressed[keyval] del self.pressed[keyval] #print 'RELEASE', action.get_name() if action.keyup_callback: action.keyup_callback(widget, event) action.keyup_callback = None
1b52c26823b4c08287f165004672bad43ccf1bc0 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/7129/1b52c26823b4c08287f165004672bad43ccf1bc0/keyboard.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 225, 1652, 15976, 12, 25118, 67, 856, 710, 4672, 468, 75, 2883, 18, 31486, 67, 318, 2752, 70, 12, 2575, 18, 957, 13, 1301, 273, 365, 18, 10906, 63, 856, 1125, 65, 1464, 365, 18, 10906, 63,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 225, 1652, 15976, 12, 25118, 67, 856, 710, 4672, 468, 75, 2883, 18, 31486, 67, 318, 2752, 70, 12, 2575, 18, 957, 13, 1301, 273, 365, 18, 10906, 63, 856, 1125, 65, 1464, 365, 18, 10906, 63,...
if self['door_single_texture']:
if self['door_texture']:
def writeAttributes(f, type, s = self): color = s[type + '_color'] f.write('%s_texture: %s\n' % (type, s[type + '_texture'])) f.write('%s_color: Vec4(%.2f, %.2f, %.2f, 1.0)\n' % (type, color[0], color[1], color[2]))
083ae49ef00dc006ef5bb4cf2f41c398c70f0c82 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8543/083ae49ef00dc006ef5bb4cf2f41c398c70f0c82/LevelEditor.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1045, 2498, 12, 74, 16, 618, 16, 272, 273, 365, 4672, 2036, 273, 272, 63, 723, 397, 2070, 3266, 3546, 284, 18, 2626, 29909, 87, 67, 955, 594, 30, 738, 87, 64, 82, 11, 738, 261, 723...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1045, 2498, 12, 74, 16, 618, 16, 272, 273, 365, 4672, 2036, 273, 272, 63, 723, 397, 2070, 3266, 3546, 284, 18, 2626, 29909, 87, 67, 955, 594, 30, 738, 87, 64, 82, 11, 738, 261, 723...
client._request('REPORT', '%s/%s' % (PRINCIPAL_DAV_PATH, CALENDAR), body=body, headers={ 'Depth': '1' })
client._request('REPORT', '%s/%s' % (PRINCIPAL_DAV_PATH, CALENDAR), body=body, headers={ 'Content-Type':'text/xml', 'Depth': '1' })
def test_timerangequery_invalid_not_utc_1(): body = open(FILES_DIR+'reports/timerangequery/invalid_nonUTC1.xml').read() client._request('REPORT', '%s/%s' % (PRINCIPAL_DAV_PATH, CALENDAR), body=body, headers={ 'Depth': '1' }) assert client.response.status == 400
0eb233b34e19f3cc4572957bdd4dfc9d1e2048e1 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/9939/0eb233b34e19f3cc4572957bdd4dfc9d1e2048e1/test_reports_with_float.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 12542, 726, 2271, 67, 5387, 67, 902, 67, 17892, 67, 21, 13332, 1417, 273, 1696, 12, 12669, 67, 4537, 6797, 20195, 19, 12542, 726, 2271, 19, 5387, 67, 5836, 11471, 21, 18, 290...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 12542, 726, 2271, 67, 5387, 67, 902, 67, 17892, 67, 21, 13332, 1417, 273, 1696, 12, 12669, 67, 4537, 6797, 20195, 19, 12542, 726, 2271, 19, 5387, 67, 5836, 11471, 21, 18, 290...
"""Adds a Fragment instance to the Chain. If delay_sort is True,
"""Adds a Fragment instance to the Chain. If delay_sort is True,
def add_fragment(self, fragment, delay_sort=False): """Adds a Fragment instance to the Chain. If delay_sort is True, then the fragment is not inserted in the proper position within the chain. """ Segment.add_fragment(self, fragment, delay_sort) fragment.chain = self
a181eacbeb3c68b8f7caba918a821d3f3e6c50f6 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10674/a181eacbeb3c68b8f7caba918a821d3f3e6c50f6/Structure.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 527, 67, 11956, 12, 2890, 16, 5481, 16, 4624, 67, 3804, 33, 8381, 4672, 3536, 3655, 279, 18009, 791, 358, 326, 7824, 18, 971, 4624, 67, 3804, 353, 1053, 16, 1508, 326, 5481, 353, 486, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 527, 67, 11956, 12, 2890, 16, 5481, 16, 4624, 67, 3804, 33, 8381, 4672, 3536, 3655, 279, 18009, 791, 358, 326, 7824, 18, 971, 4624, 67, 3804, 353, 1053, 16, 1508, 326, 5481, 353, 486, ...
i = 0 while i < len(line) and line[i].isspace(): i = i+1 list.append(' %s\n' % line.strip()) if offset is not None: s = ' ' for c in line[i:offset-1]: if c.isspace(): s = s + c else: s = s + ' ' list.append('%s^\n' % s) value = msg
if line is not None: i = 0 while i < len(line) and line[i].isspace(): i = i+1 list.append(' %s\n' % line.strip()) if offset is not None: s = ' ' for c in line[i:offset-1]: if c.isspace(): s = s + c else: s = s + ' ' list.append('%s^\n' % s) value = msg
def format_exception_only(etype, value): """Format the exception part of a traceback. The arguments are the exception type and value such as given by sys.last_type and sys.last_value. The return value is a list of strings, each ending in a newline. Normally, the list contains a single string; however, for SyntaxError exceptions, it contains several lines that (when printed) display detailed information about where the syntax error occurred. The message indicating which exception occurred is the always last string in the list. """ list = [] if type(etype) == types.ClassType: stype = etype.__name__ else: stype = etype if value is None: list.append(str(stype) + '\n') else: if etype is SyntaxError: try: msg, (filename, lineno, offset, line) = value except: pass else: if not filename: filename = "<string>" list.append(' File "%s", line %d\n' % (filename, lineno)) i = 0 while i < len(line) and line[i].isspace(): i = i+1 list.append(' %s\n' % line.strip()) if offset is not None: s = ' ' for c in line[i:offset-1]: if c.isspace(): s = s + c else: s = s + ' ' list.append('%s^\n' % s) value = msg s = _some_str(value) if s: list.append('%s: %s\n' % (str(stype), s)) else: list.append('%s\n' % str(stype)) return list
89f6e8c77bee38434b8fb8aea90a0e88f44a7203 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/89f6e8c77bee38434b8fb8aea90a0e88f44a7203/traceback.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 740, 67, 4064, 67, 3700, 12, 5872, 16, 460, 4672, 3536, 1630, 326, 1520, 1087, 434, 279, 10820, 18, 225, 1021, 1775, 854, 326, 1520, 618, 471, 460, 4123, 487, 864, 635, 2589, 18, 2722,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 740, 67, 4064, 67, 3700, 12, 5872, 16, 460, 4672, 3536, 1630, 326, 1520, 1087, 434, 279, 10820, 18, 225, 1021, 1775, 854, 326, 1520, 618, 471, 460, 4123, 487, 864, 635, 2589, 18, 2722,...
"http://www.inkscape.org",b
"http://www.inkscape.org",
def __init__(self, name, url, icon, branch, lic): self.name = name self.url = url self.icon = icon self.branch = branch self.lic = lic self.min = -1 self.max = -1 self.langs = 0 self.total = 0 self.eng = 0
4ede5f0f77375c4c904235a48a0f82a020de9635 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/3042/4ede5f0f77375c4c904235a48a0f82a020de9635/audit_step1.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 508, 16, 880, 16, 4126, 16, 3803, 16, 328, 335, 4672, 365, 18, 529, 273, 508, 365, 18, 718, 273, 880, 365, 18, 3950, 273, 4126, 365, 18, 7500, 273, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 508, 16, 880, 16, 4126, 16, 3803, 16, 328, 335, 4672, 365, 18, 529, 273, 508, 365, 18, 718, 273, 880, 365, 18, 3950, 273, 4126, 365, 18, 7500, 273, 3...
def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp):
def getTestCaseNames(testCaseClass, prefix, sortUsing=three_way_cmp):
def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp): return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass)
250fda8c06193340b5f11a71dcac057e35b966d4 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/8125/250fda8c06193340b5f11a71dcac057e35b966d4/unittest.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 29384, 2449, 1557, 12, 3813, 2449, 797, 16, 1633, 16, 1524, 7736, 33, 451, 992, 67, 1888, 67, 9625, 4672, 327, 389, 6540, 2886, 12, 3239, 16, 1524, 7736, 2934, 588, 4709, 2449, 1557, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 29384, 2449, 1557, 12, 3813, 2449, 797, 16, 1633, 16, 1524, 7736, 33, 451, 992, 67, 1888, 67, 9625, 4672, 327, 389, 6540, 2886, 12, 3239, 16, 1524, 7736, 2934, 588, 4709, 2449, 1557, 1...
NetCDF time object.
NetCDF time object.
def _check_index(indices, dates, nctime, calendar): """Return True if the time indices given correspond to the given dates, False otherwise. Parameters ---------- indices : sequence of integers Positive integers indexing the time variable. dates : sequence of datetime objects Reference dates. nctime : netCDF Variable object NetCDF time object. calendar : string Calendar of nctime. """ if (indices <0).any(): return False if (indices >= nctime.shape[0]).any(): return False t = nctime[indices] return numpy.all( num2date(t, nctime.units, calendar) == dates)
bbd2b412f6583e4d366fbba7cbbd6a40ad4b5276 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/8103/bbd2b412f6583e4d366fbba7cbbd6a40ad4b5276/netcdftime.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 1893, 67, 1615, 12, 6836, 16, 7811, 16, 290, 21261, 16, 5686, 4672, 3536, 990, 1053, 309, 326, 813, 4295, 864, 4325, 358, 326, 864, 7811, 16, 1083, 3541, 18, 225, 7012, 12181, 429...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 1893, 67, 1615, 12, 6836, 16, 7811, 16, 290, 21261, 16, 5686, 4672, 3536, 990, 1053, 309, 326, 813, 4295, 864, 4325, 358, 326, 864, 7811, 16, 1083, 3541, 18, 225, 7012, 12181, 429...
return "file:///" + GetAbsolutePath(full_path)
if sys.platform in ('cygwin', 'win32'): return "file:///" + GetAbsolutePath(full_path) return "file://" + GetAbsolutePath(full_path)
def FilenameToUri(full_path): """Convert a test file to a URI.""" LAYOUTTEST_HTTP_DIR = "LayoutTests/http/tests/" PENDING_HTTP_DIR = "pending/http/tests/" relative_path = _WinPathToUnix(RelativeTestFilename(full_path)) port = None use_ssl = False # LayoutTests/http/tests/ run off port 8000 and ssl/ off 8443 if relative_path.startswith(LAYOUTTEST_HTTP_DIR): relative_path = relative_path[len(LAYOUTTEST_HTTP_DIR):] port = 8000 # pending/http/tests/ run off port 9000 and ssl/ off 9443 elif relative_path.startswith(PENDING_HTTP_DIR): relative_path = relative_path[len(PENDING_HTTP_DIR):] port = 9000 # chrome/http/tests run off of port 8081 with the full path elif relative_path.find("/http/") >= 0: port = 8081 # Make LayoutTests/http/tests/local run as local files. This is to mimic the # logic in run-webkit-tests. # TODO(jianli): Consider extending this to "media/". if port and not relative_path.startswith("local/"): if relative_path.startswith("ssl/"): port += 443 protocol = "https" else: protocol = "http" return "%s://127.0.0.1:%u/%s" % (protocol, port, relative_path) return "file:///" + GetAbsolutePath(full_path)
edaa7df641555a8403e995a0be487a4194cfdbdd /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/5060/edaa7df641555a8403e995a0be487a4194cfdbdd/path_utils.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 16671, 774, 3006, 12, 2854, 67, 803, 4672, 3536, 2723, 279, 1842, 585, 358, 279, 3699, 12123, 25239, 5069, 16961, 67, 3693, 67, 4537, 273, 315, 3744, 14650, 19, 2505, 19, 16341, 4898, 28...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 16671, 774, 3006, 12, 2854, 67, 803, 4672, 3536, 2723, 279, 1842, 585, 358, 279, 3699, 12123, 25239, 5069, 16961, 67, 3693, 67, 4537, 273, 315, 3744, 14650, 19, 2505, 19, 16341, 4898, 28...
for path, change in editor.changes.items(): if not (_is_path_within_scope(self.scope, path) and \ self.authz.has_permission(path)):
for key_path, change in editor.changes.items(): if not (_is_path_within_scope(self.scope, key_path) and \ self.authz.has_permission(key_path)):
def get_changes(self): pool = Pool(self.pool) tmp = Pool(pool) root = fs.revision_root(self.fs_ptr, self.rev, pool()) editor = repos.RevisionChangeCollector(self.fs_ptr, self.rev, pool()) e_ptr, e_baton = delta.make_editor(editor, pool()) repos.svn_repos_replay(root, e_ptr, e_baton, pool())
74965eff8fc97e9dd769fe4c0624c6400dcb2383 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/2831/74965eff8fc97e9dd769fe4c0624c6400dcb2383/svn_fs.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 336, 67, 6329, 12, 2890, 4672, 2845, 273, 8828, 12, 2890, 18, 6011, 13, 1853, 273, 8828, 12, 6011, 13, 1365, 273, 2662, 18, 13057, 67, 3085, 12, 2890, 18, 2556, 67, 6723, 16, 365, 18...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 336, 67, 6329, 12, 2890, 4672, 2845, 273, 8828, 12, 2890, 18, 6011, 13, 1853, 273, 8828, 12, 6011, 13, 1365, 273, 2662, 18, 13057, 67, 3085, 12, 2890, 18, 2556, 67, 6723, 16, 365, 18...
def _internal_poll(self, _deadstate=None):
def _internal_poll(self, _deadstate=None, _WaitForSingleObject=WaitForSingleObject, _WAIT_OBJECT_0=WAIT_OBJECT_0, _GetExitCodeProcess=GetExitCodeProcess):
def _internal_poll(self, _deadstate=None): """Check if child process has terminated. Returns returncode attribute.""" if self.returncode is None: if(_subprocess.WaitForSingleObject(self._handle, 0) == _subprocess.WAIT_OBJECT_0): self.returncode = _subprocess.GetExitCodeProcess(self._handle) return self.returncode
238db7ba819d46924f61ad07c279e6a344651634 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12029/238db7ba819d46924f61ad07c279e6a344651634/subprocess.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 7236, 67, 13835, 12, 2890, 16, 389, 22097, 2019, 33, 7036, 16, 389, 29321, 5281, 921, 33, 29321, 5281, 921, 16, 389, 19046, 67, 9422, 67, 20, 33, 19046, 67, 9422, 67, 20, 16, 38...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 7236, 67, 13835, 12, 2890, 16, 389, 22097, 2019, 33, 7036, 16, 389, 29321, 5281, 921, 33, 29321, 5281, 921, 16, 389, 19046, 67, 9422, 67, 20, 33, 19046, 67, 9422, 67, 20, 16, 38...
(name, version, release, arch) = output.split() return (name, version, release, arch)
(name, version, release, subarch) = output.split() if subarch not in subarch_mapping.keys(): raise Exception("%s/%s has invalid subarch %s" % (path, filename, subarch)) return (name, version, release, subarch)
def parse_rpm(fullpath): """read the name, version, release, and arch of an rpm. this version reads the rpm headers. this version takes a full pathname argument.""" cmd = 'rpm --nosignature --queryformat \'%%{NAME} %%{VERSION} %%{RELEASE} %%{ARCH}\' -q -p %s' % (fullpath) output = run_or_die(cmd) (name, version, release, arch) = output.split() return (name, version, release, arch)
a3e467705967c9bd663a934cca8f3c746d746f6f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/11867/a3e467705967c9bd663a934cca8f3c746d746f6f/rpmlisting.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1109, 67, 86, 7755, 12, 2854, 803, 4672, 3536, 896, 326, 508, 16, 1177, 16, 3992, 16, 471, 6637, 434, 392, 25228, 18, 225, 333, 1177, 6838, 326, 25228, 1607, 18, 225, 333, 1177, 5530, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1109, 67, 86, 7755, 12, 2854, 803, 4672, 3536, 896, 326, 508, 16, 1177, 16, 3992, 16, 471, 6637, 434, 392, 25228, 18, 225, 333, 1177, 6838, 326, 25228, 1607, 18, 225, 333, 1177, 5530, ...
14
49
def min_spanning_tree(self, weight_function=lambda e: 1, algorithm='Kruskal', starting_vertex=None ): """ Returns the edges of a minimum spanning tree, if one exists, otherwise returns False.
fae99ddaad3058abb143fcc5a405bfa8da3fb6ce /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/9890/fae99ddaad3058abb143fcc5a405bfa8da3fb6ce/graph.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1131, 67, 1752, 10903, 67, 3413, 12, 2890, 16, 3119, 67, 915, 33, 14661, 425, 30, 404, 16, 4886, 2218, 47, 8010, 79, 287, 2187, 5023, 67, 15281, 33, 7036, 262, 30, 3536, 2860, 326, 5...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1131, 67, 1752, 10903, 67, 3413, 12, 2890, 16, 3119, 67, 915, 33, 14661, 425, 30, 404, 16, 4886, 2218, 47, 8010, 79, 287, 2187, 5023, 67, 15281, 33, 7036, 262, 30, 3536, 2860, 326, 5...
notification_center.post_notification('DNSLookupDidFail', sender=self, data=NotificationData(error="No routes found"))
notification_center.post_notification('DNSLookupDidFail', sender=self, data=NotificationData(error="No routes found for SIP URI %s" % uri))
def lookup_sip_proxy(self, uri, supported_transports):
    """Perform an RFC 3263 compliant lookup of transport/ip/port combinations
    for a particular SIP URI.

    uri -- a SIPURI object to resolve
    supported_transports -- transports supported by the application, in
        order of preference

    Nothing is returned directly: a 'DNSLookupDidSucceed' notification is
    posted with a list of Route objects (in order of preference), or a
    'DNSLookupDidFail' notification with an error description."""
    notification_center = NotificationCenter()
    if len(supported_transports) == 0:
        notification_center.post_notification('DNSLookupDidFail', sender=self, data=NotificationData(error="No transports are supported"))
        return
    for supported_transport in supported_transports:
        if supported_transport not in self._transport_srv_service_map:
            notification_center.post_notification('DNSLookupDidFail', sender=self, data=NotificationData(error="Unsupported transport: %s" % supported_transport))
            return
    supported_transports = [transport.lower() for transport in supported_transports]
    # If the URI is a SIPS URI, only a TLS transport can be returned.
    if uri.secure:
        if "tls" not in supported_transports:
            notification_center.post_notification('DNSLookupDidFail', sender=self, data=NotificationData(error="Requested lookup for SIPS URI, but TLS transport is not supported"))
            return
        supported_transports = ["tls"]
    transport = None
    port = None
    ip = None
    srv_candidates = []
    a_candidates = []
    routes = []
    # Check if the transport was already set as a parameter on the SIP URI.
    if uri.parameters and "transport" in uri.parameters:
        transport = uri.parameters["transport"]
    # Check if the port was already set, we can skip NAPTR/SRV lookup later if it is.
    if uri.port:
        port = uri.port
    # Check if the host part of the URI is a IP address, we can skip NAPTR/SRV lookup later if it is.
    if re.match("^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", uri.host):
        ip = uri.host
    # Otherwise we can try NAPTR/SRV lookup.
    if port is None and ip is None:
        # Only do the NAPTR part if the transport was not specified as a URI parameter.
        if transport is None:
            try:
                naptr_answers = dns.resolver.query(uri.host, "NAPTR")
            except:
                # If NAPTR lookup fails for some reason, try SRV lookup for all supported transports.
                # Only the transports of those lookups that succeed are supported by the server.
                srv_candidates = [(transport, "%s.%s" % (self._transport_srv_service_map[transport], uri.host)) for transport in supported_transports]
            else:
                # If NAPTR lookup succeeds, order those entries that are applicable for SIP based on server preference.
                naptr_answers = [answer for answer in naptr_answers if answer.flags.lower() == "s" and answer.service.lower() in self._naptr_service_transport_map and self._naptr_service_transport_map[answer.service.lower()] in supported_transports]
                # Sort by preference within equal order values (stable sorts).
                naptr_answers.sort(key=lambda x: x.preference)
                naptr_answers.sort(key=lambda x: x.order)
                if len(naptr_answers) == 0:
                    notification_center.post_notification('DNSLookupDidFail', sender=self, data=NotificationData(error="Could find a suitable transport in NAPTR record of domain"))
                    return
                srv_candidates = [(self._naptr_service_transport_map[answer.service.lower()], answer.replacement) for answer in naptr_answers]
        else:
            # Directly try the SRV record of the requested transport.
            srv_candidates = [(transport, "%s.%s" % (self._transport_srv_service_map[transport], uri.host))]
        for srv_transport, srv_qname in srv_candidates:
            try:
                srv_answers = dns.resolver.query(srv_qname, "SRV")
            except:
                # If SRV lookup fails, try A record directly for a transport that was requested,
                # otherwise UDP for a SIP URI, TLS for a SIPS URI.
                if transport is None:
                    if (uri.secure and srv_transport == "tls") or (not uri.secure and srv_transport == "udp"):
                        a_candidates.append((srv_transport, uri.host, 5061 if srv_transport == "tls" else 5060))
                else:
                    if transport == srv_transport:
                        a_candidates.append((transport, uri.host, 5061 if transport == "tls" else 5060))
            else:
                # If SRV lookup succeeds, sort the resulting hosts based on server preference.
                srv_answers = sorted(srv_answers, key=lambda x: x.priority)
                srv_answers.sort(key=lambda x: x.weight, reverse=True)
                for answer in srv_answers:
                    a_candidates.append((srv_transport, answer.target, answer.port))
    else:
        # If NAPT/SRV was skipped, fill in defaults for the other variables.
        if transport is None:
            if uri.secure:
                transport = "tls"
            else:
                if "udp" not in supported_transports:
                    notification_center.post_notification('DNSLookupDidFail', sender=self, data=NotificationData(error="UDP transport is not suported"))
                    return
                transport = "udp"
        if port is None:
            port = 5061 if uri.secure else 5060
        # For an IP address, return this immediately, otherwise do a lookup for the requested hostname.
        if ip is None:
            a_candidates.append((transport, uri.host, port))
        else:
            notification_center.post_notification('DNSLookupDidSucceed', sender=self, data=NotificationData(result=[Route(ip, port=port, transport=transport)]))
            return
    # Keep results in a dictionary so we don't do double A record lookups
    a_cache = {}
    for a_transport, a_qname, a_port in a_candidates:
        try:
            if a_qname in a_cache:
                a_answers = a_cache[a_qname]
            else:
                a_answers = dns.resolver.query(a_qname, "A")
                a_cache[a_qname] = a_answers
        except:
            # If lookup fails then don't return this value
            pass
        else:
            for answer in a_answers:
                routes.append(Route(answer.address, port=a_port, transport=a_transport))
    if routes:
        notification_center.post_notification('DNSLookupDidSucceed', sender=self, data=NotificationData(result=routes))
    else:
        notification_center.post_notification('DNSLookupDidFail', sender=self, data=NotificationData(error="No routes found"))
9485d3e25e84e2bca195d18416dc1318b3911a8e /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/5703/9485d3e25e84e2bca195d18416dc1318b3911a8e/lookup.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3689, 67, 28477, 67, 5656, 12, 2890, 16, 2003, 16, 3260, 67, 2338, 4363, 4672, 3536, 2503, 445, 11199, 8372, 3847, 4449, 24820, 3689, 434, 4736, 19, 625, 19, 655, 17265, 364, 279, 6826, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3689, 67, 28477, 67, 5656, 12, 2890, 16, 2003, 16, 3260, 67, 2338, 4363, 4672, 3536, 2503, 445, 11199, 8372, 3847, 4449, 24820, 3689, 434, 4736, 19, 625, 19, 655, 17265, 364, 279, 6826, ...
_logger.debug("%s of activity %s failed: %s" % (verb, self._id, e))
_logger.debug("%s of activity %r failed: %s", verb, self, e)
def _join_failed_cb(self, e): verb = self._join_is_sharing and 'Share' or 'Join' _logger.debug("%s of activity %s failed: %s" % (verb, self._id, e)) throw_into_callback(self._join_err_cb, e)
25577b1486c01197df936bb991230734bc38df7b /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/3113/25577b1486c01197df936bb991230734bc38df7b/activity.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 5701, 67, 7307, 67, 7358, 12, 2890, 16, 425, 4672, 6405, 273, 365, 6315, 5701, 67, 291, 67, 31615, 471, 296, 9535, 11, 578, 296, 4572, 11, 389, 4901, 18, 4148, 27188, 87, 434, 5...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 5701, 67, 7307, 67, 7358, 12, 2890, 16, 425, 4672, 6405, 273, 365, 6315, 5701, 67, 291, 67, 31615, 471, 296, 9535, 11, 578, 296, 4572, 11, 389, 4901, 18, 4148, 27188, 87, 434, 5...
(code,msg)=self.getreply() return msg
return self.getreply()
def help(self, args=''):
    """Issue the SMTP 'help' command and return the server's help text."""
    self.putcmd("help", args)
    reply = self.getreply()
    # getreply() yields a (code, message) pair; callers only want the text.
    return reply[1]
296e14301a7aa23a5ee2bfaa1210af54e594cbbf /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/296e14301a7aa23a5ee2bfaa1210af54e594cbbf/smtplib.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2809, 12, 2890, 16, 833, 2218, 11, 4672, 3536, 55, 14636, 296, 5201, 11, 1296, 18, 2860, 2809, 977, 628, 1438, 12123, 365, 18, 458, 4172, 2932, 5201, 3113, 833, 13, 261, 710, 16, 3576,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2809, 12, 2890, 16, 833, 2218, 11, 4672, 3536, 55, 14636, 296, 5201, 11, 1296, 18, 2860, 2809, 977, 628, 1438, 12123, 365, 18, 458, 4172, 2932, 5201, 3113, 833, 13, 261, 710, 16, 3576,...
def on_selection_changed(self, selection):
def on_treeview_button_release(self, widget, event): selection = widget.get_selection()
def on_selection_changed(self, selection):
    """Flip the toggle column of the selected row and refresh the total size."""
    model, row = selection.get_selected()
    if row is None:
        # Nothing selected: nothing to toggle.
        return
    current = model.get_value(row, self.COLUMN_TOGGLE)
    model.set_value(row, self.COLUMN_TOGGLE, not current)
    self.calculate_total_size()
a10be3cedc969c035a88e7895c249c1e3fea2ea2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12778/a10be3cedc969c035a88e7895c249c1e3fea2ea2/episodeselector.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 603, 67, 3413, 1945, 67, 5391, 67, 9340, 12, 2890, 16, 3604, 16, 871, 4672, 4421, 273, 3604, 18, 588, 67, 10705, 1435, 938, 16, 1400, 273, 4421, 18, 588, 67, 8109, 1435, 309, 1400, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 603, 67, 3413, 1945, 67, 5391, 67, 9340, 12, 2890, 16, 3604, 16, 871, 4672, 4421, 273, 3604, 18, 588, 67, 10705, 1435, 938, 16, 1400, 273, 4421, 18, 588, 67, 8109, 1435, 309, 1400, 3...
self.points = [] self.colours = []
self.points = [] self.colours = []
def __init__(self, parent, id):
    """
    Constructor.

    parent -- parent of this frame
    id -- wx window id passed through to the superclass
    """
    super(CLUTRaycastingWidget, self).__init__(parent, id)
    # Control points of the transfer-function curves and their colours.
    self.points = []#plistlib.readPlist(sys.argv[-1])['16bitClutCurves']
    self.colours = []#plistlib.readPlist(sys.argv[-1])['16bitClutColors']
    # Value range of the displayed data (presumably Hounsfield units for
    # CT: -1024..2000 — TODO confirm against callers).
    self.init = -1024
    self.end = 2000
    self.padding = 5
    # Rendering / interaction state flags.
    self.to_render = False
    self.to_draw_points = 0
    self.histogram_pixel_points = [[0,0]]
    self.histogram_array = [100,100]
    self.CreatePixelArray()
    #self.sizer = wx.BoxSizer(wx.HORIZONTAL)
    #self.SetSizer(self.sizer)
    #self.DrawControls()
    # Drag state: whether a control point is currently being dragged.
    self.dragged = False
    self.point_dragged = None
    self.DoBind()
    #self.__bind_events()
    #self.SetAutoLayout(True)
    #self.sizer.Fit(self)
    self.Show()
    #self.LoadVolume()
6a2fe195f10ee152dee13c99203f19c8eda531e8 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/10228/6a2fe195f10ee152dee13c99203f19c8eda531e8/clut_raycasting.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 982, 16, 612, 4672, 3536, 11417, 18, 225, 982, 1493, 982, 434, 333, 2623, 3536, 2240, 12, 5017, 1693, 54, 528, 4155, 310, 4609, 16, 365, 2934, 972, 2738,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 982, 16, 612, 4672, 3536, 11417, 18, 225, 982, 1493, 982, 434, 333, 2623, 3536, 2240, 12, 5017, 1693, 54, 528, 4155, 310, 4609, 16, 365, 2934, 972, 2738,...
elif props.has_key("name") and props.has_key("arch"): self.__parseFilelist(reader, props["name"], props["arch"])
def __parseNode(self, reader):
    """Consume <package> elements from an XML reader.

    RPM-typed packages are parsed and stored into self.pkglist (source
    packages only when self.readsrc is set); name/arch entries are handed
    to the filelist parser instead.
    """
    while reader.Read() == 1:
        # Only <package> element nodes are of interest.
        if reader.NodeType() != TYPE_ELEMENT or reader.Name() != "package":
            continue
        attrs = getProps(reader)
        if attrs.get("type") == "rpm":
            parsed = self.__parsePackage(reader)
            if self.readsrc or parsed["arch"] != "src":
                self.pkglist[parsed.getNEVRA0()] = parsed
        elif attrs.has_key("name") and attrs.has_key("arch"):
            self.__parseFilelist(reader, attrs["name"], attrs["arch"])
427871e48b993cbe1bec0e26f0693386c8014c95 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/1143/427871e48b993cbe1bec0e26f0693386c8014c95/oldpyrpm.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2670, 907, 12, 2890, 16, 2949, 4672, 1323, 2949, 18, 1994, 1435, 422, 404, 30, 309, 2949, 18, 15101, 1435, 422, 3463, 67, 10976, 471, 521, 2949, 18, 461, 1435, 422, 315, 5610, 68...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2670, 907, 12, 2890, 16, 2949, 4672, 1323, 2949, 18, 1994, 1435, 422, 404, 30, 309, 2949, 18, 15101, 1435, 422, 3463, 67, 10976, 471, 521, 2949, 18, 461, 1435, 422, 315, 5610, 68...
sim.getRNG().setSeed(12345)
sim.getRNG().set(seed=12345)
def simulate():
    """Run a toy simuPOP simulation and return the final frequency of
    allele 0 at locus 0.

    A population of 1000 individuals with 10 loci and an 'age' info field
    evolves for 100 generations under random mating; genotypes start at a
    50/50 allele frequency and ages are drawn uniformly from 0..10.
    """
    pop = sim.Population(1000, loci=10, infoFields='age')
    pop.evolve(
        # initOps run once before generation 0.
        initOps=[
            sim.InitSex(),
            sim.InitGenotype(freq=[0.5, 0.5]),
            sim.InitInfo(lambda: random.randint(0, 10), infoFields='age')
        ],
        matingScheme=sim.RandomMating(),
        # finalOps run after the last generation; Stat stores alleleFreq
        # in the population's local namespace, read back via dvars().
        finalOps=sim.Stat(alleleFreq=0),
        gen=100
    )
    return pop.dvars().alleleFreq[0][0]
205f55f8c6cd28aed704d70838912648f07ac340 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/401/205f55f8c6cd28aed704d70838912648f07ac340/userGuide.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 20089, 13332, 1843, 273, 3142, 18, 31821, 12, 18088, 16, 1515, 77, 33, 2163, 16, 1123, 2314, 2218, 410, 6134, 1843, 18, 14965, 5390, 12, 1208, 8132, 22850, 3142, 18, 2570, 55, 338, 9334,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 20089, 13332, 1843, 273, 3142, 18, 31821, 12, 18088, 16, 1515, 77, 33, 2163, 16, 1123, 2314, 2218, 410, 6134, 1843, 18, 14965, 5390, 12, 1208, 8132, 22850, 3142, 18, 2570, 55, 338, 9334,...
errorMsg = orangemsg("Clipboard item is an empty group."\
errorMsg = orangemsg("Clipboard item is probably an empty group."\
def _pasteGroup(self, groupToPaste, mousePosition = None):
    """
    Paste the given group (and all its members) in the 3D workspace.
    @param groupToPaste: The group to be pasted in the 3D workspace
    @type groupToPaste: L{Group}
    @param mousePosition: These are the coordinates during mouse double click.
    @type mousePosition: Array containing the x, y, z positions on the
           screen or 'None'
    @return: (newGroup, errorMsg) — the newly created group and an error
             message string, or None when pasting succeeded.
    @see: L{self.paste} for implementation notes.
    """
    assert isinstance(groupToPaste, Group)
    pastable = groupToPaste
    pos = mousePosition
    newGroup = None
    errorMsg = None
    moveOffset = V(0, 0, 0)
    assy = self.assy
    newGroup = Group(pastable.name, assy, None)
    # Review: should this use Group or groupToPaste.__class__,
    # e.g. re a DnaGroup or DnaSegment? [bruce 080314 question]
    nodes = list(pastable.members)
    newNodeList = copied_nodes_for_DND(nodes, autogroup_at_top = False,
                                       assy = assy)
    if not newNodeList:
        # Bug fix: the two adjacent literals used to concatenate with no
        # separator, producing "...empty group.Paste cancelled".
        errorMsg = orangemsg("Clipboard item is an empty group. "\
                             "Paste cancelled")
        # review: is this claim about the cause always correct?
        return newGroup, errorMsg
    # Collect the chunks among the copied nodes to compute a bounding box.
    chunkList = []
    for newNode in newNodeList:
        if isinstance(newNode, Chunk):
            chunkList.append(newNode)
    if chunkList:
        boundingBox = BBox()
        for m in chunkList:
            boundingBox.merge(m.bbox)
        approxCenter = boundingBox.center()
        scale = float(boundingBox.scale() * 0.06)
        if scale < 0.001:
            scale = 0.1
    else:
        # No chunks: fall back to a near-origin center and default scale.
        approxCenter = V(0.01, 0.01, 0.01)
        scale = 0.1
    if pos:
        # Paste at the double-click position.
        moveOffset = pos - approxCenter
    else:
        # No mouse position: offset slightly right and down in view space.
        moveOffset = scale * self.assy.o.right
        moveOffset += scale * self.assy.o.down
    for newNode in newNodeList:
        newNode.move(moveOffset)
        newGroup.addmember(newNode)
    assy.addnode(newGroup)
    return newGroup, errorMsg
f8f2489732dadd8ad6f66ca6d06b74cd1a4a3b8b /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/11221/f8f2489732dadd8ad6f66ca6d06b74cd1a4a3b8b/ops_copy.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 29795, 1114, 12, 2890, 16, 1041, 774, 52, 14725, 16, 7644, 2555, 273, 599, 4672, 3536, 453, 14725, 326, 864, 1041, 261, 464, 777, 2097, 4833, 13, 316, 326, 890, 463, 6003, 18, 632...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 29795, 1114, 12, 2890, 16, 1041, 774, 52, 14725, 16, 7644, 2555, 273, 599, 4672, 3536, 453, 14725, 326, 864, 1041, 261, 464, 777, 2097, 4833, 13, 316, 326, 890, 463, 6003, 18, 632...
""")
""").replace('\n', os.linesep)
def testBufferOutputAddErrorOrFailure(self): for message_attr, add_attr, include_error in [ ('errors', 'addError', True), ('failures', 'addFailure', False), ('errors', 'addError', True), ('failures', 'addFailure', False) ]: result = self.getStartedResult() buffered_out = sys.stdout buffered_err = sys.stderr result._original_stderr = StringIO() result._original_stdout = StringIO() print >> sys.stdout, 'foo' if include_error: print >> sys.stderr, 'bar' addFunction = getattr(result, add_attr) addFunction(self, (None, None, None)) result.stopTest(self) result_list = getattr(result, message_attr) self.assertEqual(len(result_list), 1) test, message = result_list[0] expectedOutMessage = textwrap.dedent(""" Stdout: foo """) expectedErrMessage = '' if include_error: expectedErrMessage = textwrap.dedent(""" Stderr: bar """) expectedFullMessage = 'None\n%s%s' % (expectedOutMessage, expectedErrMessage)
52d784712d3f8166a42a5f0fa0b668d8ae77f594 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7464/52d784712d3f8166a42a5f0fa0b668d8ae77f594/test_result.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 1892, 1447, 986, 668, 1162, 5247, 12, 2890, 4672, 364, 883, 67, 1747, 16, 527, 67, 1747, 16, 2341, 67, 1636, 316, 306, 7707, 4324, 2187, 296, 1289, 668, 2187, 1053, 3631, 7707, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 1892, 1447, 986, 668, 1162, 5247, 12, 2890, 4672, 364, 883, 67, 1747, 16, 527, 67, 1747, 16, 2341, 67, 1636, 316, 306, 7707, 4324, 2187, 296, 1289, 668, 2187, 1053, 3631, 7707, 2...
if node.prop("pre") == "1": prereq = RPMSENSE_PREREQ else: prereq = 0
def __parseDeps(self, node, pkg):
    """Parse a sibling chain of dependency <entry> elements.

    node -- first node of a libxml2-style sibling chain (has .type, .name,
            .prop() and .next)
    pkg -- accepted for interface symmetry; not used in this method

    Returns a list of three parallel lists: [names, flags, versions].
    """
    plist = []
    plist.append([])    # [0]: dependency names
    plist.append([])    # [1]: flag bitmasks (plus the prereq bit)
    plist.append([])    # [2]: version strings ("" when unversioned)
    while node != None:
        if node.type != "element":
            # Skip text/comment nodes between entries.
            node = node.next
            continue
        if node.name == "entry":
            name = node.prop("name")
            flags = node.prop("flags")
            ver = node.prop("ver")
            if ver == None:
                # Unversioned dependency: zero flags, empty version.
                plist[0].append(name)
                plist[1].append(0)
                plist[2].append("")
                node = node.next
                continue
            epoch = node.prop("epoch")
            rel = node.prop("rel")
            # Assemble the full version string: [epoch:]ver[-rel].
            if epoch != None:
                ver = "%s:%s" % (epoch, ver)
            if rel != None:
                ver = "%s-%s" % (ver, rel)
            plist[0].append(name)
            # pre="1" marks the dependency as a pre-requisite.
            if node.prop("pre") == "1":
                prereq = RPMSENSE_PREREQ
            else:
                prereq = 0
            plist[1].append(self.flagmap[flags] + prereq)
            plist[2].append(ver)
        node = node.next
    return plist
ec79f0785afb7165fd3d6843caaa4524fc446ddd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/1143/ec79f0785afb7165fd3d6843caaa4524fc446ddd/io.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2670, 14430, 12, 2890, 16, 756, 16, 3475, 4672, 21074, 273, 5378, 21074, 18, 6923, 3816, 5717, 21074, 18, 6923, 3816, 5717, 21074, 18, 6923, 3816, 5717, 1323, 756, 480, 599, 30, 30...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2670, 14430, 12, 2890, 16, 756, 16, 3475, 4672, 21074, 273, 5378, 21074, 18, 6923, 3816, 5717, 21074, 18, 6923, 3816, 5717, 21074, 18, 6923, 3816, 5717, 1323, 756, 480, 599, 30, 30...
ans = psi(n+2) * psi(n)**3 - \ psi(n-1) * psi(n+1)**3
ans = psi(n+2) * psi(n)**3 - psi(n-1) * psi(n+1)**3
def full_division_polynomial(self, m): """ Return the m-th bivariate division polynomial in x and y.
84dad5dfc790d5dacf9589fdd0cfafb884726e83 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/9417/84dad5dfc790d5dacf9589fdd0cfafb884726e83/ell_generic.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1983, 67, 2892, 1951, 67, 3915, 13602, 12, 2890, 16, 312, 4672, 3536, 2000, 326, 312, 17, 451, 324, 27693, 16536, 16991, 316, 619, 471, 677, 18, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1983, 67, 2892, 1951, 67, 3915, 13602, 12, 2890, 16, 312, 4672, 3536, 2000, 326, 312, 17, 451, 324, 27693, 16536, 16991, 316, 619, 471, 677, 18, 2, -100, -100, -100, -100, -100, -100, ...
delete_all = 1
def checkPermissionManageObjects(self, p_object=None):
    """Gather per-object delete/edit permissions for a list of objects.

    Returns a (select_all, delete_all, results) tuple: both flags become 1
    as soon as any object grants delete or edit permission, and results
    holds (delete_permission, edit_permission, object) triples for every
    approved object plus every unapproved object the user may touch.
    """
    objects = self.getObjects() if p_object is None else p_object
    results = []
    select_all = 0
    delete_all = 0
    permission_seen = 0
    for obj in self.utSortObjsListByAttr(objects, 'sortorder', 0):
        can_delete = obj.checkPermissionDeleteObject()
        can_edit = obj.checkPermissionEditObject()
        # The first object that grants either permission turns on the
        # "select all" / "delete all" UI flags.
        if not permission_seen and (can_delete == 1 or can_edit == 1):
            permission_seen = 1
            select_all = 1
            delete_all = 1
        if obj.approved or ((can_delete or can_edit) and not obj.approved):
            results.append((can_delete, can_edit, obj))
    return (select_all, delete_all, results)
05be38035c44ab4c82255a785d26d076f6985f1f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3287/05be38035c44ab4c82255a785d26d076f6985f1f/NyFolder.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 25300, 21258, 4710, 12, 2890, 16, 293, 67, 1612, 33, 7036, 4672, 3536, 3536, 309, 293, 67, 1612, 353, 599, 30, 293, 67, 1612, 273, 365, 18, 588, 4710, 1435, 1686, 273, 5378, 2027, 67, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 25300, 21258, 4710, 12, 2890, 16, 293, 67, 1612, 33, 7036, 4672, 3536, 3536, 309, 293, 67, 1612, 353, 599, 30, 293, 67, 1612, 273, 365, 18, 588, 4710, 1435, 1686, 273, 5378, 2027, 67, ...
if f.find("-done-"): fp = file(f, "r")
if re.search("^bws-[\S]+-done-", f): fp = file(d+"/"+f, "r")
def main(argv):
    """Aggregate bandwidth-scanner result files into one output file.

    argv[1:-1] -- directories to walk for scanner "-done-" result files
    argv[-1] -- path of the aggregated output file to write

    Side effects: fills the module-level bw_files and nodes dictionaries
    and writes "node_id=<fp> bw=<n>" lines plus a timestamp header to the
    output file.
    """
    for d in argv[1:-1]:
        # First, create a list of the most recent files in the
        # scan dirs that are recent enough
        for root, dirs, files in os.walk(d):
            for f in files:
                # Bug fix: str.find() returns -1 (which is truthy) when the
                # substring is absent, so the bare "if f.find(...)"
                # accepted almost every file.  Compare against -1.
                if f.find("-done-") != -1:
                    # os.walk yields bare file names; open via full path.
                    fname = os.path.join(root, f)
                    fp = file(fname, "r")
                    ranks = fp.readline()
                    timestamp = float(fp.readline())
                    fp.close()
                    # Keep only the newest file per ranks bucket.
                    if ranks not in bw_files or bw_files[ranks][0] < timestamp:
                        bw_files[ranks] = (timestamp, fname)
    # Parse every selected file into per-node measurement lines.
    for (t, f) in bw_files.itervalues():
        fp = file(f, "r")
        fp.readline()   # skip ranks header
        fp.readline()   # skip timestamp header
        for l in fp.readlines():
            line = Line(l)
            if line.idhex not in nodes:
                n = Node()
                nodes[line.idhex] = n
            else:
                n = nodes[line.idhex]
            n.add_line(line)
        fp.close()
    # Network-wide averages before each node picks its own values.
    pre_strm_avg = sum(map(lambda n: n.avg_strm_bw(), nodes.itervalues()))/ \
                     float(len(nodes))
    pre_filt_avg = sum(map(lambda n: n.avg_filt_bw(), nodes.itervalues()))/ \
                     float(len(nodes))
    for n in nodes.itervalues():
        n.choose_strm_bw(pre_strm_avg)
        n.choose_filt_bw(pre_filt_avg)
    true_strm_avg = sum(map(lambda n: n.chosen_sbw, nodes.itervalues()))/ \
                     float(len(nodes))
    true_filt_avg = sum(map(lambda n: n.chosen_fbw, nodes.itervalues()))/ \
                     float(len(nodes))
    # Scale each node by whichever ratio is closest to 1.0.
    for n in nodes.itervalues():
        n.fbw_ratio = n.filt_bw[n.chosen_fbw]/true_filt_avg
        n.sbw_ratio = n.strm_bw[n.chosen_sbw]/true_strm_avg
        if closest_to_one((n.sbw_ratio, n.fbw_ratio)) == 0:
            n.ratio = n.sbw_ratio
            n.new_bw = n.ns_bw[n.chosen_sbw]*n.ratio
        else:
            n.ratio = n.fbw_ratio
            n.new_bw = n.ns_bw[n.chosen_fbw]*n.ratio
    n_print = nodes.values()
    # Bug fix: the old comparator returned a bool (never -1), which is not
    # a valid cmp function; sort descending by new_bw explicitly.
    n_print.sort(lambda x, y: cmp(y.new_bw, x.new_bw))
    oldest_timestamp = min([t for (t, f) in bw_files.itervalues()])
    out = file(argv[-1], "w")
    out.write(str(int(round(oldest_timestamp, 0)))+"\n")
    for n in n_print:
        out.write("node_id="+n.idhex+" bw="+str(base10_round(n.new_bw))+"\n")
    out.close()
f83bead330fe47785c58ea75d371332044c48c57 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/3762/f83bead330fe47785c58ea75d371332044c48c57/aggregate.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2774, 12, 19485, 4672, 364, 302, 316, 5261, 63, 21, 30, 17, 21, 14542, 468, 5783, 16, 752, 279, 666, 434, 326, 4486, 8399, 1390, 316, 326, 468, 4135, 7717, 716, 854, 8399, 7304, 364, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2774, 12, 19485, 4672, 364, 302, 316, 5261, 63, 21, 30, 17, 21, 14542, 468, 5783, 16, 752, 279, 666, 434, 326, 4486, 8399, 1390, 316, 326, 468, 4135, 7717, 716, 854, 8399, 7304, 364, ...
if param == 'gps-end-time': self.__end = value if param == 'gps-start-time': self.__start = value
if param == 'gps-end-time': self.__end = value self._AnalysisNode__end = int(value) if param == 'gps-start-time': self.__start = value self._AnalysisNode__start = int(value) if param == 'pad-data': self._InspiralAnalysisNode__pad_data = int(value)
def __init__(self, inspJob, procParams, ifo, trig, cp,opts,dag, datafindCache, d_node, datafindCommand, type='plot', sngl_table = None):
4ffcce48ad76d77ebd15ed306f17cc9f9b02c633 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/3592/4ffcce48ad76d77ebd15ed306f17cc9f9b02c633/fu_Condor.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 316, 1752, 2278, 16, 5418, 1370, 16, 21479, 16, 23142, 16, 3283, 16, 4952, 16, 30204, 16, 501, 4720, 1649, 16, 302, 67, 2159, 16, 501, 4720, 2189, 16, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 316, 1752, 2278, 16, 5418, 1370, 16, 21479, 16, 23142, 16, 3283, 16, 4952, 16, 30204, 16, 501, 4720, 1649, 16, 302, 67, 2159, 16, 501, 4720, 2189, 16, ...
copy_if_size_differs(vm, tarred_test_path, test_name + ".tar.bz2")
copy_if_size_differs(vm, tarred_test_path, tarred_test_path)
def extract(vm, remote_path, dest_dir="."): """ Extract a .tar.bz2 file on the guest.
9a0ed8a302f92cd95aef5e2febbdd589f45c65ce /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/10349/9a0ed8a302f92cd95aef5e2febbdd589f45c65ce/autotest.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2608, 12, 3489, 16, 2632, 67, 803, 16, 1570, 67, 1214, 1546, 1199, 4672, 3536, 8152, 279, 263, 11718, 18, 25292, 22, 585, 603, 326, 13051, 18, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2608, 12, 3489, 16, 2632, 67, 803, 16, 1570, 67, 1214, 1546, 1199, 4672, 3536, 8152, 279, 263, 11718, 18, 25292, 22, 585, 603, 326, 13051, 18, 2, -100, -100, -100, -100, -100, -100, -1...
with test_support.guard_warnings_filter():
with test_support.catch_warning():
def test_touched(self):
    """Smoke test: macostools.touched() should complete without errors."""
    import warnings
    with test_support.guard_warnings_filter():
        # touched() is deprecated; silence its warning for this call only.
        warnings.filterwarnings('ignore', 'macostools.touched*',
                                category=DeprecationWarning)
        macostools.touched(test_support.TESTFN)
4f1268a11afea97770f5b7a7838d66ce2c0ab744 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/3187/4f1268a11afea97770f5b7a7838d66ce2c0ab744/test_macostools.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 869, 19370, 12, 2890, 4672, 468, 1220, 8654, 1338, 7434, 716, 5083, 640, 1405, 15156, 10555, 18, 1930, 5599, 598, 1842, 67, 13261, 18, 14683, 67, 8551, 13332, 5599, 18, 2188, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 869, 19370, 12, 2890, 4672, 468, 1220, 8654, 1338, 7434, 716, 5083, 640, 1405, 15156, 10555, 18, 1930, 5599, 598, 1842, 67, 13261, 18, 14683, 67, 8551, 13332, 5599, 18, 2188, 1...
and child.tagName in FIXUP_PARA_ELEMENTS:
and child.tagName in RECURSE_INTO_PARA_CONTAINERS:
def fixup_paras(doc):
    """Normalize paragraph markup in every top-level paragraph container.

    Walks doc's direct children and, for each element whose tag is listed
    in FIXUP_PARA_ELEMENTS, fixes up its paragraphs and those of any
    nested <description> elements.
    """
    for child in doc.childNodes:
        if child.nodeType == xml.dom.core.ELEMENT \
           and child.tagName in FIXUP_PARA_ELEMENTS:
            fixup_paras_helper(doc, child)
            # Also fix up <description> elements nested inside this child.
            descriptions = child.getElementsByTagName("description")
            for description in descriptions:
                if DEBUG_PARA_FIXER:
                    sys.stderr.write("-- Fixing up <description> element...\n")
                fixup_paras_helper(doc, description)
8d20f30e2bb3a6cee170c7f74006722d05264c26 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8125/8d20f30e2bb3a6cee170c7f74006722d05264c26/docfixer.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2917, 416, 67, 1065, 345, 12, 2434, 4672, 364, 1151, 316, 997, 18, 3624, 3205, 30, 309, 1151, 18, 2159, 559, 422, 2025, 18, 9859, 18, 3644, 18, 10976, 521, 471, 1151, 18, 2692, 461, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2917, 416, 67, 1065, 345, 12, 2434, 4672, 364, 1151, 316, 997, 18, 3624, 3205, 30, 309, 1151, 18, 2159, 559, 422, 2025, 18, 9859, 18, 3644, 18, 10976, 521, 471, 1151, 18, 2692, 461, ...
docs = filter(lambda x:not _is_private(x.name()), docs)
docs = [d for d in docs if not self._is_private(d.name())]
def _sort(self, docs, sort_order=None): """ Sort and filter a list of C{ObjDoc}s. In particular, if C{sort_order} is not C{None}, then sort according to its contents; otherwise, sort using L{_cmp_name}. If L{_show_private} is true, then filter out all private objects; otherwise, perform no filtering.
c953b2de7aa23b0dce9a80366ed13af227c1f7f3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3512/c953b2de7aa23b0dce9a80366ed13af227c1f7f3/html.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 3804, 12, 2890, 16, 3270, 16, 1524, 67, 1019, 33, 7036, 4672, 3536, 5928, 471, 1034, 279, 666, 434, 385, 95, 2675, 1759, 97, 87, 18, 225, 657, 6826, 16, 309, 385, 95, 3804, 67, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 3804, 12, 2890, 16, 3270, 16, 1524, 67, 1019, 33, 7036, 4672, 3536, 5928, 471, 1034, 279, 666, 434, 385, 95, 2675, 1759, 97, 87, 18, 225, 657, 6826, 16, 309, 385, 95, 3804, 67, ...
def send(mfrom, mto, subject, body, composer=Composer(),
def send(From, To, subject, body, composer=Composer(),
def send(mfrom, mto, subject, body, composer=None, sender=None):
    """Compose an email message and hand it to a sender.

    mfrom, mto -- sender and recipient addresses
    subject, body -- subject line and message payload
    composer -- object with a compose(body, From=, To=, Subject=) method;
                a fresh Composer() is created when omitted
    sender -- object with a send(message) method; a fresh SendmailSender()
              is created when omitted

    Returns whatever sender.send() returns.
    """
    # Bug fix: default arguments are evaluated once at definition time, so
    # "composer=Composer()" shared a single instance across every call
    # (and instantiated both collaborators at import time).  Build them
    # lazily instead; explicit arguments behave exactly as before.
    if composer is None:
        composer = Composer()
    if sender is None:
        sender = SendmailSender()
    message = composer.compose(body, From=mfrom, To=mto, Subject=subject)
    return sender.send(message)
3857d11d8b4d17b3b11f942449d06b9705b5f156 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/1871/3857d11d8b4d17b3b11f942449d06b9705b5f156/qMail.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1366, 12, 1265, 16, 2974, 16, 3221, 16, 1417, 16, 8561, 33, 14404, 9334, 5793, 33, 3826, 4408, 12021, 1435, 4672, 883, 273, 8561, 18, 23658, 12, 3432, 16, 6338, 33, 81, 2080, 16, 2974,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1366, 12, 1265, 16, 2974, 16, 3221, 16, 1417, 16, 8561, 33, 14404, 9334, 5793, 33, 3826, 4408, 12021, 1435, 4672, 883, 273, 8561, 18, 23658, 12, 3432, 16, 6338, 33, 81, 2080, 16, 2974,...
print "ERROR: System %s already has instance %s defined in %s Setup" % (system,instance,hostSetup) return
print "ERROR: System %s already has instance %s defined in %s Setup" % ( system, instance, hostSetup ) return
def do_add( self, args ): """ Add new entity to the Configuration Service usage: add system <system> <instance> """ argss = args.split() option = argss[0] del argss[0] if option == "instance" or option == "system": system = argss[0] instance = argss[1] client = SystemAdministratorClient( self.host, self.port ) result = client.getInfo() if not result['OK']: print "Error:", result['Message'] hostSetup = result['Value']['Setup'] instanceName = gConfig.getValue('/DIRAC/Setups/%s/%s' % (hostSetup,system),'') if instanceName: if instanceName == instance: print "System %s already has instance %s defined in %s Setup" % (system,instance,hostSetup) else: print "ERROR: System %s already has instance %s defined in %s Setup" % (system,instance,hostSetup) return result = InstallTools.addSystemInstance( system, instance, hostSetup ) if not result['OK']: print "ERROR:", result['Message'] else: print "%s system instance %s added successfully" % ( system, instance ) else: print "Unknown option:", option
3f21e8ff4ce8897d3ec22f33d1dd4f408a5a68de /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12864/3f21e8ff4ce8897d3ec22f33d1dd4f408a5a68de/SystemAdministratorClientCLI.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 741, 67, 1289, 12, 365, 16, 833, 262, 30, 3536, 1436, 394, 1522, 358, 326, 4659, 1956, 225, 4084, 30, 225, 527, 2619, 411, 4299, 34, 411, 1336, 34, 3536, 833, 87, 273, 833, 18, 4939,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 741, 67, 1289, 12, 365, 16, 833, 262, 30, 3536, 1436, 394, 1522, 358, 326, 4659, 1956, 225, 4084, 30, 225, 527, 2619, 411, 4299, 34, 411, 1336, 34, 3536, 833, 87, 273, 833, 18, 4939,...
try: major = int(major_minor[0]) minor = int(major_minor[1]) except ValueError: continue
major = major_minor[0] minor = major_minor[1]
def EnumTlbs(excludeFlags = 0): """Return a list of TypelibSpec objects, one for each registered library. """ key = win32api.RegOpenKey(win32con.HKEY_CLASSES_ROOT, "Typelib") iids = EnumKeys(key) results = [] for iid, crap in iids: try: key2 = win32api.RegOpenKey(key, str(iid)) except win32api.error: # A few good reasons for this, including "access denied". continue for version, tlbdesc in EnumKeys(key2): major_minor = string.split(version, '.', 1) if len(major_minor) < 2: major_minor.append('0') try: # For some reason, this code used to assume the values were hex. # This seems to not be true - particularly for CDO 1.21 # *sigh* - it appears there are no rules here at all, so when we need # to know the info, we must load the tlb by filename and request it. # The Resolve() method on the TypelibSpec does this. major = int(major_minor[0]) minor = int(major_minor[1]) except ValueError: # crap in the registry! continue key3 = win32api.RegOpenKey(key2, str(version)) try: # The "FLAGS" are at this point flags = int(win32api.RegQueryValue(key3, "FLAGS")) except (win32api.error, ValueError): flags = 0 if flags & excludeFlags==0: for lcid, crap in EnumKeys(key3): try: lcid = int(lcid) except ValueError: # not an LCID entry continue # Only care about "{lcid}\win32" key - jump straight there. try: key4 = win32api.RegOpenKey(key3, "%s\\win32" % (lcid,)) except win32api.error: continue try: dll, typ = win32api.RegQueryValueEx(key4, None) if typ==win32con.REG_EXPAND_SZ: dll = win32api.ExpandEnvironmentStrings(dll) except win32api.error: dll = None spec = TypelibSpec(iid, lcid, major, minor, flags) spec.dll = dll spec.desc = tlbdesc spec.ver_desc = tlbdesc + " (" + version + ")" results.append(spec) return results
0c6126683a083419c7e80e6f3b286e93f8ac4da6 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/677/0c6126683a083419c7e80e6f3b286e93f8ac4da6/selecttlb.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 6057, 56, 80, 2038, 12, 10157, 5094, 273, 374, 4672, 3536, 990, 279, 666, 434, 16973, 30575, 1990, 2184, 16, 1245, 364, 1517, 4104, 5313, 18, 3536, 498, 273, 5657, 1578, 2425, 18, 1617, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 6057, 56, 80, 2038, 12, 10157, 5094, 273, 374, 4672, 3536, 990, 279, 666, 434, 16973, 30575, 1990, 2184, 16, 1245, 364, 1517, 4104, 5313, 18, 3536, 498, 273, 5657, 1578, 2425, 18, 1617, ...
url = string.strip(url[1:-1]) if url[:4] == 'URL:': url = string.strip(url[4:])
url = url[1:-1].strip() if url[:4] == 'URL:': url = url[4:].strip()
def unwrap(url): """unwrap('<URL:type://host/path>') --> 'type://host/path'.""" url = string.strip(url) if url[:1] == '<' and url[-1:] == '>': url = string.strip(url[1:-1]) if url[:4] == 'URL:': url = string.strip(url[4:]) return url
71b965f6e0b5f68cbaa0e8ee02f092736e43e951 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3187/71b965f6e0b5f68cbaa0e8ee02f092736e43e951/urllib.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 11014, 12, 718, 4672, 3536, 318, 4113, 2668, 32, 1785, 30, 723, 2207, 2564, 19, 803, 1870, 13, 15431, 296, 723, 2207, 2564, 19, 803, 11, 12123, 880, 273, 533, 18, 6406, 12, 718, 13, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 11014, 12, 718, 4672, 3536, 318, 4113, 2668, 32, 1785, 30, 723, 2207, 2564, 19, 803, 1870, 13, 15431, 296, 723, 2207, 2564, 19, 803, 11, 12123, 880, 273, 533, 18, 6406, 12, 718, 13, ...
matches = matches_a.intersection(matches_b) if not matches: skipped.append(u'%s /%s/' % (surface or reading, surface))
if surface: matches = matches.intersection( [s.lexeme for s in lexicon_models.LexemeSurface.objects.filter( surface=surface)] ) if len(matches) == 0: skipped.append(u'%s /%s/ (no match)' % ( surface or reading, scripts.toHiragana(reading) ))
def _find_matching_lexeme(reading, surface=None, skipped=None): """Finds a uniquely matching lexeme for this specification.""" if surface is None: surface = reading if skipped is None: skipped = [] matches_a = set( [s.lexeme for s in lexicon_models.LexemeSurface.objects.filter( surface=surface)] ) matches_b = set( [r.lexeme for r in lexicon_models.LexemeReading.objects.filter( reading=reading)] ) matches = matches_a.intersection(matches_b) if not matches: skipped.append(u'%s /%s/' % (surface or reading, surface)) return None (unique_match,) = list(matches) return unique_match
6dde0140bfc6a155eb4653c59b4b7540f83c6385 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/1796/6dde0140bfc6a155eb4653c59b4b7540f83c6385/add_syllabus.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 4720, 67, 16271, 67, 4149, 4698, 12, 21803, 16, 9034, 33, 7036, 16, 9700, 33, 7036, 4672, 3536, 8947, 279, 30059, 3607, 5275, 4698, 364, 333, 7490, 12123, 309, 9034, 353, 599, 30, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 4720, 67, 16271, 67, 4149, 4698, 12, 21803, 16, 9034, 33, 7036, 16, 9700, 33, 7036, 4672, 3536, 8947, 279, 30059, 3607, 5275, 4698, 364, 333, 7490, 12123, 309, 9034, 353, 599, 30, ...
m = __import__(name) return self.rexec.copy_except(m, ())
m = __import__(name) return self.rexec.copy_except(m, ())
def init_builtin(self, name):
ce7c76df1b92a89c9b48d025af1313d1dcf28c4f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ce7c76df1b92a89c9b48d025af1313d1dcf28c4f/rexec.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1208, 67, 24553, 12, 2890, 16, 508, 4672, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1208, 67, 24553, 12, 2890, 16, 508, 4672, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100,...
tester('ntpath.split("//conky/mountpoint/")', ('//conky/mountpoint/', ''))
tester('ntpath.split("//conky/mountpoint/")', ('//conky/mountpoint', ''))
def tester(fn, wantResult): fn = string.replace(fn, "\\", "\\\\") gotResult = eval(fn) if wantResult != gotResult: print "error!" print "evaluated: " + str(fn) print "should be: " + str(wantResult) print " returned: " + str(gotResult) print "" global errors errors = errors + 1
2db8d40b554dbf3f9ff306aae5242830f909e7e9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3187/2db8d40b554dbf3f9ff306aae5242830f909e7e9/test_ntpath.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 268, 7654, 12, 4293, 16, 2545, 1253, 4672, 2295, 273, 533, 18, 2079, 12, 4293, 16, 14520, 16, 28040, 2412, 13, 2363, 1253, 273, 5302, 12, 4293, 13, 309, 2545, 1253, 480, 2363, 1253, 30...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 268, 7654, 12, 4293, 16, 2545, 1253, 4672, 2295, 273, 533, 18, 2079, 12, 4293, 16, 14520, 16, 28040, 2412, 13, 2363, 1253, 273, 5302, 12, 4293, 13, 309, 2545, 1253, 480, 2363, 1253, 30...
self.assertEqual(rarf.read(read_bytes), rawf.read(read_bytes), 'mismatch in random read')
self.assertEqual(rarf.read(1), rawf.read(1), 'mismatch in random read')
def verify_read_random_from_start(self, rar_file, raw_file): file_size = os.path.getsize(raw_file) rarf = open(rar_file, 'r') rawf = open(raw_file, 'r') read_bytes = 10 for i in xrange(0, 10000): # get random number rb = random.randrange(0, file_size-10) # align on 10 char boundary byte = rb - ((rb + 10) % 10) # make exception if test file is really small if file_size <= 10: byte = 0 read_bytes = file_size
4ae600c948a0104df4a1c8b4348829bd1dfe5c1f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12662/4ae600c948a0104df4a1c8b4348829bd1dfe5c1f/test-read.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3929, 67, 896, 67, 9188, 67, 2080, 67, 1937, 12, 2890, 16, 436, 297, 67, 768, 16, 1831, 67, 768, 4672, 585, 67, 1467, 273, 1140, 18, 803, 18, 588, 1467, 12, 1899, 67, 768, 13, 436,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3929, 67, 896, 67, 9188, 67, 2080, 67, 1937, 12, 2890, 16, 436, 297, 67, 768, 16, 1831, 67, 768, 4672, 585, 67, 1467, 273, 1140, 18, 803, 18, 588, 1467, 12, 1899, 67, 768, 13, 436,...
self._reformResidual(t, dt)
self._reformResidual(t+dt, dt)
def step(self, t, dt): """ Advance to next time step. """ logEvent = "%sstep" % self._loggingPrefix self._logger.eventBegin(logEvent)
14c17a3fb75c3c759504037e7ef6015a3ed7fe9e /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/8645/14c17a3fb75c3c759504037e7ef6015a3ed7fe9e/Implicit.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2235, 12, 2890, 16, 268, 16, 3681, 4672, 3536, 4052, 5882, 358, 1024, 813, 2235, 18, 3536, 25424, 273, 2213, 87, 4119, 6, 738, 365, 6315, 11167, 2244, 365, 6315, 4901, 18, 2575, 8149, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2235, 12, 2890, 16, 268, 16, 3681, 4672, 3536, 4052, 5882, 358, 1024, 813, 2235, 18, 3536, 25424, 273, 2213, 87, 4119, 6, 738, 365, 6315, 11167, 2244, 365, 6315, 4901, 18, 2575, 8149, ...
self.putcmd("helo", _get_fqdn_hostname(name))
if name: self.putcmd("helo", name) else: self.putcmd("helo", make_fqdn())
def helo(self, name=''): """SMTP 'helo' command. Hostname to send for this command defaults to the FQDN of the local host. """ self.putcmd("helo", _get_fqdn_hostname(name)) (code,msg)=self.getreply() self.helo_resp=msg return (code,msg)
8b02e78666898f52bb009583d37a47f330b55958 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8125/8b02e78666898f52bb009583d37a47f330b55958/smtplib.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 13150, 83, 12, 2890, 16, 508, 2218, 11, 4672, 3536, 55, 14636, 296, 76, 24214, 11, 1296, 18, 17423, 358, 1366, 364, 333, 1296, 3467, 358, 326, 23127, 8609, 434, 326, 1191, 1479, 18, 35...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 13150, 83, 12, 2890, 16, 508, 2218, 11, 4672, 3536, 55, 14636, 296, 76, 24214, 11, 1296, 18, 17423, 358, 1366, 364, 333, 1296, 3467, 358, 326, 23127, 8609, 434, 326, 1191, 1479, 18, 35...
def __init__(self, file):
def __init__(self, file, mode): self._mode = mode
def __init__(self, file): self._dirfile = file + _os.extsep + 'dir' self._datfile = file + _os.extsep + 'dat' self._bakfile = file + _os.extsep + 'bak' # Mod by Jack: create data file if needed try: f = _open(self._datfile, 'r') except IOError: f = _open(self._datfile, 'w') f.close() self._update()
1038d9cb28e2decddd3ef51c3b7aea47556e8453 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8125/1038d9cb28e2decddd3ef51c3b7aea47556e8453/dumbdbm.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 585, 16, 1965, 4672, 365, 6315, 3188, 273, 1965, 365, 6315, 1214, 768, 273, 585, 397, 389, 538, 18, 408, 10814, 397, 296, 1214, 11, 365, 6315, 3404, 768,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 585, 16, 1965, 4672, 365, 6315, 3188, 273, 1965, 365, 6315, 1214, 768, 273, 585, 397, 389, 538, 18, 408, 10814, 397, 296, 1214, 11, 365, 6315, 3404, 768,...
callfunc = self.caller args = args + (kwargs,) for sfunc, t in cleanlist('streamin'): callfunc(self, sfunc, 'streamin', False, ret, *args)
callfunc, cnam = self.caller, 'streamin' sig = getsignal(getattr(args[0], cw.__name__, None)) if args else None if sig and cw is sig.func: args = (args[0], list(args[1:]), kwargs) else: args = (list(args), kwargs) for sfunc, t in cleanlist(cnam): callfunc(self, sfunc, cnam, False, ret, *args)
def call_streamin(self, cw, func, ret, args, kwargs): #{{{ callfunc = self.caller args = args + (kwargs,) for sfunc, t in cleanlist('streamin'): callfunc(self, sfunc, 'streamin', False, ret, *args) return ret
55d48b05758dd259bc480df4da508a1e5a9e370e /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/2635/55d48b05758dd259bc480df4da508a1e5a9e370e/signal.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 745, 67, 3256, 267, 12, 2890, 16, 14098, 16, 1326, 16, 325, 16, 833, 16, 1205, 4672, 3735, 12187, 745, 644, 16, 6227, 301, 273, 365, 18, 16140, 16, 296, 3256, 267, 11, 3553, 273, 336...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 745, 67, 3256, 267, 12, 2890, 16, 14098, 16, 1326, 16, 325, 16, 833, 16, 1205, 4672, 3735, 12187, 745, 644, 16, 6227, 301, 273, 365, 18, 16140, 16, 296, 3256, 267, 11, 3553, 273, 336...
self._g.hold(pylab_hold_state)
def _replot(self): """Replot all axes and all plotitems in the backend.""" # NOTE: only the current figure (gcf) is redrawn. if DEBUG: print "Doing replot in backend"
5c828e4a9eb5c25ba865196ffedb0a20a59460e0 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/4727/5c828e4a9eb5c25ba865196ffedb0a20a59460e0/matplotlib_.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 266, 4032, 12, 2890, 4672, 3536, 426, 4032, 777, 6515, 471, 777, 3207, 3319, 316, 326, 4221, 12123, 468, 5219, 30, 1338, 326, 783, 7837, 261, 75, 8522, 13, 353, 16540, 82, 18, 309...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 266, 4032, 12, 2890, 4672, 3536, 426, 4032, 777, 6515, 471, 777, 3207, 3319, 316, 326, 4221, 12123, 468, 5219, 30, 1338, 326, 783, 7837, 261, 75, 8522, 13, 353, 16540, 82, 18, 309...
ave_fMRI_volume = N.zeros(fmri_desc['layers']*fmri_desc['rows']*fmri_desc['cols'], dtype=N.float64).reshape(fmri_desc['layers'], fmri_desc['rows'], fmri_desc['cols'])
ave_fMRI_volume = NP.zeros(fmri_desc['layers']*fmri_desc['rows']*fmri_desc['cols'], dtype=NP.float64).reshape(fmri_desc['layers'], fmri_desc['rows'], fmri_desc['cols'])
def demo_MRI_coregistration(optimizer_method='powell', histo_method=1, smooth_histo=0, smooth_image=0, ftype=1): """ demo with (must have file ANAT1_V0001.img and fMRI directory fMRIData) measures, imageF_anat, fmri_series = reg.demo_MRI_coregistration() show results with In [59]: measures[25]['cost'] Out[59]: -0.48607185 In [60]: measures[25]['align_cost'] Out[60]: -0.99514639 In [61]: measures[25]['align_rotate'] Out[61]: array([ 1.94480181, 5.64703989, 5.35002136, -5.00544405, -2.2712214, -1.42249691], dtype=float32) In [62]: measures[25]['rotate'] Out[62]: array([ 1.36566341, 4.70644331, 4.68198586, -4.32256889, -2.47607017, -2.39173937], dtype=float32) """ # demo of alignment of fMRI series with anatomical MRI # in this demo, each fMRI volume is first perturbed (rotated, translated) # by a random value. The initial registration is measured, then the optimal # alignment is computed and the registration measure made following the volume remap. # The fMRI registration is done with the first fMRI volume using normalized cross-correlation. # Each fMRI volume is rotated to the fMRI-0 volume and the series is ensemble averaged. # The ensemble averaged is then registered with the anatomical MRI volume using normalized mutual information. # The fMRI series is then rotated with this parameter. The alignments are done with 3D cubic splines. 
# read the anatomical MRI volume anat_desc = load_anatMRI_desc() imageF_anat = load_volume(anat_desc, imagename='ANAT1_V0001.img') # the sampling structure imdata = build_structs() # the volume filter imageF_anat['fwhm'] = build_fwhm(imageF_anat['mat'], imdata['step']) # read in the file list of the fMRI data metric_test = N.dtype([('cost', 'f'), ('align_cost', 'f'), ('rotate', 'f', 6), ('align_rotate', 'f', 6)]) fMRIdata = read_fMRI_directory('fMRIData\*.img') fmri_desc = load_fMRI_desc() fmri_series = {} ave_fMRI_volume = N.zeros(fmri_desc['layers']*fmri_desc['rows']*fmri_desc['cols'], dtype=N.float64).reshape(fmri_desc['layers'], fmri_desc['rows'], fmri_desc['cols']) count = 0 number_volumes = len(fMRIdata) measures = N.zeros(number_volumes, dtype=metric_test) # load and perturb (rotation, translation) the fMRI volumes for i in fMRIdata: image = load_volume(fmri_desc, i) # random perturbation of angle, translation for each volume beyond the first if count == 0: image['fwhm'] = build_fwhm(image['mat'], imdata['step']) fmri_series[count] = image count = count + 1 else: x = N.random.random(6) - 0.5 x = 10.0 * x fmri_series[count] = demo_rotate_fMRI_volume(image, x) measures[count]['rotate'][0:6] = x[0:6] count = count + 1 # load and register the fMRI volumes with volume_0 using normalized cross correlation metric imageF = fmri_series[0] if smooth_image: image_F_xyz = filter_image_3D(imageF['data'], imageF['fwhm'], ftype) imageF['data'] = image_F_xyz for i in range(1, number_volumes): imageG = fmri_series[i] # the measure prior to alignment measures[i]['cost'] = check_alignment(imageF, imageG, imdata, method='ncc', lite=histo_method, smhist=smooth_histo) x = python_coreg(imageF, imageG, imdata, lite=histo_method, method='ncc', opt_method=optimizer_method, smhist=smooth_histo, smimage=smooth_image) measures[i]['align_rotate'][0:6] = x[0:6] measures[i]['align_cost'] = check_alignment(imageF, imageG, imdata, method='ncc', lite=histo_method, smhist=smooth_histo, 
alpha=x[0], beta=x[1], gamma=x[2], Tx=x[3], Ty=x[4], Tz=x[5]) # align the volumes and average them for co-registration with the anatomical MRI ave_fMRI_volume = fmri_series[0]['data'].astype(N.float64) for i in range(1, number_volumes): image = fmri_series[i] x[0:6] = measures[i]['align_rotate'][0:6] # overwrite the fMRI volume with the aligned volume fmri_series[i] = remap_image(image, x, resample='cubic') ave_fMRI_volume = ave_fMRI_volume + fmri_series[i]['data'].astype(N.float64) ave_fMRI_volume = (ave_fMRI_volume / float(number_volumes)).astype(N.uint8) ave_fMRI_volume = {'data' : ave_fMRI_volume, 'mat' : imageF['mat'], 'dim' : imageF['dim'], 'fwhm' : imageF['fwhm']} # register (using normalized mutual information) with the anatomical MRI if smooth_image: image_F_anat_xyz = filter_image_3D(imageF_anat['data'], imageF_anat['fwhm'], ftype) imageF_anat['data'] = image_F_anat_xyz x = python_coreg(imageF_anat, ave_fMRI_volume, imdata, lite=histo_method, method='nmi', opt_method=optimizer_method, smhist=smooth_histo, smimage=smooth_image) print 'functional-anatomical align parameters ' print x for i in range(number_volumes): image = fmri_series[i] # overwrite the fMRI volume with the anatomical-aligned volume fmri_series[i] = remap_image(image, x, resample='cubic') return measures, imageF_anat, fmri_series
de681f926e982b0212f2f58bf637917926737f89 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/12971/de681f926e982b0212f2f58bf637917926737f89/_registration.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 21477, 67, 49, 2259, 67, 3644, 75, 4218, 12, 29594, 67, 2039, 2218, 23509, 1165, 2187, 5356, 83, 67, 2039, 33, 21, 16, 11957, 67, 11488, 83, 33, 20, 16, 11957, 67, 2730, 33, 20, 16, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 21477, 67, 49, 2259, 67, 3644, 75, 4218, 12, 29594, 67, 2039, 2218, 23509, 1165, 2187, 5356, 83, 67, 2039, 33, 21, 16, 11957, 67, 11488, 83, 33, 20, 16, 11957, 67, 2730, 33, 20, 16, ...
parent.remove(notebooks["board"])
if parent == boardsRemember: boardsRemember.remove(notebooks["board"]) else: label = parent.get_tab_label(notebooks["board"]) parent.remove_page(parent.page_num(notebooks["board"])) parent.append_page(boardsRemember, label)
def setStuffColor (color): for boardvbox in notebooks["board"].get_children(): ccalign, boardcontrol = boardvbox.get_children() ccalign.child.modify_bg(ccalign.child.state, color) boardcontrol.view.modify_bg(boardcontrol.view.state, color)
25aaf887991909b5219825b9198c03bd8187b010 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/5339/25aaf887991909b5219825b9198c03bd8187b010/gamewidget.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 444, 510, 3809, 2957, 261, 3266, 4672, 364, 11094, 90, 2147, 316, 4721, 12567, 9614, 3752, 6, 8009, 588, 67, 5906, 13332, 276, 771, 724, 16, 11094, 7098, 273, 11094, 90, 2147, 18, 588, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 444, 510, 3809, 2957, 261, 3266, 4672, 364, 11094, 90, 2147, 316, 4721, 12567, 9614, 3752, 6, 8009, 588, 67, 5906, 13332, 276, 771, 724, 16, 11094, 7098, 273, 11094, 90, 2147, 18, 588, ...
self.assertEqual(comp.default(), "<span>42</span>\n")
self.assertEqual(comp.default(), "42\n")
def test_binding(self): from zope.publisher.browser import TestRequest comp = PTComponent(Content(), TestRequest()) self.assertEqual(comp.index(), "42\n") self.assertEqual(comp.nothing(), "\n") self.assertEqual(comp.default(), "<span>42</span>\n")
80b5b29a0586ba86234b7bae3bd83682e7c7511a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/9525/80b5b29a0586ba86234b7bae3bd83682e7c7511a/test_binding.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 7374, 12, 2890, 4672, 628, 998, 1306, 18, 26018, 18, 11213, 1930, 7766, 691, 1161, 273, 453, 56, 1841, 12, 1350, 9334, 7766, 691, 10756, 365, 18, 11231, 5812, 12, 2919, 18, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 7374, 12, 2890, 4672, 628, 998, 1306, 18, 26018, 18, 11213, 1930, 7766, 691, 1161, 273, 453, 56, 1841, 12, 1350, 9334, 7766, 691, 10756, 365, 18, 11231, 5812, 12, 2919, 18, 1...
self.add_mid_point(chr_pos_ls, chr_pos2adjacent_window)
cls.add_mid_point(chr_pos_ls, chr_pos2adjacent_window)
def findSNPsInRegion(self, snp_info, chromosome, start, stop, center_snp_position=None): """ 2009-4-30 decide whether to use (chr,pos) or (chr,pos,offset) to represent a SNP based on snp_info.chr_pos2index 2008-10-1 called by plotSNPRegion() find SNPs in this region, if center_snp_position is not given, find one. similar to getSNPsAroundThisSNP() """ sys.stderr.write("Get SNPs in this region ...") chr_pos_ls = [] chr_pos2adjacent_window = {} j = 0 midpoint = (start+stop)/2. if center_snp_position is None: _center_snp_position = start else: _center_snp_position = center_snp_position center_snp = SNPPassingData(chromosome=chromosome, position=_center_snp_position, snps_id=None) #2009-3-27 get a SNP representation from snp_info to see whether the SNP is represented by chr_pos or chr_pos_offset fst_chr_pos = snp_info.chr_pos2index.keys()[0] snp_representation_type = len(fst_chr_pos) for i in range(start-1, stop+2): new_pos = i if snp_representation_type==3: #2009-3-27, 3-number representation new_chr_pos = (chromosome, new_pos, 0) else: new_chr_pos = (chromosome, new_pos) if new_chr_pos in snp_info.chr_pos2index: if center_snp_position is None and abs(new_pos-midpoint)<abs(center_snp.position-midpoint): #this SNP is closer to the center center_snp.position = new_pos chr_pos_ls.append(new_chr_pos) if j!=0: self.add_mid_point(chr_pos_ls, chr_pos2adjacent_window) j += 1 if len(chr_pos_ls)>1: #deal with the leftest point of the 1st chr_pos chr_pos = chr_pos_ls[0] if chr_pos in chr_pos2adjacent_window: #if chr_pos_ls has only one element, then nothing would be in chr_pos2adjacent_window window_size = chr_pos2adjacent_window[chr_pos][0]-chr_pos[1] chr_pos2adjacent_window[chr_pos] = [chr_pos[1]-window_size, chr_pos[1]+window_size] #deal with the rightest point of the 1st chr_pos chr_pos = chr_pos_ls[-1] if chr_pos in chr_pos2adjacent_window: # window_size = chr_pos[1] - chr_pos2adjacent_window[chr_pos][0] chr_pos2adjacent_window[chr_pos] = [chr_pos[1]-window_size, 
chr_pos[1]+window_size] center_snp.snps_id = '%s_%s'%(center_snp.chromosome, center_snp.position) snp_region = PassingData(chr_pos_ls=chr_pos_ls, chr_pos2adjacent_window=chr_pos2adjacent_window, center_snp=center_snp,\ snp_representation_type=snp_representation_type) sys.stderr.write("Done.\n") return snp_region
bdc3752350001c791e90fb2760fcc0578318b113 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/9645/bdc3752350001c791e90fb2760fcc0578318b113/DrawSNPRegion.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1104, 13653, 18124, 382, 5165, 12, 2890, 16, 28648, 67, 1376, 16, 22674, 16, 787, 16, 2132, 16, 4617, 67, 87, 6782, 67, 3276, 33, 7036, 4672, 3536, 4044, 29, 17, 24, 17, 5082, 16288, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1104, 13653, 18124, 382, 5165, 12, 2890, 16, 28648, 67, 1376, 16, 22674, 16, 787, 16, 2132, 16, 4617, 67, 87, 6782, 67, 3276, 33, 7036, 4672, 3536, 4044, 29, 17, 24, 17, 5082, 16288, ...
create temporary table tmpDupPath(
create table tmpDupPath(
def fixDuplicatePaths(self, cu, repos): self.db.dropIndex("TroveFiles", "TroveFilesPathIdx") # it is faster to select all the (instanceId, path) pairs into # an indexed table than create a non-unique index, do work, # drop the non-unique index and recreate it as a unique one cu.execute(""" create temporary table tmpDupPath( instanceId integer not null, path varchar(767) not null ) %(TABLEOPTS)s""" % self.db.keywords) self.db.createIndex("tmpDupPath", "tmpDupPathIdx", "instanceId, path", check = False) cu.execute(""" create temporary table tmpDups( counter integer, instanceId integer, path varchar(767) ) %(TABLEOPTS)s""" % self.db.keywords) logMe(2, "searching the trovefiles table...") cu.execute("insert into tmpDupPath (instanceId, path) " "select instanceId, path from TroveFiles") logMe(2, "looking for troves with duplicate paths...") cu.execute(""" insert into tmpDups (counter, instanceId, path) select count(*) as c, instanceId, path from tmpDupPath group by instanceId, path having count(*) > 1""") counter = cu.execute("select count(*) from tmpDups").fetchall()[0][0] logMe(3, "detected %d duplicates" % (counter,)) # loop over every duplicate and apply the appropiate fix cu.execute("select instanceId, path from tmpDups") for (instanceId, path) in cu.fetchall(): cu.execute("""select distinct instanceId, streamId, versionId, pathId, path from trovefiles where instanceId = ? and path = ? order by streamId, versionId, pathId""", (instanceId, path)) ret = cu.fetchall() # delete all the duplicates and put the first one back cu.execute("delete from trovefiles " "where instanceId = ? 
and path = ?", (instanceId, path)) # in case they are different, we pick the oldest, chances are it is # more "original" cu.execute("insert into trovefiles " "(instanceId, streamId, versionId, pathId, path) " "values (?,?,?,?,?)", tuple(ret[0])) if len(ret) > 1: # need to recompute the sha1 - we might have changed the trove manifest # if the records were different self.fixTroveSig(repos, instanceId)
2648df8d5e6175b8e96c000aea2cd4954783480e /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/8747/2648df8d5e6175b8e96c000aea2cd4954783480e/migrate.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2917, 11826, 4466, 12, 2890, 16, 15985, 16, 13686, 4672, 365, 18, 1966, 18, 7285, 1016, 2932, 56, 303, 537, 2697, 3113, 315, 56, 303, 537, 2697, 743, 4223, 7923, 468, 518, 353, 12063, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2917, 11826, 4466, 12, 2890, 16, 15985, 16, 13686, 4672, 365, 18, 1966, 18, 7285, 1016, 2932, 56, 303, 537, 2697, 3113, 315, 56, 303, 537, 2697, 743, 4223, 7923, 468, 518, 353, 12063, ...
'output_libname' should be a library name, not a filename; the filename will be inferred from the library name.
'output_libname' should be a library name, not a filename; the filename will be inferred from the library name. 'output_dir' is the directory where the library file will be put. 'debug' is a boolean; if true, debugging information will be included in the library (note that on most platforms, it is the compile step where this matters: the 'debug' flag is included here just for consistency).""" pass def link_shared_lib (self, objects, output_libname, output_dir=None, libraries=None, library_dirs=None, debug=0, extra_preargs=None, extra_postargs=None): """Link a bunch of stuff together to create a shared library file. Has the same effect as 'link_static_lib()' except that the filename inferred from 'output_libname' will most likely be different, and the type of file generated will almost certainly be different
def link_static_lib (self, objects, output_libname, output_dir=None): """Link a bunch of stuff together to create a static library file. The "bunch of stuff" consists of the list of object files supplied as 'objects', the extra object files supplied to 'add_link_object()' and/or 'set_link_objects()', the libraries supplied to 'add_library()' and/or 'set_libraries()', and the libraries supplied as 'libraries' (if any).
6f928a0da711b577c960fab9a40156a59e50efa3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8125/6f928a0da711b577c960fab9a40156a59e50efa3/ccompiler.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1692, 67, 3845, 67, 2941, 261, 2890, 16, 2184, 16, 876, 67, 2941, 529, 16, 876, 67, 1214, 33, 7036, 4672, 3536, 2098, 279, 25606, 434, 10769, 9475, 358, 752, 279, 760, 5313, 585, 18, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1692, 67, 3845, 67, 2941, 261, 2890, 16, 2184, 16, 876, 67, 2941, 529, 16, 876, 67, 1214, 33, 7036, 4672, 3536, 2098, 279, 25606, 434, 10769, 9475, 358, 752, 279, 760, 5313, 585, 18, ...
args = [arg.lower().strip() for arg in args]
waitfor = wait_for.lower()
def Wait(self, *args, **kwargs): """Wait for the window to be in any of the following states:
3041c3cbb3a9cba089ef284c77ea17fa506bba68 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/6953/3041c3cbb3a9cba089ef284c77ea17fa506bba68/application.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 5838, 12, 2890, 16, 380, 1968, 16, 2826, 4333, 4672, 3536, 5480, 364, 326, 2742, 358, 506, 316, 1281, 434, 326, 3751, 5493, 30, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 5838, 12, 2890, 16, 380, 1968, 16, 2826, 4333, 4672, 3536, 5480, 364, 326, 2742, 358, 506, 316, 1281, 434, 326, 3751, 5493, 30, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, ...
('test', ('%(testdir)s/')), ('debuginfo', ('%(debugsrcdir)s/', '%(debuglibdir)s/')),
def updateArgs(self, *args, **keywords):
0ce42b0bd994274693cafa81726e1f60213a5bc5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8747/0ce42b0bd994274693cafa81726e1f60213a5bc5/packagepolicy.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1089, 2615, 12, 2890, 16, 380, 1968, 16, 2826, 11771, 4672, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1089, 2615, 12, 2890, 16, 380, 1968, 16, 2826, 11771, 4672, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100,...
self.progress_total_bytes=0
self.progress_total_bytes=666
def update_md5(self):
5b65a77e7cffd69c97aa287b67dac553a92ef8cc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/2200/5b65a77e7cffd69c97aa287b67dac553a92ef8cc/parano.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1089, 67, 1264, 25, 12, 2890, 4672, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1089, 67, 1264, 25, 12, 2890, 4672, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100,...
tcord = G1
if board.variant == FISCHERRANDOMCHESS: tcord = board.ini_rooks[0][1] else: tcord = G1
def parseSAN (board, san): """ Parse a Short/Abbreviated Algebraic Notation string """ if not san: raise ParsingError, (san, _("the move is an empty string"), board.asFen()) elif len(san) < 2: raise ParsingError, (san, _("the move is too short"), board.asFen()) notat = san color = board.color if notat[-1] in ("+", "#"): notat = notat[:-1] flag = NORMAL_MOVE # If last char is a piece char, we assue it the promote char c = notat[-1].lower() if c in chr2Sign: flag = chr2Sign[c] + 2 if notat[-2] == "=": notat = notat[:-2] else: notat = notat[:-1] if len(notat) < 2: raise ParsingError, (san, _("the move needs a piece and a cord"), board.asFen()) notat = notat.replace("0","O").replace("o","O") if notat.startswith("O-O"): if color == WHITE: fcord = board.ini_kings[0] #E1 if notat == "O-O": flag = KING_CASTLE tcord = G1 else: flag = QUEEN_CASTLE tcord = C1 else: fcord = board.ini_kings[1] #E8 if notat == "O-O": flag = KING_CASTLE tcord = G8 else: flag = QUEEN_CASTLE tcord = C8 return newMove (fcord, tcord, flag) if notat[0] in ("Q", "R", "B", "K", "N"): piece = chr2Sign[notat[0].lower()] notat = notat[1:] else: piece = PAWN if "x" in notat: notat, tcord = notat.split("x") if not tcord in cordDic: raise ParsingError, ( san, _("the captured cord (%s) is incorrect") % tcord, board.asFen()) tcord = cordDic[tcord] if piece == PAWN: # If a pawn is attacking an empty cord, we assue it an enpassant if board.arBoard[tcord] == EMPTY: flag = ENPASSANT else: if not notat[-2:] in cordDic: raise ParsingError, ( san, "the end cord (%s) is incorrect" % notat[-2:], board.asFen()) tcord = cordDic[notat[-2:]] notat = notat[:-2] # If there is any extra location info, like in the move Bexd1 or Nh3f4 we # want to know frank = None ffile = None if notat and notat[0] in reprRank: frank = int(notat[0])-1 notat = notat[1:] if notat and notat[0] in reprFile: ffile = ord(notat[0]) - ord("a") notat = notat[1:] if notat and notat[0] in reprRank: frank = int(notat[0])-1 notat = notat[1:] # We find all 
pieces who could have done it. (If san was legal, there should # never be more than one) from lmovegen import genAllMoves for move in genAllMoves(board): if TCORD(move) != tcord: continue f = FCORD(move) if board.arBoard[f] != piece: continue if frank != None and frank != RANK(f): continue if ffile != None and ffile != FILE(f): continue if flag in PROMOTIONS and FLAG(move) != flag: continue board_clone = board.clone() board_clone.applyMove(move) if board_clone.opIsChecked(): continue return move errstring = "no %s is able to move to %s" % (reprPiece[piece], reprCord[tcord]) raise ParsingError, (san, errstring, board.asFen())
a58b8aedb826b5e26dcb0cdef804c589b245a636 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5339/a58b8aedb826b5e26dcb0cdef804c589b245a636/lmove.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1109, 22721, 261, 3752, 16, 272, 304, 4672, 3536, 2884, 279, 7925, 19, 11945, 2262, 29087, 335, 2288, 367, 533, 3536, 309, 486, 272, 304, 30, 1002, 19761, 668, 16, 261, 87, 304, 16, 38...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1109, 22721, 261, 3752, 16, 272, 304, 4672, 3536, 2884, 279, 7925, 19, 11945, 2262, 29087, 335, 2288, 367, 533, 3536, 309, 486, 272, 304, 30, 1002, 19761, 668, 16, 261, 87, 304, 16, 38...
if 'delta' not in perf_data[key] or 'var' not in perf_data[key]: bad_keys.append(key) if (not isinstance(perf_data[key]['delta'], int) and not isinstance(perf_data[key]['delta'], float)): bad_keys.append(key) if (not isinstance(perf_data[key]['var'], int) and not isinstance(perf_data[key]['var'], float)): bad_keys.append(key)
if 'regress' in perf_data[key]: if 'improve' not in perf_data[key]: bad_keys.append(key) if (not isinstance(perf_data[key]['regress'], int) and not isinstance(perf_data[key]['regress'], float)): bad_keys.append(key) if (not isinstance(perf_data[key]['improve'], int) and not isinstance(perf_data[key]['improve'], float)): bad_keys.append(key) else: if 'delta' not in perf_data[key] or 'var' not in perf_data[key]: bad_keys.append(key) if (not isinstance(perf_data[key]['delta'], int) and not isinstance(perf_data[key]['delta'], float)): bad_keys.append(key) if (not isinstance(perf_data[key]['var'], int) and not isinstance(perf_data[key]['var'], float)): bad_keys.append(key)
def testPerfExpectations(self): perf_data = LoadData()
994419a85f0e8657a9aa958c4990643ff6ecc3e9 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9392/994419a85f0e8657a9aa958c4990643ff6ecc3e9/perf_expectations_unittest.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 22016, 11988, 1012, 12, 2890, 4672, 14184, 67, 892, 273, 4444, 751, 1435, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 22016, 11988, 1012, 12, 2890, 4672, 14184, 67, 892, 273, 4444, 751, 1435, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -10...
DistributedObject.DistributedObject.disable(self)
def disable(self): DistributedLevel.notify.debug('disable')
98a8e919216940feeee8ec4f7f06fa53eaf6d45f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/7242/98a8e919216940feeee8ec4f7f06fa53eaf6d45f/DistributedLevel.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4056, 12, 2890, 4672, 27877, 2355, 18, 12336, 18, 4148, 2668, 8394, 6134, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4056, 12, 2890, 4672, 27877, 2355, 18, 12336, 18, 4148, 2668, 8394, 6134, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -10...
""" fl_add_dial(type, x, y, w, h, label) -> object ref.
""" fl_add_dial(type, x, y, w, h, label) -> object
def fl_add_dial(type, x, y, w, h, label): """ fl_add_dial(type, x, y, w, h, label) -> object ref. """ retval = _fl_add_dial(type, x, y, w, h, label) return retval
8765c710f695de392f6fc7c664c746ec98668b1d /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/2429/8765c710f695de392f6fc7c664c746ec98668b1d/xformslib.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1183, 67, 1289, 67, 25909, 12, 723, 16, 619, 16, 677, 16, 341, 16, 366, 16, 1433, 4672, 3536, 1183, 67, 1289, 67, 25909, 12, 723, 16, 619, 16, 677, 16, 341, 16, 366, 16, 1433, 13, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1183, 67, 1289, 67, 25909, 12, 723, 16, 619, 16, 677, 16, 341, 16, 366, 16, 1433, 4672, 3536, 1183, 67, 1289, 67, 25909, 12, 723, 16, 619, 16, 677, 16, 341, 16, 366, 16, 1433, 13, ...
YouTubeVideoQuery.__init__(self, feed, text_query=text_query,
YouTubeVideoQuery.__init__(self, feed=feed, text_query=text_query,
def __init__(self, username=None, feed_type=None, subscription_id=None, text_query=None, params=None, categories=None):
6c8d4082f9427df8087482f3c906d2a208f7b13f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10620/6c8d4082f9427df8087482f3c906d2a208f7b13f/service.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 2718, 33, 7036, 16, 4746, 67, 723, 33, 7036, 16, 4915, 67, 350, 33, 7036, 16, 977, 67, 2271, 33, 7036, 16, 859, 33, 7036, 16, 6477, 33, 7036, 4672, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 2718, 33, 7036, 16, 4746, 67, 723, 33, 7036, 16, 4915, 67, 350, 33, 7036, 16, 977, 67, 2271, 33, 7036, 16, 859, 33, 7036, 16, 6477, 33, 7036, 4672, 2...
location = with_nochange(record.location, location, self.rv),
location = with_nochange(record.location, location, self.rv),
def import_event(self, record):
0dc1598b7d79b16d5722ec37564954481cacc118 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/9228/0dc1598b7d79b16d5722ec37564954481cacc118/translator.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1930, 67, 2575, 12, 2890, 16, 1409, 4672, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1930, 67, 2575, 12, 2890, 16, 1409, 4672, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100,...
if not team_name in teams : teams.append(team_name)
team_map[team_name] = 1 + team_map.get(team_name, 0)
def find_testing_teams() : teams = [] process_map = {} process_list = get_output("ps -o comm= -e | sort | uniq").strip().split('\n') random.shuffle(process_list) for process in process_list : for pattern in team_name_map.keys() : if pattern.match(process) : process_map[process] = 1 team_name = team_name_map[pattern] if not team_name in teams : teams.append(team_name) break if len(teams) >= 2 : break if len(teams) <= 1 : teams.extend(find_unknown_testing_teams(2 - len(teams), process_map)) teams.sort() return teams
b4daabae63b33abbfbb8a0fed1e856e03cf87a44 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/228/b4daabae63b33abbfbb8a0fed1e856e03cf87a44/client.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1104, 67, 3813, 310, 67, 27292, 1435, 294, 21475, 273, 5378, 1207, 67, 1458, 273, 2618, 1207, 67, 1098, 273, 336, 67, 2844, 2932, 1121, 300, 83, 1543, 33, 300, 73, 571, 1524, 571, 1074...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1104, 67, 3813, 310, 67, 27292, 1435, 294, 21475, 273, 5378, 1207, 67, 1458, 273, 2618, 1207, 67, 1098, 273, 336, 67, 2844, 2932, 1121, 300, 83, 1543, 33, 300, 73, 571, 1524, 571, 1074...
if t_movies.has_key("year") and (t_movies["year"]==None or int(t_movies["year"]) < 1986):
if t_movies.has_key("year") and (t_movies["year"]==None or int(t_movies["year"]) < 1886):
def clean_t_movies(self, t_movies): for i in t_movies.keys(): if t_movies[i] == '': t_movies[i]=None for i in ["color","cond","layers","region", 'media', 'vcodec']: if t_movies.has_key(i) and t_movies[i] == -1: t_movies[i]=None for i in ["volume_id","collection_id", 'runtime']: if t_movies.has_key(i) and (t_movies[i]==None or int(t_movies[i]) == 0): t_movies[i] = None if t_movies.has_key("year") and (t_movies["year"]==None or int(t_movies["year"]) < 1986): t_movies["year"] = None
5c1a74bc0a57414cea377fb82513d179860c1890 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/2687/5c1a74bc0a57414cea377fb82513d179860c1890/sql.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2721, 67, 88, 67, 8683, 29028, 12, 2890, 16, 268, 67, 8683, 29028, 4672, 364, 277, 316, 268, 67, 8683, 29028, 18, 2452, 13332, 309, 268, 67, 8683, 29028, 63, 77, 65, 422, 875, 30, 26...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2721, 67, 88, 67, 8683, 29028, 12, 2890, 16, 268, 67, 8683, 29028, 4672, 364, 277, 316, 268, 67, 8683, 29028, 18, 2452, 13332, 309, 268, 67, 8683, 29028, 63, 77, 65, 422, 875, 30, 26...
stats['years'][year] = 0
stats['years'][year] = { 'duration': 0, 'trips': 0 }
def build_stats(trip_list, traveller_info): # TODO break this apart and/or do similar things in subroutines # TODO build more year metadata stats = {'countries': {}, 'cities': {}, 'years': {}, 'home': { 'trips': 0, 'duration':0, }, 'away': { 'trips': 0, 'duration':0, }, 'future': 0, 'types': {}, 'ordered': {}, } home_country = traveller_info['home_city']['country'] for trip in trip_list: # skip if not a past trip if trip['status'] != "Past": if trip['status'] == "Ongoing": stats['current'] = trip['city']['name'] else: stats['future'] += 1 continue # how long (simple version...) # TODO never double count date duration = trip['finishdate'] - trip['startdate'] # build country data country = trip['city']['country'] display = country inline = country # special casing! if not country.find("United"): # TODO there's something wrong here... inline = "the "+country # TODO and this should be a hash anyway if not country.find("Hong Kong"): display = "Hong Kong" inline = "Hong Kong" # stuff info into the data structure if not country in stats['countries']: stats['countries'][country] = { 'duration': 0, 'trips': 0, 'display':display, 'inline':inline, 'code':trip['city']['country_code'], 'rgb':md5.new(country).hexdigest()[0:6]} stats['countries'][country]['duration'] += duration.days stats['countries'][country]['trips'] += 1 if not trip['return_transport_type'] in stats['types']: stats['types'][trip['return_transport_type']] = {'trips':0} stats['types'][trip['return_transport_type']]['trips'] += 0.5 if not trip['outgoing_transport_type'] in stats['types']: stats['types'][trip['outgoing_transport_type']] = {'trips':0} stats['types'][trip['outgoing_transport_type']]['trips'] += 0.5 if (country == home_country): stats['home']['trips'] += 1; stats['home']['duration'] += duration.days else: stats['away']['trips'] += 1; stats['away']['duration'] += duration.days # build city data city = trip['city']['name'] rgb = trip['city']['rgb'] if not city in stats['cities']: stats['cities'][city] 
= { 'duration': 0, 'trips': 0, 'rgb':rgb, 'country':country} stats['cities'][city]['duration'] += duration.days stats['cities'][city]['trips'] += 1 # build year data year = trip['startdate'].year if not year in stats['years']: stats['years'][year] = 0 if year == trip['finishdate'].year: stats['years'][year] += duration.days else: if trip['finishdate'].year - year == 1: # spans a single year boundary, and is therefore Sane # if there's *anyone* who has a trip spanning two, they can bloody # well write this themselves. Otherwise, assume they mean they're # living there now. Onwards... year_end = datetime(year, 12, 31) stats['years'][year] += (year_end-trip['startdate']).days year = trip['finishdate'].year year_start = datetime(year, 1, 1) if not year in stats['years']: stats['years'][year] = 0 stats['years'][year] += (trip['finishdate']-year_start).days # do we want to supply full-blown cross-cut stats? maybe later... # reorder final stats stats['ordered']['years'] = sorted(stats['years']) stats['ordered']['years'].reverse() stats['ordered']['types'] = sorted(stats['types'], lambda x, y: (int(stats['types'][y]['trips']))-(int(stats['types'][x]['trips']))) stats['ordered']['years_by_trip'] = sorted(stats['years'], lambda x, y: (stats['years'][y])-(stats['years'][x])) stats['ordered']['countries'] = sorted(stats['countries'], lambda x, y: (stats['countries'][y]['duration'])-(stats['countries'][x]['duration'])) stats['ordered']['cities'] = sorted(stats['cities'], lambda x, y: (stats['cities'][y]['duration'])-(stats['cities'][x]['duration'])) start = 160 stats['rgb'] = stats['countries'][stats['ordered']['countries'][0]]['rgb'] stats['rgb_start'] = "%x%x%x" % (start, start, start) # scale country stats for map (including colours) topcountry = stats['ordered']['countries'][0] topduration = stats['countries'][topcountry]['duration'] r = stats['rgb'][0:2]; g = stats['rgb'][2:4]; b = stats['rgb'][4:6] for country in stats['countries'].keys(): scaled = 
100*stats['countries'][country]['duration']/topduration sr = (scaled*(int(r, 16)-start)/100)+start sg = (scaled*(int(g, 16)-start)/100)+start sb = (scaled*(int(b, 16)-start)/100)+start stats['countries'][country]['scaled'] = scaled stats['countries'][country]['rgb_scaled'] = "%x%x%x" % (sr, sg, sb) # scale transport types toptype = stats['ordered']['types'][0] toptrip = int(stats['types'][toptype]['trips']) for type in stats['types'].keys(): stats['types'][type]['scaled'] = 100*int(stats['types'][type]['trips'])/toptrip # scale days on trips stats['topyear'] = stats['ordered']['years_by_trip'][0] stats['away']['days'] = (stats['years'][stats['topyear']])/3.66 stats['home']['days'] = (366-stats['years'][stats['topyear']])/3.66 return stats
1c67bb5808334c681c6ba34ea3f5485a4f220770 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/11110/1c67bb5808334c681c6ba34ea3f5485a4f220770/main.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1361, 67, 5296, 12, 25125, 67, 1098, 16, 29090, 749, 67, 1376, 4672, 468, 2660, 898, 333, 513, 485, 471, 19, 280, 741, 7281, 9198, 316, 720, 7028, 1465, 468, 2660, 1361, 1898, 3286, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1361, 67, 5296, 12, 25125, 67, 1098, 16, 29090, 749, 67, 1376, 4672, 468, 2660, 898, 333, 513, 485, 471, 19, 280, 741, 7281, 9198, 316, 720, 7028, 1465, 468, 2660, 1361, 1898, 3286, 19...
None, [cty.POINTER(FL_OBJECT), cty.c_int],
None, [cty.POINTER(FL_OBJECT), cty.c_int],
def fl_get_timer(ob): """ fl_get_timer(ob) -> num. """ retval = _fl_get_timer(ob) return retval
9942dac8ce2b35a1e43615a26fd8e7054ef805d3 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/2429/9942dac8ce2b35a1e43615a26fd8e7054ef805d3/xformslib.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1183, 67, 588, 67, 12542, 12, 947, 4672, 3536, 1183, 67, 588, 67, 12542, 12, 947, 13, 317, 818, 18, 3536, 225, 5221, 273, 389, 2242, 67, 588, 67, 12542, 12, 947, 13, 327, 5221, 282, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1183, 67, 588, 67, 12542, 12, 947, 4672, 3536, 1183, 67, 588, 67, 12542, 12, 947, 13, 317, 818, 18, 3536, 225, 5221, 273, 389, 2242, 67, 588, 67, 12542, 12, 947, 13, 327, 5221, 282, ...
Now, will we'll adapt our adapter factory to a trusted adapter factory:
def _customizeUnprotected(self, adapter, context): if (ILocation.providedBy(adapter) and adapter.__parent__ is None): adapter.__parent__ = context return adapter
... def __init__(self, context):
c4df19b7123c032c70b5860013f9a3c864759b7f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/9520/c4df19b7123c032c70b5860013f9a3c864759b7f/adapter.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1372, 377, 1652, 1001, 2738, 972, 12, 2890, 16, 819, 4672, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1372, 377, 1652, 1001, 2738, 972, 12, 2890, 16, 819, 4672, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, ...
self.logLevel = gConfig.getValue('DIRAC/LogLevel','INFO') self.siteRoot = gConfig.getValue('LocalSite/Root',DIRAC.rootPath) self.localArea = gConfig.getValue('LocalSite/LocalArea','/tmp') self.siteName = gConfig.getValue('LocalSite/Site','Unknown') self.cpuFactor = gConfig.getValue('LocalSite/CPUScalingFactor','Unknown') self.maxPilots = gConfig.getValue('LocalSite/MaxPilots',100) self.log.setLevel(self.logLevel) self.log.info("Log level set to",self.logLevel)
self.logLevel = gConfig.getValue( 'DIRAC/LogLevel', 'INFO' ) self.siteRoot = gConfig.getValue( 'LocalSite/Root', DIRAC.rootPath ) self.localArea = gConfig.getValue( 'LocalSite/LocalArea', '/tmp' ) self.siteName = gConfig.getValue( 'LocalSite/Site', 'Unknown' ) self.cpuFactor = gConfig.getValue( 'LocalSite/CPUScalingFactor', 'Unknown' ) self.maxPilots = gConfig.getValue( 'LocalSite/MaxPilots', 100 ) self.log.setLevel( self.logLevel ) self.log.info( "Log level set to", self.logLevel )
def initialize(self,loops=0): """Sets default parameters and creates CE instance """ self.maxcount = loops
364464017f61dc703439c7a2235a47d4ba35aafe /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12864/364464017f61dc703439c7a2235a47d4ba35aafe/DiracSiteAgent.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4046, 12, 2890, 16, 383, 4473, 33, 20, 4672, 3536, 2785, 805, 1472, 471, 3414, 29538, 791, 3536, 365, 18, 1896, 1883, 273, 14075, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4046, 12, 2890, 16, 383, 4473, 33, 20, 4672, 3536, 2785, 805, 1472, 471, 3414, 29538, 791, 3536, 365, 18, 1896, 1883, 273, 14075, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100,...
return None
return (0, None)
def runScript(prog=None, script=None, otherargs=[], force=False, rusage=False, tmpdir="/var/tmp", chroot=None): """Run (script otherargs) with interpreter prog (which can be a list containing initial arguments). Return None, or getrusage() stats of the script if rusage. Disable ldconfig optimization if force. Raise IOError, OSError.""" # FIXME? hardcodes config.rpmconfig usage if prog == None: prog = "/bin/sh" if prog == "/bin/sh" and script == None: return None if not os.path.exists(tmpdir): try: os.makedirs(os.path.dirname(tmpdir), mode=0755) except: pass try: os.makedirs(tmpdir, mode=01777) except: return None if isinstance(prog, TupleType): args = prog else: args = [prog] if not force and args == ["/sbin/ldconfig"] and script == None: if rpmconfig.delayldconfig == 1: rpmconfig.ldconfig += 1 # FIXME: assumes delayldconfig is checked after all runScript # invocations rpmconfig.delayldconfig = 1 return None elif rpmconfig.delayldconfig: rpmconfig.delayldconfig = 0 runScript("/sbin/ldconfig", force=1) if script != None: (fd, tmpfilename) = mkstemp_file(tmpdir, "rpm-tmp.") # test for open fds: # script = "ls -l /proc/$$/fd >> /$$.out\n" + script os.write(fd, script) os.close(fd) fd = None args.append(tmpfilename) args += otherargs (rfd, wfd) = os.pipe() if rusage: rusage_old = resource.getrusage(resource.RUSAGE_CHILDREN) pid = os.fork() if pid == 0: try: if chroot != None: os.chroot(chroot) os.close(rfd) if not os.path.exists("/dev"): os.mkdir("/dev") if not os.path.exists("/dev/null"): os.mknod("/dev/null", 0666, 259) fd = os.open("/dev/null", os.O_RDONLY) if fd != 0: os.dup2(fd, 0) os.close(fd) if wfd != 1: os.dup2(wfd, 1) os.close(wfd) os.dup2(1, 2) os.chdir("/") e = {"HOME": "/", "USER": "root", "LOGNAME": "root", "PATH": "/sbin:/bin:/usr/sbin:/usr/bin:/usr/X11R6/bin"} os.execve(args[0], args, e) finally: os._exit(255) os.close(wfd) # no need to read in chunks if we don't pass on data to some output func cret = "" cout = os.read(rfd, 8192) while cout: cret += 
cout cout = os.read(rfd, 8192) os.close(rfd) (cpid, status) = os.waitpid(pid, 0) if rusage: rusage_new = resource.getrusage(resource.RUSAGE_CHILDREN) rusage_val = [rusage_new[i] - rusage_old[i] for i in xrange(len(rusage_new))] else: rusage_val = None if script != None: os.unlink(tmpfilename) if status != 0: #or cret != "": if os.WIFEXITED(status): rpmconfig.printError("Script %s ended with exit code %d:" % \ (str(args), os.WEXITSTATUS(status))) elif os.WIFSIGNALED(status): core = "" if os.WCOREDUMP(status): core = "(with coredump)" rpmconfig.printError("Script %s killed by signal %d%s:" % \ (str(args), os.WTERMSIG(status), core)) elif os.WIFSTOPPED(status): # Can't happen, needs os.WUNTRACED rpmconfig.printError("Script %s stopped with signal %d:" % \ (str(args), os.WSTOPSIG(status))) else: rpmconfig.printError("Script %s ended (fixme: reason unknown):" % \ str(args)) cret.rstrip() rpmconfig.printError("Script %s failed: %s" % (args, cret)) # FIXME: should we be swallowing the script output? return (status, rusage_val)
1fc8cf80445525becdf49d3dac1a462c3136a76c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/1143/1fc8cf80445525becdf49d3dac1a462c3136a76c/functions.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1086, 3651, 12, 14654, 33, 7036, 16, 2728, 33, 7036, 16, 1308, 1968, 22850, 6487, 2944, 33, 8381, 16, 436, 9167, 33, 8381, 16, 20213, 1546, 19, 1401, 19, 5645, 3113, 462, 3085, 33, 703...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1086, 3651, 12, 14654, 33, 7036, 16, 2728, 33, 7036, 16, 1308, 1968, 22850, 6487, 2944, 33, 8381, 16, 436, 9167, 33, 8381, 16, 20213, 1546, 19, 1401, 19, 5645, 3113, 462, 3085, 33, 703...
if self.invariant: D["CreationDate"] = PDFString('19001231000000') else: D["CreationDate"] = PDFDate()
D["CreationDate"] = PDFDate(invariant=self.invariant)
def format(self, document): D = {} D["Title"] = PDFString(self.title) D["Author"] = PDFString(self.author) if self.invariant: D["CreationDate"] = PDFString('19001231000000') else: D["CreationDate"] = PDFDate() D["Producer"] = PDFString("ReportLab http://www.reportlab.com") D["Subject"] = PDFString(self.subject) PD = PDFDictionary(D) return PD.format(document)
3913b4770c7ff833e239029bbedfa1afe83b973e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/7053/3913b4770c7ff833e239029bbedfa1afe83b973e/pdfdoc.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 740, 12, 2890, 16, 1668, 4672, 463, 273, 2618, 463, 9614, 4247, 11929, 273, 12667, 780, 12, 2890, 18, 2649, 13, 463, 9614, 3594, 11929, 273, 12667, 780, 12, 2890, 18, 4161, 13, 463, 96...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 740, 12, 2890, 16, 1668, 4672, 463, 273, 2618, 463, 9614, 4247, 11929, 273, 12667, 780, 12, 2890, 18, 2649, 13, 463, 9614, 3594, 11929, 273, 12667, 780, 12, 2890, 18, 4161, 13, 463, 96...
'amount_original':currency_pool.compute(cr, uid, company_currency, currency_id, original_amount),
'amount_original':currency_pool.compute(cr, uid, company_currency, currency_id, original_amount, context=context_multi_currency),
def onchange_partner_id(self, cr, uid, ids, partner_id, journal_id, price, currency_id, ttype, context=None): """price Returns a dict that contains new values and context
02af34f56048406372b55ae89131f75f8d3688e5 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/02af34f56048406372b55ae89131f75f8d3688e5/account_voucher.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 603, 3427, 67, 31993, 67, 350, 12, 2890, 16, 4422, 16, 4555, 16, 3258, 16, 19170, 67, 350, 16, 13001, 67, 350, 16, 6205, 16, 5462, 67, 350, 16, 27963, 16, 819, 33, 7036, 4672, 3536, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 603, 3427, 67, 31993, 67, 350, 12, 2890, 16, 4422, 16, 4555, 16, 3258, 16, 19170, 67, 350, 16, 13001, 67, 350, 16, 6205, 16, 5462, 67, 350, 16, 27963, 16, 819, 33, 7036, 4672, 3536, ...
if (has_content and content[-1].endswith('<br/>') or cpt):
if has_content and content[-1].endswith('<br/>') :
def paragraph_stream(stream , elt_tag_name, elt_attributes, pdf_stylesheet): """ stream : parser stream """ stack = [] story = [] content = [] stack.append((elt_tag_name, elt_attributes)) cpt = 0 end_tag = False has_content = False while True: event, value, line_number = stream_next(stream) if event == None: break #### START ELEMENT #### if event == START_ELEMENT: tag_uri, tag_name, attributes = value if tag_name in tag_ok: if tag_name in ('i', 'em', 'b', 'strong', 'u', 'sup', 'sub'): # FIXME tag = p_format_map.get(tag_name, 'b') if cpt or has_content: content[-1] += build_start_tag(tag) else: content.append(build_start_tag(tag)) cpt += 1 elif tag_name == 'span': tag = p_format_map.get(tag_name) if cpt or has_content: content[-1] += build_start_tag(tag, attributes) else: content.append(build_start_tag(tag, attributes)) cpt += 1 elif tag_name == 'br': continue elif tag_name == 'a': if cpt or has_content: content[-1] += link_stream(stream, tag_name, attributes) else: content.append(link_stream(stream, tag_name, attributes)) cpt += 1 else: print TAG_NOT_SUPPORTED % ('document', line_number, tag_name) # unknown tag stack.append((tag_name, attributes)) else: print WARNING_DTD % ('document', line_number, tag_name) #### END ELEMENT #### elif event == END_ELEMENT: tag_uri, tag_name = value if tag_name == elt_tag_name: # stack.pop --> stack[0] return create_paragraph(pdf_stylesheet, stack.pop(), content) elif tag_name in ('i', 'em', 'b', 'strong', 'u', 'sup', 'sub', 'a', 'span'): cpt -= 1 end_tag = True content[-1] += build_end_tag(p_format_map.get(tag_name, 'b')) elif tag_name == 'br': content.append("<br/>") else: print TAG_NOT_SUPPORTED % ('document', line_number, tag_name) # unknown tag stack.append((tag_name, attributes)) #### TEXT ELEMENT #### elif event == TEXT: if len(value) > 0: # alow to write : # <para><u><i>foo</i> </u></para> value = XMLContent.encode(value) # entities # FIXME if (has_content and content[-1].endswith('<br/>') or cpt): # <p> # foo <br /> # bar 
<br /> team # </p> # equal # <p>foo <br />bar <br />team</p> value = value.lstrip() content[-1] += value else: if end_tag: content[-1] += value end_tag = False else: has_content = True content.append(value)
61dc8a17eaa82cac44b7a3f6e98528144b4e7e77 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/12681/61dc8a17eaa82cac44b7a3f6e98528144b4e7e77/rml2.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 10190, 67, 3256, 12, 3256, 269, 11572, 67, 2692, 67, 529, 16, 11572, 67, 4350, 16, 8169, 67, 19403, 4672, 3536, 1407, 294, 2082, 1407, 3536, 2110, 273, 5378, 17285, 273, 5378, 913, 273, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 10190, 67, 3256, 12, 3256, 269, 11572, 67, 2692, 67, 529, 16, 11572, 67, 4350, 16, 8169, 67, 19403, 4672, 3536, 1407, 294, 2082, 1407, 3536, 2110, 273, 5378, 17285, 273, 5378, 913, 273, ...
filesize = (self.nframes * natoms * 3) + 4
filesize = self.nframes * ((natoms * 32) + 25)
def saveFilePressed(self): if self.assy: if self.assy.filename: dir, fil, ext = fileparse(self.assy.filename) sdir = self.assy.filename else: dir, fil = "./", self.assy.name sdir = globalParms['WorkingDirectory']
de0b74280ba97f2f540e3b505a70615043ac120f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/11221/de0b74280ba97f2f540e3b505a70615043ac120f/runSim.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1923, 812, 24624, 12, 2890, 4672, 309, 365, 18, 428, 93, 30, 309, 365, 18, 428, 93, 18, 3459, 30, 1577, 16, 661, 16, 1110, 273, 585, 2670, 12, 2890, 18, 428, 93, 18, 3459, 13, 272,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1923, 812, 24624, 12, 2890, 4672, 309, 365, 18, 428, 93, 30, 309, 365, 18, 428, 93, 18, 3459, 30, 1577, 16, 661, 16, 1110, 273, 585, 2670, 12, 2890, 18, 428, 93, 18, 3459, 13, 272,...
file.Write("
def WriteFormat(self, filename): """Writes the command buffer format""" file = CWriter(filename) self.WriteHeader(file) file.Write("#pragma pack(push, 1)\n") file.Write("\n")
ddec5020fb66798ba02d5e795badffca84ceab52 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/5060/ddec5020fb66798ba02d5e795badffca84ceab52/build_gles2_cmd_buffer.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2598, 1630, 12, 2890, 16, 1544, 4672, 3536, 8368, 326, 1296, 1613, 740, 8395, 585, 273, 385, 2289, 12, 3459, 13, 365, 18, 3067, 1864, 12, 768, 13, 585, 18, 3067, 2932, 7, 683, 9454, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2598, 1630, 12, 2890, 16, 1544, 4672, 3536, 8368, 326, 1296, 1613, 740, 8395, 585, 273, 385, 2289, 12, 3459, 13, 365, 18, 3067, 1864, 12, 768, 13, 585, 18, 3067, 2932, 7, 683, 9454, ...
sql = sql + " FROM %s """ % (tableName)
sql = sql + " FROM %s " % (tableName)
def _rowLoader(self, transaction, tableName, parentRow, data, whereClause, forceChildren): """immediate loading of rowobjects from the table with the whereClause. """ tableInfo = self.schema[tableName] # Build the SQL for the query sql = "SELECT " first = 1 for column, type in tableInfo.rowColumns: if first: first = 0 else: sql = sql + "," sql = sql + " %s" % column sql = sql + " FROM %s """ % (tableName) if whereClause: sql += " WHERE " first = 1 for wItem in whereClause: if first: first = 0 else: sql += " AND " (columnName, cond, value) = wItem t = self.findTypeFor(tableName, columnName) quotedValue = quote(value, t) sql += "%s %s %s" % (columnName, self.conditionalLabels[cond], quotedValue)
3d1177b78afa568866d5353481d56f6ca39d9790 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12595/3d1177b78afa568866d5353481d56f6ca39d9790/sqlreflector.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 492, 2886, 12, 2890, 16, 2492, 16, 4775, 16, 982, 1999, 16, 501, 16, 30300, 16, 2944, 4212, 4672, 3536, 381, 6785, 7153, 434, 1027, 6911, 628, 326, 1014, 598, 326, 30300, 18, 3536...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 492, 2886, 12, 2890, 16, 2492, 16, 4775, 16, 982, 1999, 16, 501, 16, 30300, 16, 2944, 4212, 4672, 3536, 381, 6785, 7153, 434, 1027, 6911, 628, 326, 1014, 598, 326, 30300, 18, 3536...
buffers.append(data) return "".join(buffers) else: buf_len = len(data) if buf_len >= size: self._rbuf = data[size:] return data[:size] buffers = [] if data: buffers.append(data) self._rbuf = "" while True: left = size - buf_len recv_size = min(self._rbufsize, left) data = self._sock.recv(recv_size) if not data: break buffers.append(data)
def read(self, size=-1): data = self._rbuf if size < 0: # Read until EOF buffers = [] if data: buffers.append(data) self._rbuf = "" if self._rbufsize <= 1: recv_size = self.default_bufsize else: recv_size = self._rbufsize while True: data = self._sock.recv(recv_size) if not data: break buffers.append(data) return "".join(buffers) else: # Read until size bytes or EOF seen, whichever comes first buf_len = len(data) if buf_len >= size: self._rbuf = data[size:] return data[:size] buffers = [] if data: buffers.append(data) self._rbuf = "" while True: left = size - buf_len recv_size = min(self._rbufsize, left) data = self._sock.recv(recv_size) if not data: break buffers.append(data) n = len(data) if n >= left: self._rbuf = data[left:] buffers[-1] = data[:left] break buf_len += n return "".join(buffers)
5203dbadc432c6f2260803a4064375469d5ec7b6 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/8125/5203dbadc432c6f2260803a4064375469d5ec7b6/socket.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 855, 12, 2890, 16, 963, 29711, 21, 4672, 501, 273, 365, 6315, 86, 4385, 309, 963, 411, 374, 30, 468, 2720, 3180, 6431, 9664, 273, 5378, 309, 501, 30, 9664, 18, 6923, 12, 892, 13, 365...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 855, 12, 2890, 16, 963, 29711, 21, 4672, 501, 273, 365, 6315, 86, 4385, 309, 963, 411, 374, 30, 468, 2720, 3180, 6431, 9664, 273, 5378, 309, 501, 30, 9664, 18, 6923, 12, 892, 13, 365...
def removeGroup(self, group_id): """Remove a single group, including group workspace, unless keep_workspaces=true."""
def removeGroup(self, group_id, keep_workspaces=0): """Remove a single group, including group workspace, unless keep_workspaces==true."""
def removeGroup(self, group_id): """Remove a single group, including group workspace, unless keep_workspaces=true.""" retval = False managers = self._getGroupManagers() if not managers: raise NotSupported, 'No plugins allow for group management'
7ee3cd4079b5402548070a92147dabfcaa73d905 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12230/7ee3cd4079b5402548070a92147dabfcaa73d905/groups.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1206, 1114, 12, 2890, 16, 1041, 67, 350, 16, 3455, 67, 1252, 9554, 33, 20, 4672, 3536, 3288, 279, 2202, 1041, 16, 6508, 1041, 6003, 16, 3308, 3455, 67, 1252, 9554, 631, 3767, 12123, 52...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1206, 1114, 12, 2890, 16, 1041, 67, 350, 16, 3455, 67, 1252, 9554, 33, 20, 4672, 3536, 3288, 279, 2202, 1041, 16, 6508, 1041, 6003, 16, 3308, 3455, 67, 1252, 9554, 631, 3767, 12123, 52...
if (cpuinfo.cpuinfo()[cpuinfo.IDLE] < 40 or \ cpuinfo.cpuinfo()[cpuinfo.IOWAIT] > 20):
if self.mplayer.running: return self.mplayer.signals['finished'].connect(lambda exitcode: self.start_mplayer()) elif cpuinfo.cpuinfo()[cpuinfo.IDLE] < 40 or cpuinfo.cpuinfo()[cpuinfo.IOWAIT] > 20:
def create_thumbnail(self, code): """ Create thumbnail based on the captures """ job = self._current self._current = None # find thumbnails captures = glob.glob('000000??.png') if not captures: # strange, no image files found self.create_failed(job) self.notify_client(job) job = None if (cpuinfo.cpuinfo()[cpuinfo.IDLE] < 40 or \ cpuinfo.cpuinfo()[cpuinfo.IOWAIT] > 20): # too much CPU load, slow down return kaa.OneShotTimer(self.start_mplayer).start(1) self.start_mplayer() return
aec2f48653ca4a1cbd5c158efb431c06bf66f7ee /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/11703/aec2f48653ca4a1cbd5c158efb431c06bf66f7ee/videothumb.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 752, 67, 14904, 12, 2890, 16, 981, 4672, 3536, 1788, 9134, 2511, 603, 326, 21922, 3536, 1719, 273, 365, 6315, 2972, 365, 6315, 2972, 273, 599, 468, 1104, 30526, 21922, 273, 4715, 18, 105...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 752, 67, 14904, 12, 2890, 16, 981, 4672, 3536, 1788, 9134, 2511, 603, 326, 21922, 3536, 1719, 273, 365, 6315, 2972, 365, 6315, 2972, 273, 599, 468, 1104, 30526, 21922, 273, 4715, 18, 105...
sys.stdout.write("\r%s... (%.1f%%)" % (
self._stream.write("\r%s... (%.1f%%)" % (
def __call__(self, percent): promille = int(percent * 1000) if promille != self._promille: sys.stdout.write("\r%s... (%.1f%%)" % ( self._message, percent * 100)) sys.stdout.flush() self._promille = promille
95847c73151e890a9ea467f69ce55911dbf845a5 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/10394/95847c73151e890a9ea467f69ce55911dbf845a5/progress.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 1991, 972, 12, 2890, 16, 5551, 4672, 3012, 14120, 273, 509, 12, 8849, 380, 4336, 13, 309, 3012, 14120, 480, 365, 6315, 17401, 14120, 30, 365, 6315, 3256, 18, 2626, 31458, 86, 9, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 1991, 972, 12, 2890, 16, 5551, 4672, 3012, 14120, 273, 509, 12, 8849, 380, 4336, 13, 309, 3012, 14120, 480, 365, 6315, 17401, 14120, 30, 365, 6315, 3256, 18, 2626, 31458, 86, 9, ...
ORDER BY u.email""" % (id_role, ))
ORDER BY u.email""", (id_role, ))
def acc_getRoleUsers(id_role): """get all users that have access to a role. """ return run_sql("""SELECT DISTINCT(u.id), u.email, u.settings FROM user_accROLE ur, user u WHERE ur.id_accROLE = %s AND u.id = ur.id_user ORDER BY u.email""" % (id_role, ))
26e50a54ed5eb379037d5a141a2bb8b9d14b42b7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/1931/26e50a54ed5eb379037d5a141a2bb8b9d14b42b7/access_control_admin.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4078, 67, 588, 2996, 6588, 12, 350, 67, 4615, 4672, 3536, 588, 777, 3677, 716, 1240, 2006, 358, 279, 2478, 18, 3536, 225, 327, 1086, 67, 4669, 2932, 3660, 4803, 23286, 12, 89, 18, 350,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4078, 67, 588, 2996, 6588, 12, 350, 67, 4615, 4672, 3536, 588, 777, 3677, 716, 1240, 2006, 358, 279, 2478, 18, 3536, 225, 327, 1086, 67, 4669, 2932, 3660, 4803, 23286, 12, 89, 18, 350,...
flattened.append(c)
flat.append(c)
def _setTypecodeList(self): """generates ofwhat content, minOccurs/maxOccurs facet generation. Dependency instance attribute: mgContent -- expected to be either a complex definition with model group content, a model group, or model group content. TODO: should only support the first two. localTypes -- produce local class definitions later tcListElements -- elements, local/global """ flattened = [] content = self.mgContent if type(self.mgContent) is not tuple: mg = self.mgContent if not mg.isModelGroup(): mg = mg.content content = mg.content if mg.isAll(): flattend = content content = [] elif mg.isModelGroup() and mg.isDefinition(): mg = mg.content content = mg.content idx = 0 content = list(content) while idx < len(content): c = orig = content[idx] if c.isElement(): flattened.append(c) idx += 1 continue if c.isReference() and c.isModelGroup(): c = c.getModelGroupReference() if c.isDefinition() and c.isModelGroup(): c = c.content if c.isSequence() or c.isChoice(): begIdx = idx endIdx = begIdx + len(c.content) for i in range(begIdx, endIdx): content.insert(i, c.content[i-begIdx]) content.remove(orig) continue raise ContainerError, 'unexpected schema item: %s' %c.getItemTrace()
5a63ff860ff4ca34a4be7e3546b2601faf89cfd7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/14538/5a63ff860ff4ca34a4be7e3546b2601faf89cfd7/containers.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 542, 559, 710, 682, 12, 2890, 4672, 3536, 3441, 815, 434, 23770, 913, 16, 1131, 31084, 19, 1896, 31084, 11082, 9377, 18, 11993, 791, 1566, 30, 11174, 1350, 1493, 2665, 358, 506, 334...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 542, 559, 710, 682, 12, 2890, 4672, 3536, 3441, 815, 434, 23770, 913, 16, 1131, 31084, 19, 1896, 31084, 11082, 9377, 18, 11993, 791, 1566, 30, 11174, 1350, 1493, 2665, 358, 506, 334...
launched = launch.launch_wave_around_unit ("Shadow",faction,faction_ships.getRandomFighter(faction),ai,vsrandom.randrange(1,10),2000.0,4000.0,VS.getPlayer(),'')
launched = launch.launch_wave_around_unit ("Shadow",faction,faction_ships.getRandomFighter(faction),ai,vsrandom.randrange(1,10),100.0,2000.0,VS.getPlayer(),'') if (vsrandom.randrange(0,10)==0): launch.launch_wave_around_unit ("ShadowCap",faction,faction_ships.getRandomCapitol(faction),ai,1,2000.0,4000.0,VS.getPlayer(),'')
def launch_new_wave(self): print "testing" side = vsrandom.randrange(0,2) faction="confed" ai = vsrandom.randrange(0,2) if (ai==0): ai = "printhello.py" else: ai = "default" if (side==0): faction=faction_ships.get_enemy_of("confed") else: faction=faction_ships.get_friend_of("confed") launched = launch.launch_wave_around_unit ("Shadow",faction,faction_ships.getRandomFighter(faction),ai,vsrandom.randrange(1,10),2000.0,4000.0,VS.getPlayer(),'')
51d0cc42b3afc8db952b1e989680bf85a0ddb40a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/2883/51d0cc42b3afc8db952b1e989680bf85a0ddb40a/total_war.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 8037, 67, 2704, 67, 17838, 12, 2890, 4672, 1172, 315, 3813, 310, 6, 4889, 273, 6195, 9188, 18, 7884, 3676, 12, 20, 16, 22, 13, 284, 1128, 1546, 3923, 329, 6, 14679, 273, 6195, 9188, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 8037, 67, 2704, 67, 17838, 12, 2890, 4672, 1172, 315, 3813, 310, 6, 4889, 273, 6195, 9188, 18, 7884, 3676, 12, 20, 16, 22, 13, 284, 1128, 1546, 3923, 329, 6, 14679, 273, 6195, 9188, ...
if not self.checkDir('watchdata'):
if not checkDir('watchdata'):
def writeWatchDataToFile(self): """Outputs watch data to permanent storage (disk)""" if not self.checkDir('watchdata'): mkdir('watchdata')
a5a2497f9290e730263a463cbf28a312754709c7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/2194/a5a2497f9290e730263a463cbf28a312754709c7/bot.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1045, 5234, 751, 15450, 12, 2890, 4672, 3536, 13856, 4267, 501, 358, 16866, 2502, 261, 10863, 15574, 309, 486, 866, 1621, 2668, 7585, 892, 11, 4672, 6535, 2668, 7585, 892, 6134, 2, 0, 0,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1045, 5234, 751, 15450, 12, 2890, 4672, 3536, 13856, 4267, 501, 358, 16866, 2502, 261, 10863, 15574, 309, 486, 866, 1621, 2668, 7585, 892, 11, 4672, 6535, 2668, 7585, 892, 6134, 2, -100, ...
elif arg.startswith('-namespace'): action = True if len(arg) == 10: namespace2 = int(wikipedia.input(u'Which namespace should be processed?')) else: namespace2 =int( arg[11:]) for page in pagegenerators.AllpagesPageGenerator(start=start, namespace=namespace2, includeredirects=False): workon(page) if not action:
elif title is not None: page = wikipedia.Page(wikipedia.getSite(), title) workon(page) elif namespace is not None: for page in pagegenerators.AllpagesPageGenerator(start=start, namespace=namespace, includeredirects=False): workon(page) else:
def workon(page): try: text = page.get() except wikipedia.IsRedirectPage: return wikipedia.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" % page.title()) links = page.linkedPages() wikipedia.getall(mysite,links) for page2 in links: try: target = page2.getRedirectTarget() except (wikipedia.Error,wikipedia.SectionError): continue text = treat(text, page2, target) if text != page.get(): comment = wikipedia.translate(mysite, msg) page.put(text, comment)
89a93e7c410791c4b730c6fda9f2d91608b2c69d /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/4404/89a93e7c410791c4b730c6fda9f2d91608b2c69d/fixing_redirects.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1440, 265, 12, 2433, 4672, 775, 30, 977, 273, 1363, 18, 588, 1435, 1335, 21137, 18, 2520, 5961, 1964, 30, 327, 21137, 18, 2844, 12, 89, 12691, 82, 64, 82, 23012, 521, 4630, 95, 5099, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1440, 265, 12, 2433, 4672, 775, 30, 977, 273, 1363, 18, 588, 1435, 1335, 21137, 18, 2520, 5961, 1964, 30, 327, 21137, 18, 2844, 12, 89, 12691, 82, 64, 82, 23012, 521, 4630, 95, 5099, ...
_res = tuple([array(x,copy=False).astype(c) \
_res = tuple([array(x,copy=False,subok=True).astype(c) \
def __call__(self, *args): # get number of outputs and output types by calling # the function on the first entries of args nargs = len(args) if self.nin: if (nargs > self.nin) or (nargs < self.nin_wo_defaults): raise ValueError, "mismatch between python function inputs"\ " and received arguments"
e052f245f10a3f13e33f1e6fc2d15f22c03b377f /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/14925/e052f245f10a3f13e33f1e6fc2d15f22c03b377f/function_base.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 1991, 972, 12, 2890, 16, 380, 1968, 4672, 468, 336, 1300, 434, 6729, 471, 876, 1953, 635, 4440, 468, 225, 326, 445, 603, 326, 1122, 3222, 434, 833, 14440, 273, 562, 12, 1968, 13,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 1991, 972, 12, 2890, 16, 380, 1968, 4672, 468, 336, 1300, 434, 6729, 471, 876, 1953, 635, 4440, 468, 225, 326, 445, 603, 326, 1122, 3222, 434, 833, 14440, 273, 562, 12, 1968, 13,...
print >> web.debug, 'removing', path
def _delete_images(self, id): for size in config.image_sizes: path = self._imgpath(id, size) try: print >> web.debug, 'removing', path os.remove(path) except: pass del self.atimes[id]
0a7145e333caed3a816a7eca2772dce5e099190b /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/3913/0a7145e333caed3a816a7eca2772dce5e099190b/imagecache.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 3733, 67, 7369, 12, 2890, 16, 612, 4672, 364, 963, 316, 642, 18, 2730, 67, 11914, 30, 589, 273, 365, 6315, 6081, 803, 12, 350, 16, 963, 13, 775, 30, 1140, 18, 4479, 12, 803, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 3733, 67, 7369, 12, 2890, 16, 612, 4672, 364, 963, 316, 642, 18, 2730, 67, 11914, 30, 589, 273, 365, 6315, 6081, 803, 12, 350, 16, 963, 13, 775, 30, 1140, 18, 4479, 12, 803, 1...
if logging.ask_for_repair('''Check user %s ?''' %
if logging.ask_for_repair('''Check user %s?''' %
def chk_user(self, opts, args): """ Check one or more user account(s). """ include_id_lists=[ (opts.login, LMC.users.login_to_uid), (opts.uid, LMC.users.confirm_uid) ] exclude_id_lists=[ (opts.exclude, LMC.users.guess_identifier), (opts.exclude_login, LMC.users.login_to_uid), (opts.exclude_uid, LMC.users.confirm_uid) ] if opts.all and ( ( # NOTE TO THE READER: don't event try to simplify these conditions, # or the order the tests: they just MATTER. Read the tests in pure # english to undestand them and why the order is important. opts.non_interactive and opts.force) or opts.batch \ or (opts.non_interactive and logging.ask_for_repair( 'Are you sure you want to check all users ?', auto_answer=opts.auto_answer) or not opts.non_interactive) ): include_id_lists.extend([ (LMC.users.Select(filters.STD), LMC.users.confirm_uid), (LMC.users.Select(filters.SYSUNRSTR), LMC.users.confirm_uid) ]) uids_to_chk = self.select(LMC.users, 'user', args, include_id_lists=include_id_lists, exclude_id_lists=exclude_id_lists)
a1acd345c6b587c46acec53d660359e9c2c78c73 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/a1acd345c6b587c46acec53d660359e9c2c78c73/rwi.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 15000, 67, 1355, 12, 2890, 16, 1500, 16, 833, 4672, 3536, 2073, 1245, 578, 1898, 729, 2236, 12, 87, 2934, 3536, 2341, 67, 350, 67, 9772, 22850, 261, 4952, 18, 5819, 16, 511, 20022, 18,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 15000, 67, 1355, 12, 2890, 16, 1500, 16, 833, 4672, 3536, 2073, 1245, 578, 1898, 729, 2236, 12, 87, 2934, 3536, 2341, 67, 350, 67, 9772, 22850, 261, 4952, 18, 5819, 16, 511, 20022, 18,...
def getWildcardReading(entities): entityList = [] for entity in entities: if entityList and entityList[-1] != '%' and entity != '%': entityList.append(' ') entityList.append(entity) return ''.join(entityList)
def getWildcardReading(entities): entityList = [] for entity in entities: # insert space to separate reading entities, but only if we are # not looking for a wildcard with a possibly empty match if entityList and entityList[-1] != '%' and entity != '%': entityList.append(' ') entityList.append(entity) return ''.join(entityList)
33c7bd3387c933504796661bab3ca4ab548a02ff /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11128/33c7bd3387c933504796661bab3ca4ab548a02ff/dictionary.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 336, 12121, 15714, 12, 9996, 4672, 1522, 682, 273, 5378, 364, 1522, 316, 5140, 30, 468, 2243, 3476, 358, 9004, 6453, 5140, 16, 1496, 1338, 309, 732, 854, 468, 282, 486, 7849, 364, 279, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 336, 12121, 15714, 12, 9996, 4672, 1522, 682, 273, 5378, 364, 1522, 316, 5140, 30, 468, 2243, 3476, 358, 9004, 6453, 5140, 16, 1496, 1338, 309, 732, 854, 468, 282, 486, 7849, 364, 279, ...
try:
if reference in names:
def main_effect(self, reference=None): """ Return the 'main effect' columns of a factor, choosing an optional reference key.
7e39d3f4061e5f525c4d797aca861e0d89d74a4f /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/12971/7e39d3f4061e5f525c4d797aca861e0d89d74a4f/formula.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2774, 67, 13867, 12, 2890, 16, 2114, 33, 7036, 4672, 3536, 2000, 326, 296, 5254, 5426, 11, 2168, 434, 279, 5578, 16, 24784, 310, 392, 3129, 2114, 498, 18, 2, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2774, 67, 13867, 12, 2890, 16, 2114, 33, 7036, 4672, 3536, 2000, 326, 296, 5254, 5426, 11, 2168, 434, 279, 5578, 16, 24784, 310, 392, 3129, 2114, 498, 18, 2, -100, -100, -100, -100, -1...
"""end(self) -> const_iterator"""
""" end(self) -> iterator end(self) -> const_iterator """
def end(*args): """end(self) -> const_iterator""" return _moose.double_vector_end(*args)
a30e1b85be87f4bb65146e5509e165b2cf26068d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/2961/a30e1b85be87f4bb65146e5509e165b2cf26068d/moose.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 679, 30857, 1968, 4672, 3536, 679, 12, 2890, 13, 317, 2775, 679, 12, 2890, 13, 317, 1866, 67, 9838, 3536, 327, 389, 8683, 2584, 18, 9056, 67, 7737, 67, 409, 30857, 1968, 13, 2, 0, 0,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 679, 30857, 1968, 4672, 3536, 679, 12, 2890, 13, 317, 2775, 679, 12, 2890, 13, 317, 1866, 67, 9838, 3536, 327, 389, 8683, 2584, 18, 9056, 67, 7737, 67, 409, 30857, 1968, 13, 2, -100, ...
if not resource or not (resource.realm or resource.id): return []
if not resource: return ['*:*@*'] if not (resource.realm or resource.id): return ['%s:%s@%s' % (resource.realm or '*', resource.id or '*', resource.version or '*')]
def flatten(resource): if not resource or not (resource.realm or resource.id): return [] # XXX Due to the mixed functionality in resource we can end up with # ticket, ticket:1, ticket:1@10. This code naively collapses all # subsets of the parent resource into one. eg. ticket:1@10 parent = resource.parent while parent and (resource.realm == parent.realm or \ (resource.realm == parent.realm and resource.id == parent.id)): parent = parent.parent if parent: parent = flatten(parent) else: parent = [] return parent + ['%s:%s@%s' % (resource.realm or '*', resource.id or '*', resource.version or '*')]
7640367b6a9c2cb4891be692242d1c4d6fd4849b /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/2831/7640367b6a9c2cb4891be692242d1c4d6fd4849b/authz_policy.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 5341, 12, 3146, 4672, 309, 486, 1058, 30, 327, 10228, 14, 30, 14, 36, 14, 3546, 309, 486, 261, 3146, 18, 24056, 578, 1058, 18, 350, 4672, 327, 10228, 9, 87, 5319, 87, 36, 9, 87, 11...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 5341, 12, 3146, 4672, 309, 486, 1058, 30, 327, 10228, 14, 30, 14, 36, 14, 3546, 309, 486, 261, 3146, 18, 24056, 578, 1058, 18, 350, 4672, 327, 10228, 9, 87, 5319, 87, 36, 9, 87, 11...
raise OCFException("<container/> element missing")
raise OCFException("<container> element missing")
def __init__(self, stream=None): if not stream: return soup = BeautifulStoneSoup(stream.read()) container = soup.find('container') if not container: raise OCFException("<container/> element missing") if container.get('version', None) != '1.0': raise EPubException("unsupported version of OCF") rootfiles = container.find('rootfiles') if not rootfiles: raise EPubException("<rootfiles/> element missing") for rootfile in rootfiles.findAll('rootfile'): try: self[rootfile['media-type']] = rootfile['full-path'] except KeyError: raise EPubException("<rootfile/> element malformed")
8050aac496e6cfb5cb90538900d8639c1ea91e45 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9125/8050aac496e6cfb5cb90538900d8639c1ea91e45/epub.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 1407, 33, 7036, 4672, 309, 486, 1407, 30, 327, 15418, 273, 27358, 510, 476, 27069, 12, 3256, 18, 896, 10756, 1478, 273, 15418, 18, 4720, 2668, 3782, 6134, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 1407, 33, 7036, 4672, 309, 486, 1407, 30, 327, 15418, 273, 27358, 510, 476, 27069, 12, 3256, 18, 896, 10756, 1478, 273, 15418, 18, 4720, 2668, 3782, 6134, ...
allcolor.update(self.colornodes) for txt in [x for x in self.colornodes if ',' in x]:
allcolor.update(self.colorfiltervalues) for txt in [x for x in self.colorfiltervalues if ',' in x]:
def sendForm(self): request = self.request
e73de5490b0bac6c617d591876d17d0253061975 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/888/e73de5490b0bac6c617d591876d17d0253061975/ShowGraph.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1366, 1204, 12, 2890, 4672, 590, 273, 365, 18, 2293, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1366, 1204, 12, 2890, 4672, 590, 273, 365, 18, 2293, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, ...
Returns the degree centrality (fraction of nodes connected to) as a dictionary of values keyed by node. The degree centrality is
Returns the degree centrality (fraction of vertices connected to) as a dictionary of values keyed by vertex. The degree centrality is
def centrality_degree(self, v=False): r""" Returns the degree centrality (fraction of nodes connected to) as a dictionary of values keyed by node. The degree centrality is normalized to be in range (0,1). Measures of the centrality of a vertex within a graph determine the relative importance of that node to its graph. Degree centrality measures the number of links incident upon a node. INPUT: v -- a vertex label (to find degree centrality of only one node) EXAMPLES: sage: (graphs.ChvatalGraph()).centrality_degree() {0: 0.36363636363636365, 1: 0.36363636363636365, 2: 0.36363636363636365, 3: 0.36363636363636365, 4: 0.36363636363636365, 5: 0.36363636363636365, 6: 0.36363636363636365, 7: 0.36363636363636365, 8: 0.36363636363636365, 9: 0.36363636363636365, 10: 0.36363636363636365, 11: 0.36363636363636365} sage: D = DiGraph({0:[1,2,3], 1:[2], 3:[0,1]}) sage.: D.show(figsize=[2,2]) sage: D = D.to_undirected() sage.: D.show(figsize=[2,2]) sage: D.centrality_degree() {0: 1.0, 1: 1.0, 2: 0.66666666666666663, 3: 0.66666666666666663} sage: D.centrality_degree(v=1) 1.0 """ import networkx return networkx.degree_centrality(self._nxg, v)
a520fa15cff86fbbd1335f8b6f9d36203c74d574 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/9890/a520fa15cff86fbbd1335f8b6f9d36203c74d574/graph.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 276, 8230, 7919, 67, 21361, 12, 2890, 16, 331, 33, 8381, 4672, 436, 8395, 2860, 326, 10782, 276, 8230, 7919, 261, 16744, 434, 6928, 5840, 358, 13, 487, 279, 3880, 434, 924, 17408, 635, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 276, 8230, 7919, 67, 21361, 12, 2890, 16, 331, 33, 8381, 4672, 436, 8395, 2860, 326, 10782, 276, 8230, 7919, 261, 16744, 434, 6928, 5840, 358, 13, 487, 279, 3880, 434, 924, 17408, 635, ...
__chains = {} __starts = [] __sample = "" __sentence_mean = 0 __sentence_sigma = 0 __paragraph_mean = 0 __paragraph_sigma = 0 __generated_sentence_mean = 0 __generated_sentence_sigma = 0 __generated_paragraph_mean = 0 __generated_paragraph_sigma = 0 def __init__(self, sample=_DEFAULT_SAMPLE, dictionary=_DEFAULT_DICT): """ Initialises a lorem ipsum generator by performing ahead of time the calculations required by all "generations". Requires a sample text and a list of words. """ self.sample = sample self.dictionary = dictionary def __set_sentence_mean(self, mean): if mean < 0: raise ValueError('Mean sentence length must be non-negative.') self.__sentence_mean = mean def __set_sentence_sigma(self, sigma): if sigma < 0: raise ValueError('Standard deviation of sentence length must be ' 'non-negative.') self.__sentence_sigma = sigma def __set_paragraph_mean(self, mean): if mean < 0: raise ValueError('Mean paragraph length must be non-negative.') self.__paragraph_mean = mean def __set_paragraph_sigma(self, sigma): if sigma < 0: raise ValueError('Standard deviation of paragraph length must be ' 'non-negative.') self.__paragraph_sigma = sigma def __get_sentence_mean(self): """ A non-negative value determining the mean sentence length (in words) of generated sentences. Is changed to match the sample text when the sample text is updated. """ return self.__sentence_mean def __get_sentence_sigma(self): """ A non-negative value determining the standard deviation of sentence lengths (in words) of generated sentences. Is changed to match the sample text when the sample text is updated. """ return self.__sentence_sigma def __get_paragraph_mean(self): """ A non-negative value determining the mean paragraph length (in sentences) of generated sentences. Is changed to match the sample text when the sample text is updated. 
""" return self.__paragraph_mean def __get_paragraph_sigma(self): """ A non-negative value determining the standard deviation of paragraph lengths (in sentences) of generated sentences. Is changed to match the sample text when the sample text is updated. """ return self.__paragraph_sigma sentence_mean = property(__get_sentence_mean, __set_sentence_mean) sentence_sigma = property(__get_sentence_sigma, __set_sentence_sigma) paragraph_mean = property(__get_paragraph_mean, __set_paragraph_mean) paragraph_sigma = property(__get_paragraph_sigma, __set_paragraph_sigma) def __generate_chains(self, sample): """ Generates the __chains and __starts values required for sentence generation. """ words = _split_words(sample) if len(words) <= 0: raise InvalidSampleError word_info = map(_get_word_info, words) previous = (0, 0) chains = {} starts = [previous] for pair in word_info: if pair[0] == 0: continue chains.setdefault(previous, []).append(pair) if pair[1] in _DELIMITERS_SENTENCES: starts.append(previous) previous = (previous[1], pair[0]) if len(chains) > 0: self.__chains = chains self.__starts = starts else: raise InvalidSampleError def __generate_statistics(self, sample): """ Calculates the mean and standard deviation of sentence and paragraph lengths. """ self.__generate_sentence_statistics(sample) self.__generate_paragraph_statistics(sample) self.reset_statistics() def __generate_sentence_statistics(self, sample): """ Calculates the mean and standard deviation of the lengths of sentences (in words) in a sample text. """ sentences = filter(lambda s : len(s.strip()) > 0, _split_sentences(sample)) sentence_lengths = map(len, map(_split_words, sentences)) self.__generated_sentence_mean = _mean(sentence_lengths) self.__generated_sentence_sigma = _sigma(sentence_lengths) def __generate_paragraph_statistics(self, sample): """ Calculates the mean and standard deviation of the lengths of paragraphs (in sentences) in a sample text. 
""" paragraphs = filter(lambda s : len(s.strip()) > 0, _split_paragraphs(sample)) paragraph_lengths = map(len, map(_split_sentences, paragraphs)) self.__generated_paragraph_mean = _mean(paragraph_lengths) self.__generated_paragraph_sigma = _sigma(paragraph_lengths) def reset_statistics(self): """ Returns the values of sentence_mean, sentence_sigma, paragraph_mean, and paragraph_sigma to their values as calculated from the sample text. """ self.sentence_mean = self.__generated_sentence_mean self.sentence_sigma = self.__generated_sentence_sigma self.paragraph_mean = self.__generated_paragraph_mean self.paragraph_sigma = self.__generated_paragraph_sigma def __get_sample(self): return self.__sample def __set_sample(self, sample): """ Sets the generator to be based on a new sample text. """ self.__sample = sample self.__generate_chains(sample) self.__generate_statistics(sample) def __set_dictionary(self, dictionary): """ Sets the generator to use a given selection of words for generating sentences with. """ words = {} for word in dictionary: try: word = str(word) words.setdefault(len(word), set()).add(word) except TypeError: continue if len(words) > 0: self.__words = words else: raise InvalidDictionaryError def __get_dictionary(self): dictionary = [] map(dictionary.extend, self.__words.values()) return dictionary sample = property(__get_sample, __set_sample) dictionary = property(__get_dictionary, __set_dictionary) def __choose_random_start(self): starts = set(self.__starts) chains = set(self.__chains.keys()) valid_starts = list(chains.intersection(starts)) return random.choice(valid_starts) def generate_sentence(self, start_with_lorem=False): """ Generates a single sentence, of random length. If start_with_lorem=True, then the sentence will begin with the standard "Lorem ipsum..." first sentence. 
""" if len(self.__chains) == 0 or len(self.__starts) == 0: raise InvalidSampleError if len(self.__words) == 0: raise InvalidDictionaryError sentence_length = random.normalvariate(self.sentence_mean, \ self.sentence_sigma) sentence_length = max(int(round(sentence_length)), 1) sentence = [] previous = () word_delimiter = '' if start_with_lorem: lorem = "lorem ipsum dolor sit amet, consecteteur adipiscing elit" lorem = lorem.split() sentence += lorem[:sentence_length] last_char = sentence[-1][-1] if last_char in _DELIMITERS_WORDS: word_delimiter = last_char while len(sentence) < sentence_length: if (not self.__chains.has_key(previous)): previous = self.__choose_random_start() chain = random.choice(self.__chains[previous]) word_length = chain[0] if chain[1] in _DELIMITERS_SENTENCES: word_delimiter = '' else: word_delimiter = chain[1] closest_length = _choose_closest( self.__words.keys(), word_length) word = random.choice(list(self.__words[closest_length])) sentence += [word + word_delimiter] previous = (previous[1], word_length) sentence = ' '.join(sentence) sentence = sentence.capitalize() sentence = sentence.rstrip(word_delimiter) + '.' return sentence def generate_paragraph(self, start_with_lorem=False): """ Generates a single lorem ipsum paragraph, of random length. If start_with_lorem=True, then the paragraph will begin with the standard "Lorem ipsum..." first sentence. """ paragraph = [] paragraph_length = random.normalvariate(self.paragraph_mean, \ self.paragraph_sigma) paragraph_length = max(int(round(paragraph_length)), 1) while len(paragraph) < paragraph_length: sentence = self.generate_sentence( start_with_lorem = (start_with_lorem and len(paragraph) == 0) ) paragraph += [sentence] paragraph = ' '.join(paragraph) return paragraph class MarkupGenerator(Generator): """ Generates random strings of "lorem ipsum" text, based on the word distribution of a given sample text, using the words in a given dictionary. 
Provides a number of methods for producing "lorem ipsum" text with varying formats. """ def __generate_markup(self, begin, end, between, quantity, start_with_lorem, function): """ Generates multiple pieces of text, with begin before each piece, end after each piece, and between between each piece. Accepts a function that returns a string. """ text = [] while len(text) < quantity: part = function( start_with_lorem = (start_with_lorem and len(text) == 0) ) part = begin + part + end text += [part] text = between.join(text) return text def __generate_markup_paragraphs(self, begin_paragraph, end_paragraph, between_paragraphs, quantity, start_with_lorem=False): return self.__generate_markup( begin_paragraph, end_paragraph, between_paragraphs, quantity, start_with_lorem, self.generate_paragraph) def __generate_markup_sentences(self, begin_sentence, end_sentence, between_sentences, quantity, start_with_lorem=False): return self.__generate_markup( begin_sentence, end_sentence, between_sentences, quantity, start_with_lorem, self.generate_sentence) def generate_paragraphs_plain(self, quantity, start_with_lorem=False): """Generates a number of paragraphs, separated by empty lines.""" return self.__generate_markup_paragraphs( begin_paragraph='', end_paragraph='', between_paragraphs=_NEWLINE * 2, quantity=quantity, start_with_lorem=start_with_lorem ) def generate_sentences_plain(self, quantity, start_with_lorem=False): """Generates a number of sentences.""" return self.__generate_markup_sentences( begin_sentence='', end_sentence='', between_sentences=' ', quantity=quantity, start_with_lorem=start_with_lorem ) def generate_paragraphs_html_p(self, quantity, start_with_lorem=False): """ Generates a number of paragraphs, with each paragraph surrounded by HTML pararaph tags. 
""" return self.__generate_markup_paragraphs( begin_paragraph='<p>' + _NEWLINE + '\t', end_paragraph=_NEWLINE + '</p>', between_paragraphs=_NEWLINE, quantity=quantity, start_with_lorem=start_with_lorem ) def generate_sentences_html_p(self, quantity, start_with_lorem=False): """ Generates a number of sentences, with each sentence surrounded by HTML pararaph tags. """ return self.__generate_markup_sentences( begin_sentence='<p>' + _NEWLINE + '\t', end_sentence=_NEWLINE + '</p>', between_sentences=_NEWLINE, quantity=quantity, start_with_lorem=start_with_lorem ) def generate_paragraphs_html_li(self, quantity, start_with_lorem=False): """Generates a number of paragraphs, separated by empty lines.""" output = self.__generate_markup_paragraphs( begin_paragraph='\t<li>\n\t\t', end_paragraph='\n\t</li>', between_paragraphs=_NEWLINE, quantity=quantity, start_with_lorem=start_with_lorem ) return ('<ul>' + _NEWLINE + output + _NEWLINE + '</ul>') def generate_sentences_html_li(self, quantity, start_with_lorem=False): """Generates a number of sentences surrounded by HTML 'li' tags.""" output = self.__generate_markup_sentences( begin_sentence='\t<li>' + _NEWLINE + '\t\t', end_sentence=_NEWLINE + '\t</li>', between_sentences=_NEWLINE, quantity=quantity, start_with_lorem=start_with_lorem ) return ('<ul>' + _NEWLINE + output + _NEWLINE + '</ul>')
if __name__ == '__main__': unittest.main()
def __str__(self): return ('The sample text must contain one or more empty-line ' 'delimited paragraphs, and each paragraph must contain one or ' 'more period, question mark, or exclamation mark delimited ' 'sentences.')
fa9e6662a1f30cb272942f25e233798ee7edac45 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/3480/fa9e6662a1f30cb272942f25e233798ee7edac45/testlipsum.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 701, 972, 12, 2890, 4672, 327, 7707, 1986, 3296, 977, 1297, 912, 1245, 578, 1898, 1008, 17, 1369, 296, 296, 3771, 1038, 329, 24552, 16, 471, 1517, 10190, 1297, 912, 1245, 578, 296,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 701, 972, 12, 2890, 4672, 327, 7707, 1986, 3296, 977, 1297, 912, 1245, 578, 1898, 1008, 17, 1369, 296, 296, 3771, 1038, 329, 24552, 16, 471, 1517, 10190, 1297, 912, 1245, 578, 296,...
x = print_record(recIDs[irec], format, ot, ln)
x = print_record(recIDs[irec], format, ot, ln, search_pattern=search_pattern, uid=uid)
def print_records(req, recIDs, jrec=1, rg=10, format='hb', ot='', ln=cdslang, relevances=[], relevances_prologue="(", relevances_epilogue="%%)", decompress=zlib.decompress): """Prints list of records 'recIDs' formatted accoding to 'format' in groups of 'rg' starting from 'jrec'. Assumes that the input list 'recIDs' is sorted in reverse order, so it counts records from tail to head. A value of 'rg=-9999' means to print all records: to be used with care. Print also list of RELEVANCES for each record (if defined), in between RELEVANCE_PROLOGUE and RELEVANCE_EPILOGUE. """ # load the right message language _ = gettext_set_language(ln) # sanity checking: if req == None: return if len(recIDs): nb_found = len(recIDs) if rg == -9999: # print all records rg = nb_found else: rg = abs(rg) if jrec < 1: # sanity checks jrec = 1 if jrec > nb_found: jrec = max(nb_found-rg+1, 1) # will print records from irec_max to irec_min excluded: irec_max = nb_found - jrec irec_min = nb_found - jrec - rg if irec_min < 0: irec_min = -1 if irec_max >= nb_found: irec_max = nb_found - 1 #req.write("%s:%d-%d" % (recIDs, irec_min, irec_max)) if format.startswith('x'): # we are doing XML output: for irec in range(irec_max,irec_min,-1): req.write(print_record(recIDs[irec], format, ot, ln)) elif format.startswith('t') or str(format[0:3]).isdigit(): # we are doing plain text output: for irec in range(irec_max,irec_min,-1): x = print_record(recIDs[irec], format, ot, ln) req.write(x) if x: req.write('\n') else: # we are doing HTML output: if format == 'hp' or format.startswith("hb_") or format.startswith("hd_"): # portfolio and on-the-fly formats: for irec in range(irec_max,irec_min,-1): req.write(print_record(recIDs[irec], format, ot, ln)) elif format.startswith("hb"): # HTML brief format: rows = [] for irec in range(irec_max,irec_min,-1): temp = { 'number' : jrec+irec_max-irec, 'recid' : recIDs[irec], } if relevances and relevances[irec]: temp['relevance'] = relevances[irec] else: temp['relevance'] = '' 
temp['record'] = print_record(recIDs[irec], format, ot, ln) rows.append(temp) req.write(websearch_templates.tmpl_records_format_htmlbrief( ln = ln, weburl = weburl, rows = rows, relevances_prologue = relevances_prologue, relevances_epilogue = relevances_epilogue, )) else: # HTML detailed format: # print other formatting choices: rows = [] for irec in range(irec_max,irec_min,-1): temp = { 'record' : print_record(recIDs[irec], format, ot, ln), 'recid' : recIDs[irec], 'creationdate': '', 'modifydate' : '', } if record_exists(recIDs[irec])==1: temp['creationdate'] = get_creation_date(recIDs[irec]) temp['modifydate'] = get_modification_date(recIDs[irec]) if cfg_experimental_features: r = calculate_cited_by_list(recIDs[irec]) if r: temp ['citinglist'] = r temp ['citationhistory'] = create_citation_history_graph_and_box(recIDs[irec], ln) r = calculate_co_cited_with_list(recIDs[irec]) if r: temp ['cociting'] = r r = calculate_reading_similarity_list(recIDs[irec], "downloads") if r: temp ['downloadsimilarity'] = r temp ['downloadhistory'] = create_download_history_graph_and_box(recIDs[irec], ln) # Get comments and reviews for this record if exist # FIXME: templatize me if cfg_webcomment_allow_comments or cfg_webcomment_allow_reviews: from invenio.webcomment import get_first_comments_or_remarks (comments, reviews) = get_first_comments_or_remarks(recID=recIDs[irec], ln=ln, nb_comments=cfg_webcomment_nb_comments_in_detailed_view, nb_reviews=cfg_webcomment_nb_reviews_in_detailed_view) temp['comments'] = comments temp['reviews'] = reviews r = calculate_reading_similarity_list(recIDs[irec], "pageviews") if r: temp ['viewsimilarity'] = r rows.append(temp) req.write(websearch_templates.tmpl_records_format_other( ln = ln, weburl = weburl, url_argd = req.argd, rows = rows, format = format, )) else: print_warning(req, _("Use different search terms."))
cedddddc8cebbac6e7a9977942fa7192ccf7a919 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/2139/cedddddc8cebbac6e7a9977942fa7192ccf7a919/search_engine.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1172, 67, 7094, 12, 3658, 16, 1950, 5103, 16, 525, 3927, 33, 21, 16, 14524, 33, 2163, 16, 740, 2218, 76, 70, 2187, 15835, 2218, 2187, 7211, 33, 4315, 2069, 539, 16, 6707, 90, 6872, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1172, 67, 7094, 12, 3658, 16, 1950, 5103, 16, 525, 3927, 33, 21, 16, 14524, 33, 2163, 16, 740, 2218, 76, 70, 2187, 15835, 2218, 2187, 7211, 33, 4315, 2069, 539, 16, 6707, 90, 6872, 2...
drawsphere(color, pos, drawrad, level)
if self.element is not Singlet or not debug_pref("draw bondpoints as stubs", Choice_boolean_False): drawsphere(color, pos, drawrad, level)
def draw(self, glpane, dispdef, col, level): """Draw this atom depending on whether it is picked and its display mode (possibly inherited from dispdef). An atom's display mode overrides the inherited one from the molecule or glpane, but a molecule's color overrides the atom's element-dependent one. No longer treats glpane.selatom specially (caller can draw selatom separately, on top of the regular atom). Also draws picked-atom wireframe, but doesn't draw any bonds. Return value gives the display mode we used (our own or inherited). """ assert not self.__killed disp = default_display_mode # to be returned in case of early exception
2373384af6b9675cf191d457a36ed49435ade4f0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/11221/2373384af6b9675cf191d457a36ed49435ade4f0/chem.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3724, 12, 2890, 16, 5118, 29009, 16, 16232, 536, 16, 645, 16, 1801, 4672, 3536, 6493, 333, 3179, 8353, 603, 2856, 518, 353, 25534, 471, 2097, 2562, 1965, 261, 917, 8781, 12078, 628, 1623...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3724, 12, 2890, 16, 5118, 29009, 16, 16232, 536, 16, 645, 16, 1801, 4672, 3536, 6493, 333, 3179, 8353, 603, 2856, 518, 353, 25534, 471, 2097, 2562, 1965, 261, 917, 8781, 12078, 628, 1623...
where.append( (fobj._rec_name,'like',nodename) )
where.append( (fobj._rec_name,'=',nodename) )
def _file_get(self, nodename=False): if not self.object: return [] pool = pooler.get_pool(self.cr.dbname) fobj = pool.get('ir.attachment') res2 = [] where = [] if self.object2: where.append( ('res_model','=',self.object2._name) ) where.append( ('res_id','=',self.object2.id) ) else: where.append( ('parent_id','=',self.object.id) ) where.append( ('res_id','=',False) ) if nodename: where.append( (fobj._rec_name,'like',nodename) ) for content in self.object.content_ids: if self.object2 or not content.include_name: if content.include_name: test_nodename = self.object2.name + (content.suffix or '') + (content.extension or '') else: test_nodename = (content.suffix or '') + (content.extension or '') if test_nodename.find('/'): test_nodename=test_nodename.replace('/', '_') path = self.path+'/'+test_nodename if not nodename: n = node_class(self.cr, self.uid,path, self.object2, False, context=self.context, content=content, type='content', root=False) res2.append( n) else: if nodename == test_nodename: n = node_class(self.cr, self.uid, path, self.object2, False, context=self.context, content=content, type='content', root=False) res2.append(n)
1ef4bfec1ded630a1d4663031259aa959d8ba296 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/7397/1ef4bfec1ded630a1d4663031259aa959d8ba296/document.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 768, 67, 588, 12, 2890, 16, 14003, 1069, 33, 8381, 4672, 309, 486, 365, 18, 1612, 30, 327, 5378, 2845, 273, 2845, 264, 18, 588, 67, 6011, 12, 2890, 18, 3353, 18, 20979, 13, 2470...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 768, 67, 588, 12, 2890, 16, 14003, 1069, 33, 8381, 4672, 309, 486, 365, 18, 1612, 30, 327, 5378, 2845, 273, 2845, 264, 18, 588, 67, 6011, 12, 2890, 18, 3353, 18, 20979, 13, 2470...
This is the results of your model run of \textbf{ABM-U} for the
This is the results of your model run of \textbf{ABM-B} for the
def latexBody(self): return r""" This is the results of your model run of \textbf{ABM-U} for the Natural Phenomenon Simulation Group (NPSG) at University of Waterloo.
7518b9371fc42cae5b7ff818acf3a636401881ae /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13808/7518b9371fc42cae5b7ff818acf3a636401881ae/abmb_c.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 25079, 2250, 12, 2890, 4672, 327, 436, 8395, 1220, 353, 326, 1686, 434, 3433, 938, 1086, 434, 521, 955, 17156, 95, 2090, 49, 17, 38, 97, 364, 326, 423, 270, 3766, 4360, 275, 362, 275, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 25079, 2250, 12, 2890, 4672, 327, 436, 8395, 1220, 353, 326, 1686, 434, 3433, 938, 1086, 434, 521, 955, 17156, 95, 2090, 49, 17, 38, 97, 364, 326, 423, 270, 3766, 4360, 275, 362, 275, ...