rem
stringlengths
2
226k
add
stringlengths
0
227k
context
stringlengths
8
228k
meta
stringlengths
156
215
input_ids
list
attention_mask
list
labels
list
selections):
selections, log=sys.stdout):
def _alignment(self, pdb_hierarchy, pdb_hierarchy_ref, params, selections): res_match_hash = {} model_mseq_res_hash = {} model_seq, model_structures = self.extract_sequence_and_sites( pdb_hierarchy=pdb_hierarchy, selection=selections[0]) ref_mseq_res_hash = {} ref_seq, ref_structures = self.extract_sequence_and_sites( pdb_hierarchy = pdb_hierarchy_ref, selection=selections[1]) for struct in model_structures: model_mseq_res_hash[struct.i_seq] = struct.rg.atoms()[0].pdb_label_columns()[4:] for struct in ref_structures: ref_mseq_res_hash[struct.i_seq] = struct.rg.atoms()[0].pdb_label_columns()[4:] align_obj = mmtbx.alignment.align( seq_a = model_seq, seq_b = ref_seq, gap_opening_penalty = params.alignment.gap_opening_penalty, gap_extension_penalty = params.alignment.gap_extension_penalty, similarity_function = params.alignment.similarity_matrix, style = params.alignment.alignment_style) alignment = align_obj.extract_alignment() matches = alignment.matches() exact_match_selections = alignment.exact_match_selections() exact_a = tuple(exact_match_selections[0]) exact_b = tuple(exact_match_selections[1]) for i, i_seq in enumerate(exact_a): res_match_hash[model_mseq_res_hash[i_seq]] = ref_mseq_res_hash[exact_b[i]] return res_match_hash
907e426fd41ff5a0c9492ce2a493a7074b2e48aa /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/696/907e426fd41ff5a0c9492ce2a493a7074b2e48aa/reference_model.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 14409, 12, 2890, 16, 10892, 67, 17937, 16, 10892, 67, 17937, 67, 1734, 16, 859, 16, 21738, 16, 613, 33, 9499, 18, 10283, 4672, 400, 67, 1916, 67, 2816, 273, 2618, 938, 67, 81, 5...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 14409, 12, 2890, 16, 10892, 67, 17937, 16, 10892, 67, 17937, 67, 1734, 16, 859, 16, 21738, 16, 613, 33, 9499, 18, 10283, 4672, 400, 67, 1916, 67, 2816, 273, 2618, 938, 67, 81, 5...
self.assert_(set(self._box.list_folders()) == set(('two', 'three')))
self.assertEqual(set(self._box.list_folders()), set(('two', 'three')))
def test_add_and_remove_folders(self): # Delete folders self._box.add_folder('one') self._box.add_folder('two') self.assertEqual(len(self._box.list_folders()), 2) self.assert_(set(self._box.list_folders()) == set(('one', 'two'))) self._box.remove_folder('one') self.assertEqual(len(self._box.list_folders()), 1) self.assertEqual(set(self._box.list_folders()), set(('two',))) self._box.add_folder('three') self.assertEqual(len(self._box.list_folders()), 2) self.assert_(set(self._box.list_folders()) == set(('two', 'three'))) self._box.remove_folder('three') self.assertEqual(len(self._box.list_folders()), 1) self.assertEqual(set(self._box.list_folders()), set(('two',))) self._box.remove_folder('two') self.assertEqual(len(self._box.list_folders()), 0) self.assertEqual(self._box.list_folders(), [])
9d1fae6c21b9661e8074aa8a166635ba69122b4b /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/12029/9d1fae6c21b9661e8074aa8a166635ba69122b4b/test_mailbox.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 1289, 67, 464, 67, 4479, 67, 16064, 12, 2890, 4672, 468, 2504, 9907, 365, 6315, 2147, 18, 1289, 67, 5609, 2668, 476, 6134, 365, 6315, 2147, 18, 1289, 67, 5609, 2668, 15415, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 1289, 67, 464, 67, 4479, 67, 16064, 12, 2890, 4672, 468, 2504, 9907, 365, 6315, 2147, 18, 1289, 67, 5609, 2668, 476, 6134, 365, 6315, 2147, 18, 1289, 67, 5609, 2668, 15415, 6...
if re_plugin_meta.search(plugin_name):
if re_plugin_meta.search(plugin_name) != None:
def parse_plugin_name(self): buff = self.buffer.strip() ParseDbg.add("parse_plugin_name buff=%s" % buff) plugin_match = re_plugin.match(buff) if plugin_match: plugin_name = C.cname(plugin_match.group(1)) ParseDbg.add("parse_plugin_name name=%s" % plugin_name) pos = plugin_match.span(2)[1] self.buffer = buff[pos:].lstrip() # if the plugin name contains metacharacters, do filename expansion if re_plugin_meta.search(plugin_name): pat = plugin_name ParseDbg.add("parse_plugin_name name has META: %s" % pat) matches = [] pat = re_plugin_meta.sub(r".\1", pat) ParseDbg.add("parse_plugin_name new RE pat: %s" % pat) re_namepat = re.compile(pat, re.IGNORECASE) for p in self.active: if re_namepat.match(p): matches.append(p) ParseDbg.add("parse_plugin_name matching name: %s" % p) if len(matches) > 0: plugin_name = matches.pop(0) ParseDbg.add("parse_plugin_name new name=%s" % plugin_name) if len(matches) > 0: self.buffer = " ".join(matches) + " " + self.buffer ParseDbg.add("parse_plugin_name new buff=\"%s\"" % self.buffer) exists = plugin_name in self.active return(exists, plugin_name) else: self.parse_error("expected a plugin name: \"%s\"" % buff) self.buffer = "" return(None, None)
41faf84623d50848b1bfd856a822761543084216 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/2827/41faf84623d50848b1bfd856a822761543084216/mlox.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1109, 67, 4094, 67, 529, 12, 2890, 4672, 6139, 273, 365, 18, 4106, 18, 6406, 1435, 2884, 4331, 75, 18, 1289, 2932, 2670, 67, 4094, 67, 529, 6139, 5095, 87, 6, 738, 6139, 13, 1909, 67...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1109, 67, 4094, 67, 529, 12, 2890, 4672, 6139, 273, 365, 18, 4106, 18, 6406, 1435, 2884, 4331, 75, 18, 1289, 2932, 2670, 67, 4094, 67, 529, 6139, 5095, 87, 6, 738, 6139, 13, 1909, 67...
required=False,
required=True,
def setPassword(password): """Set the password in a hashed form, so it can be verified later.
ea48c5a58838d301e90d3644f0327a682ebe7df5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/7127/ea48c5a58838d301e90d3644f0327a682ebe7df5/interfaces.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 23753, 12, 3664, 4672, 3536, 694, 326, 2201, 316, 279, 14242, 646, 16, 1427, 518, 848, 506, 13808, 5137, 18, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 23753, 12, 3664, 4672, 3536, 694, 326, 2201, 316, 279, 14242, 646, 16, 1427, 518, 848, 506, 13808, 5137, 18, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -1...
> <doctest foo-bär@baz[1]>(1)<module>()->None
> <doctest foo-bär@baz[2]>(1)<module>()->None
def test_pdb_set_trace(): """Using pdb.set_trace from a doctest. You can use pdb.set_trace from a doctest. To do so, you must retrieve the set_trace function from the pdb module at the time you use it. The doctest module changes sys.stdout so that it can capture program output. It also temporarily replaces pdb.set_trace with a version that restores stdout. This is necessary for you to see debugger output. >>> doc = ''' ... >>> x = 42 ... >>> import pdb; pdb.set_trace() ... ''' >>> parser = doctest.DocTestParser() >>> test = parser.get_doctest(doc, {}, "foo-bär@baz", "foo-bär@baz.py", 0) >>> runner = doctest.DocTestRunner(verbose=False) To demonstrate this, we'll create a fake standard input that captures our debugger input: >>> import tempfile >>> real_stdin = sys.stdin >>> sys.stdin = _FakeInput([ ... 'print(x)', # print data defined by the example ... 'continue', # stop debugging ... '']) >>> try: runner.run(test) ... finally: sys.stdin = real_stdin --Return-- > <doctest foo-bär@baz[1]>(1)<module>()->None -> import pdb; pdb.set_trace() (Pdb) print(x) 42 (Pdb) continue TestResults(failed=0, attempted=2) You can also put pdb.set_trace in a function called from a test: >>> def calls_set_trace(): ... y=2 ... import pdb; pdb.set_trace() >>> doc = ''' ... >>> x=1 ... >>> calls_set_trace() ... ''' >>> test = parser.get_doctest(doc, globals(), "foo-bär@baz", "foo-bär@baz.py", 0) >>> real_stdin = sys.stdin >>> sys.stdin = _FakeInput([ ... 'print(y)', # print data defined in the function ... 'up', # out of function ... 'print(x)', # print data defined by the example ... 'continue', # stop debugging ... '']) >>> try: ... runner.run(test) ... finally: ... 
sys.stdin = real_stdin --Return-- > <doctest test.test_doctest.test_pdb_set_trace[8]>(3)calls_set_trace()->None -> import pdb; pdb.set_trace() (Pdb) print(y) 2 (Pdb) up > <doctest foo-bär@baz[1]>(1)<module>() -> calls_set_trace() (Pdb) print(x) 1 (Pdb) continue TestResults(failed=0, attempted=2) During interactive debugging, source code is shown, even for doctest examples: >>> doc = ''' ... >>> def f(x): ... ... g(x*2) ... >>> def g(x): ... ... print(x+3) ... ... import pdb; pdb.set_trace() ... >>> f(3) ... ''' >>> test = parser.get_doctest(doc, globals(), "foo-bär@baz", "foo-bär@baz.py", 0) >>> real_stdin = sys.stdin >>> sys.stdin = _FakeInput([ ... 'list', # list source from example 2 ... 'next', # return from g() ... 'list', # list source from example 1 ... 'next', # return from f() ... 'list', # list source from example 3 ... 'continue', # stop debugging ... '']) >>> try: runner.run(test) ... finally: sys.stdin = real_stdin ... # doctest: +NORMALIZE_WHITESPACE --Return-- > <doctest foo-bär@baz[1]>(3)g()->None -> import pdb; pdb.set_trace() (Pdb) list 1 def g(x): 2 print(x+3) 3 -> import pdb; pdb.set_trace() [EOF] (Pdb) next --Return-- > <doctest foo-bär@baz[0]>(2)f()->None -> g(x*2) (Pdb) list 1 def f(x): 2 -> g(x*2) [EOF] (Pdb) next --Return-- > <doctest foo-bär@baz[2]>(1)<module>()->None -> f(3) (Pdb) list 1 -> f(3) [EOF] (Pdb) continue ********************************************************************** File "foo-bär@baz.py", line 7, in foo-bär@baz Failed example: f(3) Expected nothing Got: 9 TestResults(failed=1, attempted=3) """
5ece1f98a89eb55deeffd6315d097bbf56f9c6e0 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12029/5ece1f98a89eb55deeffd6315d097bbf56f9c6e0/test_doctest.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 17414, 67, 542, 67, 5129, 13332, 3536, 7736, 10892, 18, 542, 67, 5129, 628, 279, 31263, 395, 18, 225, 4554, 848, 999, 10892, 18, 542, 67, 5129, 628, 279, 31263, 395, 18, 225,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 17414, 67, 542, 67, 5129, 13332, 3536, 7736, 10892, 18, 542, 67, 5129, 628, 279, 31263, 395, 18, 225, 4554, 848, 999, 10892, 18, 542, 67, 5129, 628, 279, 31263, 395, 18, 225,...
self.pml.raiseError("You forgot to close a \"" + last_tab_keyword[0] + "\" in " + self.template + " near line: " + str(last_tab_keyword[1]) + "\n")
self.pml.raise_error("You forgot to close a \"%s\" in %s near line: %s" % (last_tab_keyword[0], self.template, str(last_tab_keyword[1])))
def compile_pyblock(self, block): buffer = "" for line in re.compile("\<\?\s*(.*?)\s+?\?\>", re.IGNORECASE | re.DOTALL).split(block)[1].splitlines(): keywords = line.strip().split(" ") keyword = keywords[0].split(":")[0].strip().lower() if hasattr(self.keyword_handler, "handle_" + keyword): buffer += getattr(self.keyword_handler, "handle_" + keyword)(line, keyword, keywords) elif keyword.startswith("="): buffer += self.format_pyline("sys.stdout.write(" + keyword.split("=")[1].strip() + ")") elif keyword in self.tab_keywords: stack_item = (keyword, self.current_line) self._keyword_stack.append(stack_item) buffer += self.format_pyline(line) self.tab_depth += 1 elif keyword in self.untab_keywords: last_tab_keyword = self._keyword_stack.pop() if keyword != "#" + last_tab_keyword[0]: self.pml.raiseError("You forgot to close a \"" + last_tab_keyword[0] + "\" in " + self.template + " near line: " + str(last_tab_keyword[1]) + "\n") self.tab_depth -= 1 else: buffer += self.format_pyline(line) return buffer
70bce77637506cd6ca12d2c9c70c85321ff65223 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12317/70bce77637506cd6ca12d2c9c70c85321ff65223/pml.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4074, 67, 2074, 2629, 12, 2890, 16, 1203, 4672, 1613, 273, 1408, 225, 364, 980, 316, 283, 18, 11100, 31458, 31428, 10936, 87, 14, 27482, 87, 15, 10936, 10936, 2984, 16, 283, 18, 20118, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4074, 67, 2074, 2629, 12, 2890, 16, 1203, 4672, 1613, 273, 1408, 225, 364, 980, 316, 283, 18, 11100, 31458, 31428, 10936, 87, 14, 27482, 87, 15, 10936, 10936, 2984, 16, 283, 18, 20118, ...
return self._getCachedInfo()['bio']['content']
return self._getCachedInfo('bio', 'content')
def getBioContent(self): """Returns the content of the artist's biography. """ return self._getCachedInfo()['bio']['content']
ec1304614ea5bc838c2f5bb83444fc59ac9c26d8 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/9926/ec1304614ea5bc838c2f5bb83444fc59ac9c26d8/pylast.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2882, 1594, 1350, 12, 2890, 4672, 3536, 1356, 326, 913, 434, 326, 15469, 1807, 10054, 15669, 18, 3536, 225, 327, 365, 6315, 588, 9839, 966, 2668, 21010, 2187, 296, 1745, 6134, 225, 2, 0,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2882, 1594, 1350, 12, 2890, 4672, 3536, 1356, 326, 913, 434, 326, 15469, 1807, 10054, 15669, 18, 3536, 225, 327, 365, 6315, 588, 9839, 966, 2668, 21010, 2187, 296, 1745, 6134, 225, 2, -1...
def dual_free_module(self, bound=None, anemic=True): """
def dual_free_module(self, bound=None, anemic=True, use_star=True): r"""
def dual_free_module(self, bound=None, anemic=True): """ Compute embedded dual free module if possible. In general this won't be possible, e.g., if this space is not Hecke equivariant, possibly if it is not cuspidal, or if the characteristic is not 0. In all these cases we raise a RuntimeError exception. """ try: return self.__dual_free_module except AttributeError: if self.dimension() == 0: self.__dual_free_module = self.ambient_hecke_module().dual_free_module().zero_submodule() return self.__dual_free_module # ALGORITHM: Compute the char poly of each Hecke operator on the # submodule, then use it to cut out a submodule of the dual. If # the dimension cuts down to the dimension of self terminate # with success. If it stays bigger beyond the bound (Sturm) # bound, raise a RuntimeError exception. misc.verbose("computing") N = self.level() A = self.ambient_hecke_module() if A.dimension() == self.dimension(): self.__dual_free_module = A.free_module() return self.__dual_free_module V = A.free_module() p = 2 if bound is None: bound = A.hecke_bound() while True: misc.verbose("using T_%s"%p) if anemic: while N % p == 0: p = arith.next_prime(p) f = self.hecke_polynomial(p) T = A.dual_hecke_matrix(p) V = T.kernel_on(V, poly=f, check=False) if V.dimension() <= self.dimension(): break p = arith.next_prime(p) if p > bound: break
44390c2f3d28a6980046467c7676b7049c7f3ea1 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/9890/44390c2f3d28a6980046467c7676b7049c7f3ea1/submodule.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 24557, 67, 9156, 67, 2978, 12, 2890, 16, 2489, 33, 7036, 16, 392, 351, 335, 33, 5510, 16, 999, 67, 10983, 33, 5510, 4672, 436, 8395, 8155, 7488, 24557, 4843, 1605, 309, 3323, 18, 225, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 24557, 67, 9156, 67, 2978, 12, 2890, 16, 2489, 33, 7036, 16, 392, 351, 335, 33, 5510, 16, 999, 67, 10983, 33, 5510, 4672, 436, 8395, 8155, 7488, 24557, 4843, 1605, 309, 3323, 18, 225, ...
ok, statusRemainder = statusLine.split(None, 1)
statusData = statusLine.split() ok = statusData[0]
def onRetr(self, command, args, response): """Adds the judgement header based on the raw headers and body of the message.""" # Previously, we used '\n\r?\n' to detect the end of the headers in # case of broken emails that don't use the proper line separators, # and if we couldn't find it, then we assumed that the response was # and error response and passed it unfiltered. However, if the # message doesn't contain the separator (malformed mail), then this # would mean the message was passed straight through the proxy. # Since all the content is then in the headers, this probably # doesn't do a spammer much good, but, just in case, we now just # check for "+OK" and assume no error response will be given if # that is (which seems reasonable). # Remove the trailing .\r\n before passing to the email parser. # Thanks to Scott Schlesier for this fix. terminatingDotPresent = (response[-4:] == '\n.\r\n') if terminatingDotPresent: response = response[:-3]
1ad23526bab61f9198a584dd662e544c1df01577 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/6126/1ad23526bab61f9198a584dd662e544c1df01577/sb_server.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 603, 7055, 86, 12, 2890, 16, 1296, 16, 833, 16, 766, 4672, 3536, 3655, 326, 525, 1100, 75, 820, 1446, 2511, 603, 326, 1831, 1607, 471, 1417, 434, 326, 883, 12123, 468, 19369, 715, 16, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 603, 7055, 86, 12, 2890, 16, 1296, 16, 833, 16, 766, 4672, 3536, 3655, 326, 525, 1100, 75, 820, 1446, 2511, 603, 326, 1831, 1607, 471, 1417, 434, 326, 883, 12123, 468, 19369, 715, 16, ...
except ExtractError, e:
except ExtractError: e = sys.exc_info()[1]
def sorter(dir1, dir2): return cmp(dir1.name, dir2.name)
7785ccade4b92867c492f573e9b548c1ba612cd7 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/495/7785ccade4b92867c492f573e9b548c1ba612cd7/distribute_setup.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 19867, 12, 1214, 21, 16, 1577, 22, 4672, 327, 9411, 12, 1214, 21, 18, 529, 16, 1577, 22, 18, 529, 13, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 19867, 12, 1214, 21, 16, 1577, 22, 4672, 327, 9411, 12, 1214, 21, 18, 529, 16, 1577, 22, 18, 529, 13, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, ...
head_size -= 1
def createFields(self): yield Bit(self, "continued_from", "File continued from previous volume") yield Bit(self, "continued_in", "File continued in next volume") yield Bit(self, "encrypted", "File encrypted with password") yield Bit(self, "has_comment", "File comment present") yield Bit(self, "is_solid", "Information from previous files is used (solid flag)") yield Enum(Bits(self, "dictionary_size", 3, "Dictionary size"), self.dictionary_size) yield Bit(self, "has_extended_size", "Additional field indicating body size") yield Bit(self, "is_ignorable", "Old versions of RAR should ignore this block when copying data") yield Bit(self, "is_large", "file64 operations needed") yield Bit(self, "is_unicode", "Filename also encoded using Unicode") yield Bit(self, "has_salt", "Has salt for encryption") yield Bit(self, "uses_file_version", "File versioning is used") yield Bit(self, "bexttime", "Extra time ??") yield Bit(self, "bextflag", "Extra flag ??") head_size = UInt16(self, "head_size", "File header full size including file name and comments", text_handler=humanFilesize) yield head_size head_size = head_size.value - (2+1+2+2) yield UInt32(self, "compressed_size", "Compressed size (bytes)", text_handler=humanFilesize) yield UInt32(self, "uncompressed_size", "Uncompressed size (bytes)", text_handler=humanFilesize) os = UInt8(self, "host_os", "Operating system used for archiving") yield Enum(os, self.host_os) os = os.value yield UInt32(self, "file_crc", "File CRC32", text_handler=hexadecimal) yield UInt32(self, "ftime", "Date and time (MS DOS format)", text_handler=timestampMSDOS) yield UInt8(self, "version", "RAR version needed to extract file", text_handler=RarVersion) yield Enum(UInt8(self, "method", "Packing method"), BaseBlock.compression_name) size = UInt16(self, "filename_length", "File name size", text_handler=humanFilesize) yield size size = size.value if os==0 or os==2: yield UInt32(self, "file_attr", "File attributes", text_handler=MSDOSFileAttr) else: yield 
UInt32(self, "attr", "File attributes", text_handler=hexadecimal) head_size -= 4+4+1+4+4+1+1+2+4 if self["is_large"].value: yield UInt64(self, "large_size", "Extended 64bits filesize", text_handler=humanFilesize) head_size -= 8 if self["is_unicode"].value: ParserError("Can't handle unicode filenames.") if self["has_salt"].value: yield UInt8(self, "salt", "Encryption salt value") head_size -= 1 if self["bexttime"].value: yield UInt16(self, "time_flags", "Flags for extended time", text_handler=hexadecimal) # Needs to be decoded more if size > 0: yield String(self, "filename", size, "Filename") head_size -= size
9be6b66d3e7ceacca1ad4a124689b228fec9523a /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/9327/9be6b66d3e7ceacca1ad4a124689b228fec9523a/rar.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 752, 2314, 12, 2890, 4672, 2824, 6539, 12, 2890, 16, 315, 1213, 267, 5957, 67, 2080, 3113, 315, 812, 17545, 5957, 628, 2416, 3940, 7923, 2824, 6539, 12, 2890, 16, 315, 1213, 267, 5957, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 752, 2314, 12, 2890, 4672, 2824, 6539, 12, 2890, 16, 315, 1213, 267, 5957, 67, 2080, 3113, 315, 812, 17545, 5957, 628, 2416, 3940, 7923, 2824, 6539, 12, 2890, 16, 315, 1213, 267, 5957, ...
"""Fails if the string 'str1' contains the string 'str2' one or more times.
"""Fails if the string 'str1' contains the string 'str2' one or more times.
def should_not_contain(self, str1, str2, msg=None, values=True): """Fails if the string 'str1' contains the string 'str2' one or more times. See 'Should Be Equal' for an explanation on how to override the default error message with 'msg' and 'values'. """ msg = self._get_string_msg(str1, str2, msg, values, 'contains') asserts.fail_if(str1.count(str2) > 0, msg)
dfa75ac2320d3c97bf00a5f0b376db63803b6c8e /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/6988/dfa75ac2320d3c97bf00a5f0b376db63803b6c8e/BuiltIn.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1410, 67, 902, 67, 28744, 12, 2890, 16, 609, 21, 16, 609, 22, 16, 1234, 33, 7036, 16, 924, 33, 5510, 4672, 3536, 30800, 309, 326, 533, 296, 701, 21, 11, 1914, 326, 533, 296, 701, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1410, 67, 902, 67, 28744, 12, 2890, 16, 609, 21, 16, 609, 22, 16, 1234, 33, 7036, 16, 924, 33, 5510, 4672, 3536, 30800, 309, 326, 533, 296, 701, 21, 11, 1914, 326, 533, 296, 701, 2...
else: self._syntax_error('<onDraw/> needs at least a name attribute')
else: self._syntax_error('<onDraw> needs at least a name attribute')
def start_onDraw(self,attr): defn = ParaFrag() if attr.has_key('name'): defn.name = attr['name'] else: self._syntax_error('<onDraw/> needs at least a name attribute')
7b462be10fe3d340691a7a96fb9631ffef0a3443 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3878/7b462be10fe3d340691a7a96fb9631ffef0a3443/paraparser.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 787, 67, 265, 6493, 12, 2890, 16, 1747, 4672, 1652, 82, 273, 2280, 69, 19509, 1435, 309, 1604, 18, 5332, 67, 856, 2668, 529, 11, 4672, 1652, 82, 18, 529, 273, 1604, 3292, 529, 3546, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 787, 67, 265, 6493, 12, 2890, 16, 1747, 4672, 1652, 82, 273, 2280, 69, 19509, 1435, 309, 1604, 18, 5332, 67, 856, 2668, 529, 11, 4672, 1652, 82, 18, 529, 273, 1604, 3292, 529, 3546, ...
return self.label_buttongroup.checkedButton() def create_choice_list(self, list_name, elements = []): vlayout = QVBoxLayout() vlayout.setSpacing(0) vlayout.addWidget(QLabel("<center><b>" + list_name + "</b></center>")) if len(elements) > 0: buttongroup = QButtonGroup() for element in elements: self.create_button(element) return vlayout
return self.label_button_list.get_checked_button()
def get_checked_label_button(self): return self.label_buttongroup.checkedButton()
142f548695693cef661e597f3671d76c6a78dea0 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/142/142f548695693cef661e597f3671d76c6a78dea0/buttonarea.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 336, 67, 4532, 67, 1925, 67, 5391, 12, 2890, 4672, 327, 365, 18, 1925, 67, 12885, 88, 932, 656, 18, 4532, 3616, 1435, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 336, 67, 4532, 67, 1925, 67, 5391, 12, 2890, 4672, 327, 365, 18, 1925, 67, 12885, 88, 932, 656, 18, 4532, 3616, 1435, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -10...
return type( credDict[ 'group' ] ) == types.TupleType and \
return 'group' in credDict and type( credDict[ 'group' ] ) == types.TupleType and \
def forwardedCredentials( self, credDict ): trustedHostsList = gConfig.getValue( "/DIRAC/Security/TrustedHosts", [] ) return type( credDict[ 'group' ] ) == types.TupleType and \ 'DN' in credDict and \ credDict[ 'DN' ] in trustedHostsList
02c2490d347256c543c27bb2d8d4981e4f0ac061 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/12864/02c2490d347256c543c27bb2d8d4981e4f0ac061/AuthManager.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 19683, 6163, 12, 365, 16, 6197, 5014, 262, 30, 13179, 12172, 682, 273, 314, 809, 18, 24805, 12, 2206, 4537, 2226, 19, 4368, 19, 16950, 12172, 3113, 5378, 262, 327, 296, 1655, 11, 316, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 19683, 6163, 12, 365, 16, 6197, 5014, 262, 30, 13179, 12172, 682, 273, 314, 809, 18, 24805, 12, 2206, 4537, 2226, 19, 4368, 19, 16950, 12172, 3113, 5378, 262, 327, 296, 1655, 11, 316, ...
find_plugins(doc.firstChild)
for plugin in plugins: out.write('\'%s\', ' % plugin.strip('. ,'))
def find_plugins(node, position=''): for child in node.childNodes: if not child.nodeName == 'group': continue if child.hasAttribute('is_plugin'): name = "%s%s" % (position, child.getAttribute('name')) out.write('\'%s\', ' % name.strip('. ,')) find_plugins(child, position + child.getAttribute('name') + '.')
d9ac152ca72e84f19dca7bca1ffe5fc1c248f73f /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/11399/d9ac152ca72e84f19dca7bca1ffe5fc1c248f73f/xmlconfig.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1104, 67, 8057, 12, 2159, 16, 1754, 2218, 11, 4672, 364, 1151, 316, 756, 18, 3624, 3205, 30, 309, 486, 1151, 18, 2159, 461, 422, 296, 1655, 4278, 1324, 309, 1151, 18, 5332, 1499, 2668,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1104, 67, 8057, 12, 2159, 16, 1754, 2218, 11, 4672, 364, 1151, 316, 756, 18, 3624, 3205, 30, 309, 486, 1151, 18, 2159, 461, 422, 296, 1655, 4278, 1324, 309, 1151, 18, 5332, 1499, 2668,...
if not self._metadata_popupated:
if not self._metadata_populated:
def version(self): if not self._metadata_popupated: self.parse_metadata() return self._version
d7301a0b869a76845ebfdf478ff07d95dc243e3a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13693/d7301a0b869a76845ebfdf478ff07d95dc243e3a/source.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1177, 12, 2890, 4672, 309, 486, 365, 6315, 4165, 67, 5120, 11799, 30, 365, 18, 2670, 67, 4165, 1435, 327, 365, 6315, 1589, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1177, 12, 2890, 4672, 309, 486, 365, 6315, 4165, 67, 5120, 11799, 30, 365, 18, 2670, 67, 4165, 1435, 327, 365, 6315, 1589, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, ...
if matcher(path):
if matcher(path[baselen:]):
def visit(ignored, dir, files): if os.path.basename(dir) not in test_dirs: return if '__init__.py' not in files: print >> sys.stderr, "%s is not a package" % dir return for file in files: if file.startswith('test') and file.endswith('.py'): path = os.path.join(dir, file) if matcher(path): results.append(path)
ae0ebb20226646d3db8007834d0f4dd048aeacb6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/7127/ae0ebb20226646d3db8007834d0f4dd048aeacb6/test.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3757, 12, 24055, 16, 1577, 16, 1390, 4672, 309, 1140, 18, 803, 18, 13909, 12, 1214, 13, 486, 316, 1842, 67, 8291, 30, 327, 309, 4940, 2738, 25648, 2074, 11, 486, 316, 1390, 30, 1172, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3757, 12, 24055, 16, 1577, 16, 1390, 4672, 309, 1140, 18, 803, 18, 13909, 12, 1214, 13, 486, 316, 1842, 67, 8291, 30, 327, 309, 4940, 2738, 25648, 2074, 11, 486, 316, 1390, 30, 1172, ...
"""
"""
def onUpdatePreset(self, event = None): """ Update the GUI based on the selected preset """ sel = self.preset.GetSelection() flag = (sel == 0) self.formatMenu.Enable(flag) self.outputFormat.Enable(flag) self.encoder.setPreset(sel) if not flag: oldformat1 = self.formatMenu.GetStringSelection() oldformat2 = self.outputFormat.GetStringSelection() self.oldformat = (oldformat1, oldformat2) self.formatMenu.SetStringSelection("Video") self.outputFormat.SetStringSelection("MPEG2") else: if self.oldformat: oldformat1, oldformat2 = self.oldformat self.formatMenu.SetStringSelection(oldformat1) self.outputFormat.SetStringSelection(oldformat2)
9804318c9256b2a84b102aa79469825f7234a484 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/2877/9804318c9256b2a84b102aa79469825f7234a484/VideoGeneration.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 27728, 18385, 12, 2890, 16, 871, 273, 599, 4672, 3536, 2315, 326, 10978, 2511, 603, 326, 3170, 12313, 3536, 357, 273, 365, 18, 27524, 18, 967, 6233, 1435, 2982, 273, 261, 1786, 422, 374,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 27728, 18385, 12, 2890, 16, 871, 273, 599, 4672, 3536, 2315, 326, 10978, 2511, 603, 326, 3170, 12313, 3536, 357, 273, 365, 18, 27524, 18, 967, 6233, 1435, 2982, 273, 261, 1786, 422, 374,...
class ReviewRequestStarColumn(StarColumn):
class ReviewGroupStarColumn(StarColumn):
def render_data(self, obj): obj.starred = self.all_starred.get(obj.id, False) return render_star(self.datagrid.request.user, obj)
accfa93342fed2272bf990d4515a41fc3996a4ff /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/1600/accfa93342fed2272bf990d4515a41fc3996a4ff/datagrids.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1743, 67, 892, 12, 2890, 16, 1081, 4672, 1081, 18, 10983, 1118, 273, 365, 18, 454, 67, 10983, 1118, 18, 588, 12, 2603, 18, 350, 16, 1083, 13, 327, 1743, 67, 10983, 12, 2890, 18, 3404...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1743, 67, 892, 12, 2890, 16, 1081, 4672, 1081, 18, 10983, 1118, 273, 365, 18, 454, 67, 10983, 1118, 18, 588, 12, 2603, 18, 350, 16, 1083, 13, 327, 1743, 67, 10983, 12, 2890, 18, 3404...
optionflags=doctest.ELLIPSIS + \ doctest.REPORT_ONLY_FIRST_FAILURE)
globs={'interact': interact}, optionflags=doctest.ELLIPSIS + \ doctest.REPORT_ONLY_FIRST_FAILURE)
def test_suite(): return doctest.DocFileSuite('xmlwriter.txt', optionflags=doctest.ELLIPSIS + \ doctest.REPORT_ONLY_FIRST_FAILURE)
015e54337f11c8a1c2172191cc0a8925a91a81b9 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/3797/015e54337f11c8a1c2172191cc0a8925a91a81b9/tests.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 30676, 13332, 327, 31263, 395, 18, 1759, 812, 13587, 2668, 2902, 6299, 18, 5830, 2187, 4715, 87, 5899, 11, 2761, 621, 4278, 16592, 5779, 1456, 7133, 33, 2896, 299, 395, 18, 224...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1842, 67, 30676, 13332, 327, 31263, 395, 18, 1759, 812, 13587, 2668, 2902, 6299, 18, 5830, 2187, 4715, 87, 5899, 11, 2761, 621, 4278, 16592, 5779, 1456, 7133, 33, 2896, 299, 395, 18, 224...
print "Caffiene is now dormant; powersaving is re-enabled"
print "Caffeine is now dormant; powersaving is re-enabled"
def attemptToToggleSleepPrevention(): """This function may fail to peform the toggling, if it cannot find the required bus. In this case, it will return False.""" global sleepPrevented, screenSaverCookie, powerManagementCookie, timer bus = dbus.SessionBus() if sleepPrevented: ssProxy = None if 'org.gnome.ScreenSaver' in bus.list_names(): # For Gnome ssProxy = bus.get_object('org.gnome.ScreenSaver', '/org/gnome/ScreenSaver') elif 'org.freedesktop.ScreenSaver' in bus.list_names() and \ 'org.freedesktop.PowerManagement.Inhibit' in bus.list_names(): # For KDE ssProxy = bus.get_object('org.freedesktop.ScreenSaver', '/ScreenSaver') pmProxy = bus.get_object('org.freedesktop.PowerManagement.Inhibit', '/org/freedesktop/PowerManagement/Inhibit') if powerManagementCookie != None: pmProxy.UnInhibit(powerManagementCookie) else: return False if screenSaverCookie != None: ssProxy.UnInhibit(screenSaverCookie) sleepPrevented = False print "Caffiene is now dormant; powersaving is re-enabled" # If the user clicks on the full coffee-cup to disable sleep prevention, it should also # cancel the timer for timed activation. 
if timer != None: print "Cancelling the 'timed activation' timer (was set for " + str(timer.interval) + " seconds)" timer.cancel() timer = None else: probableWindowManager = "" ssProxy = None if 'org.gnome.ScreenSaver' in bus.list_names(): # For Gnome probableWindowManager = "Gnome" ssProxy = bus.get_object('org.gnome.ScreenSaver', '/org/gnome/ScreenSaver') elif 'org.freedesktop.ScreenSaver' in bus.list_names() and \ 'org.freedesktop.PowerManagement.Inhibit' in bus.list_names(): # For KDE probableWindowManager = "KDE" ssProxy = bus.get_object('org.freedesktop.ScreenSaver', '/ScreenSaver') pmProxy = bus.get_object('org.freedesktop.PowerManagement.Inhibit', '/org/freedesktop/PowerManagement/Inhibit') powerManagementCookie = pmProxy.Inhibit("Caffeine", "User has requested that Caffeine disable the powersaving modes") else: return False screenSaverCookie = ssProxy.Inhibit("Caffeine", "User has requested that Caffeine disable the screen saver") sleepPrevented = True print "Caffiene is now preventing powersaving modes and screensaver activation (" + probableWindowManager + ")" return True
1da1a56726ea58548f0f41a14524fbd7d1f2e84d /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/7152/1da1a56726ea58548f0f41a14524fbd7d1f2e84d/caffeine.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4395, 774, 17986, 20768, 1386, 11111, 13332, 3536, 2503, 445, 2026, 2321, 358, 2804, 687, 326, 6316, 75, 2456, 16, 309, 518, 2780, 1104, 326, 1931, 5766, 18, 657, 333, 648, 16, 518, 903,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 4395, 774, 17986, 20768, 1386, 11111, 13332, 3536, 2503, 445, 2026, 2321, 358, 2804, 687, 326, 6316, 75, 2456, 16, 309, 518, 2780, 1104, 326, 1931, 5766, 18, 657, 333, 648, 16, 518, 903,...
self._f = self.gen(0,1,2)
self._f = self.gen(*self._a0a1d)
def _precompute(self, how_many=20): try: f = self._f except AttributeError: self._f = self.gen(0,1,2) f = self._f self._b += [f.next() for i in range(how_many)]
8b300fed02e947af57b58146f62dccfc715cf0b9 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/9890/8b300fed02e947af57b58146f62dccfc715cf0b9/sloane_functions.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 1484, 9200, 12, 2890, 16, 3661, 67, 9353, 33, 3462, 4672, 775, 30, 284, 273, 365, 6315, 74, 1335, 6394, 30, 365, 6315, 74, 273, 365, 18, 4507, 30857, 2890, 6315, 69, 20, 69, 21,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 1484, 9200, 12, 2890, 16, 3661, 67, 9353, 33, 3462, 4672, 775, 30, 284, 273, 365, 6315, 74, 1335, 6394, 30, 365, 6315, 74, 273, 365, 18, 4507, 30857, 2890, 6315, 69, 20, 69, 21,...
result += "|" + res[i][3] + "\n" result += "|" + re.sub(r'@\*', "\n*", res[i][4]) + "\n"
result += "| " + res[i][3] + "\n" result += "| " + re.sub(r'@\*', "\n*", res[i][4]) + "\n"
def create_container_table(data): """Creates a table for a container."""
22886e3d2ac9ebe679b9ccd6010e89e8fc51ae1c /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/9355/22886e3d2ac9ebe679b9ccd6010e89e8fc51ae1c/wiki_grabber.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 752, 67, 3782, 67, 2121, 12, 892, 4672, 3536, 2729, 279, 1014, 364, 279, 1478, 12123, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 752, 67, 3782, 67, 2121, 12, 892, 4672, 3536, 2729, 279, 1014, 364, 279, 1478, 12123, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -...
return x = zeros(n)
x_is_zero = True istop = 0
def solve(self, rhs, itnlim=0, damp=0.0, atol=1.0e-6, btol=1.0e-7, conlim=1.0e+8, radius=None, show=False, wantvar=False): """ Solve the linear system, linear least-squares problem or regularized linear least-squares problem with specified parameters. All return values below are stored in members of the same name.
73210df496e156d4e6514bbae574265a41b74ad8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13857/73210df496e156d4e6514bbae574265a41b74ad8/lsqr.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 12439, 12, 2890, 16, 7711, 16, 518, 82, 7091, 33, 20, 16, 302, 931, 33, 20, 18, 20, 16, 26322, 33, 21, 18, 20, 73, 17, 26, 16, 324, 3490, 33, 21, 18, 20, 73, 17, 27, 16, 356, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 12439, 12, 2890, 16, 7711, 16, 518, 82, 7091, 33, 20, 16, 302, 931, 33, 20, 18, 20, 16, 26322, 33, 21, 18, 20, 73, 17, 26, 16, 324, 3490, 33, 21, 18, 20, 73, 17, 27, 16, 356, ...
real_double_vector = Extension('sage.modules.real_double_vector',['sage/modules/real_double_vector.pyx'], libraries = ['gsl', BLAS, BLAS2, 'pari','gmp'],define_macros = [('GSL_DISABLE_DEPRECATED','1')],include_dirs=debian_include_dirs + [SAGE_ROOT+'/local/lib/python2.5/site-packages/numpy/core/include/numpy']) complex_double_vector = Extension('sage.modules.complex_double_vector',['sage/modules/complex_double_vector.pyx'], libraries = ['gsl', BLAS, BLAS2, 'pari', 'gmp'],define_macros=[('GSL_DISABLE_DEPRECATED','1')],include_dirs=debian_include_dirs + [SAGE_ROOT+'/local/lib/python2.5/site-packages/numpy/core/include/numpy'])
real_double_vector = Extension('sage.modules.real_double_vector', ['sage/modules/real_double_vector.pyx'], libraries = ['gsl', BLAS, BLAS2, 'pari','gmp'], define_macros = [('GSL_DISABLE_DEPRECATED','1')], include_dirs=debian_include_dirs + [SAGE_ROOT+'/local/lib/python2.5/site-packages/numpy/core/include/numpy']) complex_double_vector = Extension('sage.modules.complex_double_vector', ['sage/modules/complex_double_vector.pyx'], libraries = ['gsl', BLAS, BLAS2, 'pari', 'gmp'], define_macros=[('GSL_DISABLE_DEPRECATED','1')], include_dirs=debian_include_dirs + [SAGE_ROOT+'/local/lib/python2.5/site-packages/numpy/core/include/numpy'])
def is_newer(file1, file2): """ Return True if either file2 does not exist or is older than file1. If file1 does not exist, always return False. """ if not os.path.exists(file1): return False if not os.path.exists(file2): return True if os.path.getmtime(file2) < os.path.getmtime(file1): return True return False
697d48ce28ff4d12606adf26a8fb095216c31678 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/9890/697d48ce28ff4d12606adf26a8fb095216c31678/setup.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 353, 67, 2704, 264, 12, 768, 21, 16, 585, 22, 4672, 3536, 2000, 1053, 309, 3344, 585, 22, 1552, 486, 1005, 578, 353, 12156, 2353, 585, 21, 18, 225, 971, 585, 21, 1552, 486, 1005, 16,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 353, 67, 2704, 264, 12, 768, 21, 16, 585, 22, 4672, 3536, 2000, 1053, 309, 3344, 585, 22, 1552, 486, 1005, 578, 353, 12156, 2353, 585, 21, 18, 225, 971, 585, 21, 1552, 486, 1005, 16,...
if isinstance(request.request, dict): keydict.update(request.request)
real_req = request.request if isinstance(real_req, dict): keydict.update(real_req) if getattr(real_req, 'form', None) is not None: if isinstance(real_req.form, dict): keydict.update(real_req.form)
def search(self, request, sort_index=None, reverse=0, limit=None, merge=1): advancedtypes = tuple(ADVANCEDTYPES) rs = None # resultset # Note that if the indexes find query arguments, but the end result # is an empty sequence, we do nothing prioritymap = getattr(self, '_v_prioritymap', None) if prioritymap is None: if DEFAULT_PRIORITYMAP is not None: identifier = '/'.join(self.getPhysicalPath()) default = DEFAULT_PRIORITYMAP.get(identifier, None) if default is not None: prioritymap = self._v_prioritymap = default.copy() else: prioritymap = self._v_prioritymap = {} else: prioritymap = self._v_prioritymap = {} valueindexes = getattr(self, '_v_valueindexes', None) if valueindexes is None: valueindexes = self._v_valueindexes = determine_value_indexes(self) if isinstance(request, dict): keydict = request.copy() else: keydict = {} keydict.update(request.keywords) if isinstance(request.request, dict): keydict.update(request.request) key = keys = keydict.keys() values = [name for name in keys if name in valueindexes] if values: # If we have indexes whose values should be considered, we first # preserve all normal indexes and then add the keys whose values # matter including their value into the key key = [name for name in keys if name not in values] for name in values: # We need to make sure the key is immutable, repr() is an easy way # to do this without imposing restrictions on the types of values key.append((name, repr(keydict.get(name, '')))) key = tuple(sorted(key)) indexes = prioritymap.get(key, []) start = time() index_times = {} if not indexes: pri = [] for i in self.indexes.keys(): if i not in keys: # Do not ask indexes to restrict the result, which aren't part # of the query continue index = self.getIndex(i) _apply_index = getattr(index, "_apply_index", None) if _apply_index is None: continue r = _apply_index(request) result_len = 0 if r is not None: r, u = r result_len = len(r) w, rs = weightedIntersection(rs, r) pri.append((isinstance(index, advancedtypes), 
result_len, i)) pri.sort() prioritymap[key] = [p[-1] for p in pri] else: for i in indexes: index = self.getIndex(i) _apply_index = getattr(index, "_apply_index", None) if _apply_index is None: continue index_times[i] = time() if isinstance(index, advancedtypes): r = _apply_index(request, res=rs) else: r = _apply_index(request) index_times[i] = time() - index_times[i] if r is not None: # Short circuit if empty result r, u = r if not r: return LazyCat([]) w, rs = weightedIntersection(rs, r) duration = time() - start if LOG_SLOW_QUERIES and duration >= LONG_QUERY_TIME: detailed_times = [] for i, t in index_times.items(): detailed_times.append("%s : %3.2fms" % (i, t*1000)) info = 'query: %3.2fms, priority: %s, key: %s' % (duration*1000, indexes, key) if detailed_times: info += ', detailed: %s' % (', '.join(detailed_times)) logger.info(info) if rs is None: # None of the indexes found anything to do with the request # We take this to mean that the query was empty (an empty filter) # and so we return everything in the catalog if sort_index is None: return LazyMap(self.instantiate, self.data.items(), len(self)) else: return self.sortResults( self.data, sort_index, reverse, limit, merge) elif rs: # We got some results from the indexes. # Sort and convert to sequences. # XXX: The check for 'values' is really stupid since we call # items() and *not* values() if sort_index is None and hasattr(rs, 'values'): # having a 'values' means we have a data structure with # scores. Build a new result set, sort it by score, reverse # it, compute the normalized score, and Lazify it. 
if not merge: # Don't bother to sort here, return a list of # three tuples to be passed later to mergeResults # note that data_record_normalized_score_ cannot be # calculated and will always be 1 in this case getitem = self.__getitem__ return [(score, (1, score, rid), getitem) for rid, score in rs.items()] rs = rs.byValue(0) # sort it by score max = float(rs[0][0]) # Here we define our getter function inline so that # we can conveniently store the max value as a default arg # and make the normalized score computation lazy def getScoredResult(item, max=max, self=self): """ Returns instances of self._v_brains, or whatever is passed into self.useBrains. """ score, key = item r=self._v_result_class(self.data[key])\ .__of__(self.aq_parent) r.data_record_id_ = key r.data_record_score_ = score r.data_record_normalized_score_ = int(100. * score / max) return r return LazyMap(getScoredResult, rs, len(rs)) elif sort_index is None and not hasattr(rs, 'values'): # no scores if hasattr(rs, 'keys'): rs = rs.keys() return LazyMap(self.__getitem__, rs, len(rs)) else: # sort. If there are scores, then this block is not # reached, therefore 'sort-on' does not happen in the # context of a text index query. This should probably # sort by relevance first, then the 'sort-on' attribute. return self.sortResults(rs, sort_index, reverse, limit, merge) else: # Empty result set return LazyCat([])
17f2ece58a07cbad84d330d3fe2ab1eb09d6fef1 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/12481/17f2ece58a07cbad84d330d3fe2ab1eb09d6fef1/catalog.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1623, 12, 2890, 16, 590, 16, 1524, 67, 1615, 33, 7036, 16, 4219, 33, 20, 16, 1800, 33, 7036, 16, 2691, 33, 21, 4672, 16111, 2352, 273, 3193, 12, 1880, 58, 4722, 40, 10564, 13, 3597, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1623, 12, 2890, 16, 590, 16, 1524, 67, 1615, 33, 7036, 16, 4219, 33, 20, 16, 1800, 33, 7036, 16, 2691, 33, 21, 4672, 16111, 2352, 273, 3193, 12, 1880, 58, 4722, 40, 10564, 13, 3597, ...
Cw = Matrix(P,e,e, columns).transpose()
Cw = Matrix(P, e, e, columns).transpose()
def inversion_polynomials_single_sbox(self, x= None, w=None, biaffine_only=None, correct_only=None): """ Generator for S-Box inversion polynomials of a single sbox.
cd82551727ddbae04c5b28f55b59ec14654a84ab /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/9890/cd82551727ddbae04c5b28f55b59ec14654a84ab/sr.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 316, 1589, 67, 3915, 13602, 87, 67, 7526, 67, 87, 2147, 12, 2890, 16, 619, 33, 599, 16, 341, 33, 7036, 16, 324, 1155, 1403, 558, 67, 3700, 33, 7036, 16, 3434, 67, 3700, 33, 7036, 4...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 316, 1589, 67, 3915, 13602, 87, 67, 7526, 67, 87, 2147, 12, 2890, 16, 619, 33, 599, 16, 341, 33, 7036, 16, 324, 1155, 1403, 558, 67, 3700, 33, 7036, 16, 3434, 67, 3700, 33, 7036, 4...
buttonSaveCustomKeys=Button(frameCustom,
frames = [Frame(frameKeySets, padx=2, pady=2, borderwidth=0) for i in range(2)] self.radioKeysBuiltin=Radiobutton(frames[0],variable=self.keysAreBuiltin, value=1,command=self.SetKeysType,text='Use a Built-in Key Set') self.radioKeysCustom=Radiobutton(frames[0],variable=self.keysAreBuiltin, value=0,command=self.SetKeysType,text='Use a Custom Key Set') self.optMenuKeysBuiltin=DynOptionMenu(frames[0], self.builtinKeys,None,command=None) self.optMenuKeysCustom=DynOptionMenu(frames[0], self.customKeys,None,command=None) self.buttonDeleteCustomKeys=Button(frames[1],text='Delete Custom Key Set', command=self.DeleteCustomKeys) buttonSaveCustomKeys=Button(frames[1],
def CreatePageKeys(self): #tkVars self.bindingTarget=StringVar(self) self.builtinKeys=StringVar(self) self.customKeys=StringVar(self) self.keysAreBuiltin=BooleanVar(self) self.keyBinding=StringVar(self) ##widget creation #body frame frame=self.tabPages.pages['Keys'].frame #body section frames frameCustom=LabelFrame(frame,borderwidth=2,relief=GROOVE, text=' Custom Key Bindings ') frameKeySets=LabelFrame(frame,borderwidth=2,relief=GROOVE, text=' Key Set ') #frameCustom frameTarget=Frame(frameCustom) labelTargetTitle=Label(frameTarget,text='Action - Key(s)') scrollTargetY=Scrollbar(frameTarget) scrollTargetX=Scrollbar(frameTarget,orient=HORIZONTAL) self.listBindings=Listbox(frameTarget,takefocus=FALSE, exportselection=FALSE) self.listBindings.bind('<ButtonRelease-1>',self.KeyBindingSelected) scrollTargetY.config(command=self.listBindings.yview) scrollTargetX.config(command=self.listBindings.xview) self.listBindings.config(yscrollcommand=scrollTargetY.set) self.listBindings.config(xscrollcommand=scrollTargetX.set) self.buttonNewKeys=Button(frameCustom,text='Get New Keys for Selection', command=self.GetNewKeys,state=DISABLED) buttonSaveCustomKeys=Button(frameCustom, text='Save as New Custom Key Set',command=self.SaveAsNewKeySet) #frameKeySets labelTypeTitle=Label(frameKeySets,text='Select : ') self.radioKeysBuiltin=Radiobutton(frameKeySets,variable=self.keysAreBuiltin, value=1,command=self.SetKeysType,text='a Built-in Key Set') self.radioKeysCustom=Radiobutton(frameKeySets,variable=self.keysAreBuiltin, value=0,command=self.SetKeysType,text='a Custom Key Set') self.optMenuKeysBuiltin=DynOptionMenu(frameKeySets, self.builtinKeys,None,command=None) self.optMenuKeysCustom=DynOptionMenu(frameKeySets, self.customKeys,None,command=None) self.buttonDeleteCustomKeys=Button(frameKeySets,text='Delete Custom Key Set', command=self.DeleteCustomKeys) ##widget packing #body frameCustom.pack(side=LEFT,padx=5,pady=5,expand=TRUE,fill=BOTH) 
frameKeySets.pack(side=LEFT,padx=5,pady=5,fill=Y) #frameCustom buttonSaveCustomKeys.pack(side=BOTTOM,fill=X,padx=5,pady=5) self.buttonNewKeys.pack(side=BOTTOM,fill=X,padx=5,pady=5) frameTarget.pack(side=LEFT,padx=5,pady=5,expand=TRUE,fill=BOTH) #frame target frameTarget.columnconfigure(0,weight=1) frameTarget.rowconfigure(1,weight=1) labelTargetTitle.grid(row=0,column=0,columnspan=2,sticky=W) self.listBindings.grid(row=1,column=0,sticky=NSEW) scrollTargetY.grid(row=1,column=1,sticky=NS) scrollTargetX.grid(row=2,column=0,sticky=EW) #frameKeySets labelTypeTitle.pack(side=TOP,anchor=W,padx=5,pady=5) self.radioKeysBuiltin.pack(side=TOP,anchor=W,padx=5) self.radioKeysCustom.pack(side=TOP,anchor=W,padx=5,pady=2) self.optMenuKeysBuiltin.pack(side=TOP,fill=X,padx=5,pady=5) self.optMenuKeysCustom.pack(side=TOP,fill=X,anchor=W,padx=5,pady=5) self.buttonDeleteCustomKeys.pack(side=TOP,fill=X,padx=5,pady=5) return frame
1a560bd8eaec35366d40c44a551ff7c3dddf278b /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/3187/1a560bd8eaec35366d40c44a551ff7c3dddf278b/configDialog.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1788, 1964, 2396, 12, 2890, 4672, 468, 16099, 5555, 365, 18, 7374, 2326, 33, 780, 1537, 12, 2890, 13, 365, 18, 24553, 2396, 33, 780, 1537, 12, 2890, 13, 365, 18, 3662, 2396, 33, 780, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1788, 1964, 2396, 12, 2890, 4672, 468, 16099, 5555, 365, 18, 7374, 2326, 33, 780, 1537, 12, 2890, 13, 365, 18, 24553, 2396, 33, 780, 1537, 12, 2890, 13, 365, 18, 3662, 2396, 33, 780, ...
if fname in knownProblemFiles:
if os.path.basename(fname) in knownProblemFiles:
def importModel(modelName, srcPath, dstPath = None, clobber=False): "importing model into the database" if dstPath == None: dstPath = initialize.atmosStoragePath(modelName) # Check to see if the destination path exists if not os.path.exists(dstPath): os.makedirs(dstPath) modeldb.initModelTable(modelName) conn = modeldb.getModelDBConnection() for fname in glob(os.path.join(srcPath,'*.dat')): modelSrc = file(fname).read() modelsRawData = re.split('B?EGIN\s+ITERATION\s+\d+\s+COMPLETED',modelSrc) for model in modelsRawData: #problem with split if model == '\n' or model == '': continue teffLoggMatch = re.search('T?EFF\s+(\d+\.\d*)\s+GRAVITY\s+(\d+\.\d*)',model) #searching for metallicity, alpha and microturbulence metalAlphaMatch = re.search('\[([+-]?\d+\.\d+)([ab]?)\]', model) microMatch = re.search('VTURB[ =]?(\d+\.\d+)',model) mixLengthMatch = re.search('ONVECTION (OFF|ON)\s+(\d+\.\d+)',model) pradkMatch = re.search('P?RADK (\d+\.\d+E[+-]?\d+)',model) #Checking the integrity of the model if teffLoggMatch == None: raise casKurImportException( "Current Model does not contain effective temperature:" "\n\n--------\n\n%s" % (model,)) try: if metalAlphaMatch == None: raise casKurImportException( "Current Model does not contain metallicity information:" "\n\n--------\n\n%s" % (model,)) except casKurImportException: knownProblemFiles = ['ap00k2.dat','ap00k4.dat','asun.dat'] if fname in knownProblemFiles: continue if mixLengthMatch == None: raise casKurImportException( "Current Model does not contain mixing length information:" "\n\n--------\n\n%s" % (model,)) #reading in the model parameters convertAlpha = {'':0.0, 'a':0.4, 'b':1.0} teff = float(teffLoggMatch.groups()[0]) logg = float(teffLoggMatch.groups()[1]) feh = float(metalAlphaMatch.groups()[0]) alpha = convertAlpha[metalAlphaMatch.groups()[1]] micro = float(microMatch.groups()[0]) mixing = float(mixLengthMatch.groups()[1]) pradk = float(pradkMatch.groups()[0]) #reading model, pickling it and compressing it deck = 
readDeck(model) zipdDeck = zlib.compress(pickle.dumps(deck)) #writing to db modeldb.insertModelData(conn, modelName, [teff, logg, feh, micro, alpha, mixing, pradk, zipdDeck]) conn.commit() conn.close()
35deefa797bd4128e14cf5fda96bdb25199e1222 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7433/35deefa797bd4128e14cf5fda96bdb25199e1222/fileio.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1930, 1488, 12, 2284, 461, 16, 19497, 16, 29845, 273, 599, 16, 30152, 33, 8381, 4672, 225, 315, 5666, 310, 938, 1368, 326, 2063, 6, 225, 309, 29845, 422, 599, 30, 29845, 273, 4046, 18,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1930, 1488, 12, 2284, 461, 16, 19497, 16, 29845, 273, 599, 16, 30152, 33, 8381, 4672, 225, 315, 5666, 310, 938, 1368, 326, 2063, 6, 225, 309, 29845, 422, 599, 30, 29845, 273, 4046, 18,...
usage: idle.py [-c command] [-d] [-e] [-s] [-t title] [arg] ... -c command run this command -d enable debugger -e edit mode; arguments are files to be edited -s run $IDLESTARTUP or $PYTHONSTARTUP before anything else -t title set title of shell window When neither -c nor -e is used, and there are arguments, and the first argument is not '-', the first argument is run as a script. Remaining arguments are arguments to the script or to the command run by -c.
usage: idle.py [-c command] [-d] [-i] [-r script] [-s] [-t title] [arg] ... idle file(s) (without options) edit the file(s) -c cmd run the command in a shell -d enable the debugger -i open an interactive shell -i file(s) open a shell and also an editor window for each file -r script run a file as a script in a shell -s run $IDLESTARTUP or $PYTHONSTARTUP before anything else -t title set title of shell window Remaining arguments are applied to the command (-c) or script (-r).
def isatty(self): return 1
96d88422373ffb32aef75157647e0575a0471c03 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/96d88422373ffb32aef75157647e0575a0471c03/PyShell.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 353, 270, 4098, 12, 2890, 4672, 327, 404, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 353, 270, 4098, 12, 2890, 4672, 327, 404, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100,...
tx.moveCursor(offset + 0.5 * extraspace, 0)
m = offset + 0.5 * extraspace tx.moveCursor(m, 0)
def cleanBlockQuotedText(text): """This is an internal utility which takes triple- quoted text form within the document and returns (hopefully) the paragraph the user intended originally.""" stripped = string.strip(text) lines = string.split(stripped, '\n') trimmed_lines = map(string.lstrip, lines) return string.join(trimmed_lines, ' ')
6b31b874861a338231612bc3e9fa165b59a293b1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/7053/6b31b874861a338231612bc3e9fa165b59a293b1/paragraph.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2721, 1768, 15919, 1528, 12, 955, 4672, 3536, 2503, 353, 392, 2713, 12788, 1492, 5530, 14543, 17, 9298, 977, 646, 3470, 326, 1668, 471, 1135, 261, 76, 1306, 4095, 13, 326, 10190, 326, 72...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2721, 1768, 15919, 1528, 12, 955, 4672, 3536, 2503, 353, 392, 2713, 12788, 1492, 5530, 14543, 17, 9298, 977, 646, 3470, 326, 1668, 471, 1135, 261, 76, 1306, 4095, 13, 326, 10190, 326, 72...
>>> r = RangeMap( { 0: RangeValueUndefined(), 3: 'a', 6: 'b' } )
>>> r = RangeMap({0: RangeValueUndefined(), 3: 'a', 6: 'b'})
def descending( a, b ): return -ascending( a, b )
59648a863c896e2f14363d0c28a25b3865360d9a /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/992/59648a863c896e2f14363d0c28a25b3865360d9a/win32timezone.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 17044, 12, 279, 16, 324, 262, 30, 327, 300, 3691, 2846, 12, 279, 16, 324, 262, 225, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 17044, 12, 279, 16, 324, 262, 30, 327, 300, 3691, 2846, 12, 279, 16, 324, 262, 225, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -...
if all([is_file(store, p) for p in paths]):
if paths and all([is_file(store, p) for p in paths]):
def is_file(store, path): return store.iter_parent(store.get_iter(path)) is None
ad7c12481cc468cfb54d486338b065f114cb7f85 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/6427/ad7c12481cc468cfb54d486338b065f114cb7f85/pattern.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 353, 67, 768, 12, 2233, 16, 589, 4672, 327, 1707, 18, 2165, 67, 2938, 12, 2233, 18, 588, 67, 2165, 12, 803, 3719, 353, 599, 225, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 353, 67, 768, 12, 2233, 16, 589, 4672, 327, 1707, 18, 2165, 67, 2938, 12, 2233, 18, 588, 67, 2165, 12, 803, 3719, 353, 599, 225, 2, -100, -100, -100, -100, -100, -100, -100, -100, -1...
maxstep=.2, c1=.23, c2=0.46, xtrapl=1.1, xtrapu=4., stpmax=50., args=()):
maxstep=.2, c1=.23, c2=0.46, xtrapl=1.1, xtrapu=4., stpmax=50., stpmin=1e-8, args=()): self.stpmin = stpmin
def _line_search(self, func, myfprime, xk, pk, gfk, old_fval, old_old_fval, maxstep=.2, c1=.23, c2=0.46, xtrapl=1.1, xtrapu=4., stpmax=50., args=()):
8951d1c402f77cba079d6d7c95cb54c0cd19b294 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/1380/8951d1c402f77cba079d6d7c95cb54c0cd19b294/linesearch.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 1369, 67, 3072, 12, 2890, 16, 1326, 16, 3399, 74, 16382, 16, 619, 79, 16, 2365, 16, 13828, 79, 16, 1592, 67, 74, 1125, 16, 1592, 67, 1673, 67, 74, 1125, 16, 943, 4119, 33, 18,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 1369, 67, 3072, 12, 2890, 16, 1326, 16, 3399, 74, 16382, 16, 619, 79, 16, 2365, 16, 13828, 79, 16, 1592, 67, 74, 1125, 16, 1592, 67, 1673, 67, 74, 1125, 16, 943, 4119, 33, 18,...
except IOError, e:
except IOError:
def __init__(self, core, datastore): Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) Bcfg2.Server.Plugin.StructureValidator.__init__(self) Bcfg2.Server.Plugin.Generator.__init__(self) self.cachepath = self.data + '/cache'
fb209805adfc35c07e127215f4f828bd44151f00 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/11867/fb209805adfc35c07e127215f4f828bd44151f00/Packages.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 2922, 16, 9290, 4672, 605, 7066, 22, 18, 2081, 18, 3773, 18, 3773, 16186, 2738, 972, 12, 2890, 16, 2922, 16, 9290, 13, 605, 7066, 22, 18, 2081, 18, 377...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 2922, 16, 9290, 4672, 605, 7066, 22, 18, 2081, 18, 3773, 18, 3773, 16186, 2738, 972, 12, 2890, 16, 2922, 16, 9290, 13, 605, 7066, 22, 18, 2081, 18, 377...
def resizeEvent(self, ev): self.tabs.resize(ev.size().width(), ev.size().height())
def resizeEvent(self, ev): self.tabs.resize(ev.size().width(), ev.size().height())
1618dfe9d167f792a4119498d1a371333deb3479 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/6366/1618dfe9d167f792a4119498d1a371333deb3479/OWClusterOptimization.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 7041, 1133, 12, 2890, 16, 2113, 4672, 365, 18, 16056, 18, 15169, 12, 14965, 18, 1467, 7675, 2819, 9334, 2113, 18, 1467, 7675, 4210, 10756, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 7041, 1133, 12, 2890, 16, 2113, 4672, 365, 18, 16056, 18, 15169, 12, 14965, 18, 1467, 7675, 2819, 9334, 2113, 18, 1467, 7675, 4210, 10756, 2, -100, -100, -100, -100, -100, -100, -100, -1...
if type not in ('recipe', 'recipeset', 'job'):
if type not in ('task', 'recipe', 'recipeset', 'job'):
def abort(type, target=None, message=None, origin={}, timestamp=None): """ Abort given {recipe,recipeset,job}. Abort currently running recipe, recipeset or job, if no target is specified. """ if type not in ('recipe', 'recipeset', 'job'): raise exceptions.NotImplementedError('type must be recipe, recipeset or job. %r given' % type) return Event('abort', origin, timestamp, type=type, target=target, message=message)
9000e41347a7f27a52977a1980bebce86dcf40e6 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13148/9000e41347a7f27a52977a1980bebce86dcf40e6/event.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 6263, 12, 723, 16, 1018, 33, 7036, 16, 883, 33, 7036, 16, 4026, 28793, 2858, 33, 7036, 4672, 3536, 14263, 864, 288, 3927, 3151, 16, 266, 3449, 281, 278, 16, 4688, 5496, 225, 14263, 455...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 6263, 12, 723, 16, 1018, 33, 7036, 16, 883, 33, 7036, 16, 4026, 28793, 2858, 33, 7036, 4672, 3536, 14263, 864, 288, 3927, 3151, 16, 266, 3449, 281, 278, 16, 4688, 5496, 225, 14263, 455...
self._setup(self._connection_class(host, port, **x509))
self._setup(self._connection_class(host, port, key_file, cert_file, strict))
def __init__(self, host='', port=None, **x509): # provide a default host, pass the X509 cert info
d46aa37d35811a37397104f02074c8a44e7dbec1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/d46aa37d35811a37397104f02074c8a44e7dbec1/httplib.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 1479, 2218, 2187, 1756, 33, 7036, 16, 2826, 92, 5995, 4672, 468, 5615, 279, 805, 1479, 16, 1342, 326, 1139, 5995, 3320, 1123, 2, 0, 0, 0, 0, 0, 0, 0,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 1479, 2218, 2187, 1756, 33, 7036, 16, 2826, 92, 5995, 4672, 468, 5615, 279, 805, 1479, 16, 1342, 326, 1139, 5995, 3320, 1123, 2, -100, -100, -100, -100, ...
stringBefore, stringAfter = currentString.split(self.abbreviation)
splitList = currentString.split(self.abbreviation) stringBefore = ''.join(splitList[:-1]) stringAfter = splitList[-1]
def check_input(self, buffer): currentString = ''.join(buffer) if self.settings[IGNORE_CASE_OPTION]: currentString = currentString.lower() if self.abbreviation in currentString: stringBefore, stringAfter = currentString.split(self.abbreviation) # Check trigger character condition if not self.settings[IMMEDIATE_OPTION]: # If not immediate expansion, check last character if len(stringAfter) == 1: # Have a character after abbr if self.settings[WORD_CHARS_REGEX_OPTION].match(stringAfter): # last character(s) is a word char, can't send expansion return None elif len(stringAfter) > 1: # Abbr not at/near end of buffer any more, can't send return None else: # Nothing after abbr yet, can't expand yet return None else: # immediate option enabled, check abbr is at end of buffer if len(stringAfter) > 0: return None # Check chars ahead of abbr # length of stringBefore should always be > 0 if len(stringBefore) > 0: if self.settings[WORD_CHARS_REGEX_OPTION].match(stringBefore[-1]): # last char before is a word char if not self.settings[TRIGGER_INSIDE_OPTION]: # can't trigger when inside a word return None expansion = self.__createExpansion() if expansion is not None: if self.settings[BACKSPACE_OPTION]: # determine how many backspaces to send expansion.backspaces = len(self.abbreviation) + len(stringAfter) if not self.settings[OMIT_TRIGGER_OPTION]: expansion.string += stringAfter return expansion return None
1a3cf0ba0213b6b4b8309dd0bf50ceaf20db8e9c /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/6143/1a3cf0ba0213b6b4b8309dd0bf50ceaf20db8e9c/abbreviation.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 866, 67, 2630, 12, 2890, 16, 1613, 4672, 783, 780, 273, 875, 18, 5701, 12, 4106, 13, 225, 309, 365, 18, 4272, 63, 20118, 67, 13415, 67, 7425, 14542, 783, 780, 273, 783, 780, 18, 8167...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 866, 67, 2630, 12, 2890, 16, 1613, 4672, 783, 780, 273, 875, 18, 5701, 12, 4106, 13, 225, 309, 365, 18, 4272, 63, 20118, 67, 13415, 67, 7425, 14542, 783, 780, 273, 783, 780, 18, 8167...
responsevals.update({'%sname' % unit: self.unit_names[unit], '%svalue' % unit: self.radius_values[unit]*dist}) print "'%s', '%s'" % (response, responsevals)
responsevals.update({'%sname' % unit: self.unit_names[unit], '%svalue' % unit: "%.02f" % (self.radius_values[unit]*dist)})
def distance(self, event, unit, src, dst): (srcp, dstp) = (self.get_place(src), self.get_place(dst)) if not srcp or not dstp: event.addresponse("I don't know of anywhere called %s" % (" or ".join(["'%s'" % place for place in [srcp, dstp] if not place]))) return dist = acos(cos(srcp['lng']) * cos(dstp['lng']) * cos(srcp['lat']) * cos(dstp['lat']) + cos(srcp['lng']) * sin(srcp['lat']) * cos(dstp['lng']) * sin(dstp['lat']) + sin(srcp['lat'])*sin(dstp['lat'])) unit_names = self.unit_names if unit and unit not in self.unit_names: response = u"I don't know the unit '%%(badunit)s'. I know about: %s" % ", ".join(["%%(%sname)s (%%(%sabbrev)s)" % (unit, unit) for unit in self.unit_names]) responsevals = {} for unit in self.unit_names: responsevals.update({"%sname" % unit: self.unit_names[unit], "%sabbrev" % unit: unit}) event.addresponse(response, responsevals) return else: if unit: unit_names = [unit] print unit_names response = u"Approximate distance, as the bot flies, between %%(srcname)s and %%(dstname)s is: %s" % ", ".join(["%%(%svalue)s %%(%sname)s" % (unit, unit) for unit in unit_names]) responsevals = {'srcname': srcp['name'], 'dstname': dstp['name']} for unit in unit_names: responsevals.update({'%sname' % unit: self.unit_names[unit], '%svalue' % unit: self.radius_values[unit]*dist}) print "'%s', '%s'" % (response, responsevals) event.addresponse(response, responsevals)
2455927af5e8d19e6a3fbc0fe954cf15094c0a9f /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/12048/2455927af5e8d19e6a3fbc0fe954cf15094c0a9f/lookup.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3888, 12, 2890, 16, 871, 16, 2836, 16, 1705, 16, 3046, 4672, 261, 4816, 84, 16, 3046, 84, 13, 273, 261, 2890, 18, 588, 67, 964, 12, 4816, 3631, 365, 18, 588, 67, 964, 12, 11057, 37...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3888, 12, 2890, 16, 871, 16, 2836, 16, 1705, 16, 3046, 4672, 261, 4816, 84, 16, 3046, 84, 13, 273, 261, 2890, 18, 588, 67, 964, 12, 4816, 3631, 365, 18, 588, 67, 964, 12, 11057, 37...
(flags["landmark"], dest, random.uniform(0, 2**64), self.input, self.input))
(flags["landmark"], dest, id, self.input, self.input))
def send(self, dest, node): try: if dest == flags["landmark"]: os.system("cpp -P -DLANDMARK=\\\"--\\\" -DIPADDRESS=\\\"%s\\\" -DNODEID=%s %s %s.processed" % \ (dest, random.uniform(0, 2**64), self.input, self.input)) else: os.system("cpp -P -DLANDMARK=\\\"%s\\\" -DIPADDRESS=\\\"%s\\\" -DNODEID=%s %s %s.processed" % \ (flags["landmark"], dest, random.uniform(0, 2**64), self.input, self.input)) file = open(self.input+".processed", 'r') program = file.read() file.close() os.remove("%s.processed" % self.input) print "File %s text added to overlog program." % self.input except: print "ERROR: open file error on file", self.input sys.exit(1) tuple = Tuple.mk() payload = Tuple.mk() tuple.append(Val_Str.mk(dest)) payload.append(Val_Str.mk("overlog")) payload.append(Val_Str.mk(dest)) payload.append(Val_Str.mk(self.myaddress)) payload.append(Val_Str.mk(program)) payload.freeze() tuple.append(Val_Tuple.mk(payload)) tuple.freeze() return self.py_push(0, tuple, lambda: self.push_program(node+1))
794f5bdb9ceae079f58e71416d30042c0dfef00f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/929/794f5bdb9ceae079f58e71416d30042c0dfef00f/loadManyChords.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1366, 12, 2890, 16, 1570, 16, 756, 4672, 775, 30, 309, 1570, 422, 2943, 9614, 15733, 3355, 11929, 30, 1140, 18, 4299, 2932, 4057, 84, 300, 52, 300, 8914, 4307, 12693, 33, 1695, 2412, 4...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1366, 12, 2890, 16, 1570, 16, 756, 4672, 775, 30, 309, 1570, 422, 2943, 9614, 15733, 3355, 11929, 30, 1140, 18, 4299, 2932, 4057, 84, 300, 52, 300, 8914, 4307, 12693, 33, 1695, 2412, 4...
bond direction, or None if it is missing (since the strand ends) or if any bond directions are unset or inconsisent or if any other structural error causes difficulty.
bond direction, or None if it is missing (since the strand ends), or if any bond directions are unset or inconsistent, or if any other structural error causes difficulty, or if ._dna_updater__error is set in either self or in the atom we might otherwise return (even if that error was propogated from elsewhere in that atom's basepair, rather than being a problem with that atom itself).
def strand_next_baseatom(self, bond_direction = None): #bruce 071204 """ Assume self is a PAM strand sugar atom, and bond_direction is -1 or 1. Find the next PAM strand sugar atom (i.e. base atom) in the given bond direction, or None if it is missing (since the strand ends) or if any bond directions are unset or inconsisent or if any other structural error causes difficulty. """ # note: API might be extended to permit passing a baseindex direction # instead, and working on either strand or axis baseatoms. assert bond_direction in (-1, 1) atom1 = self.next_atom_in_bond_direction(bond_direction) # might be None if atom1 is None: return None symbol = atom1.element.symbol # KLUGE -- should use another element attr, or maybe Atom subclass if symbol[0:2] not in ('Ss', 'Sj', 'Hp', 'Pl'): # base or base linker atoms (#todo: verify or de-kluge) return None if symbol.startswith('Pl'): # base linker atom atom1 = atom1.next_atom_in_bond_direction(direction) # might be None assert atom1 is not self # (false would imply one bond had two directions, # or two bonds between same two atoms) return atom1 # might be None
56bf6dbbb37402a11ef62d251ce618080a4f8773 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/11221/56bf6dbbb37402a11ef62d251ce618080a4f8773/chem.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 16706, 67, 4285, 67, 1969, 7466, 12, 2890, 16, 8427, 67, 9855, 273, 599, 4672, 468, 2848, 3965, 10934, 2138, 3028, 3536, 15983, 365, 353, 279, 453, 2192, 16706, 26178, 3179, 16, 471, 842...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 16706, 67, 4285, 67, 1969, 7466, 12, 2890, 16, 8427, 67, 9855, 273, 599, 4672, 468, 2848, 3965, 10934, 2138, 3028, 3536, 15983, 365, 353, 279, 453, 2192, 16706, 26178, 3179, 16, 471, 842...
for k in os.environ.keys(): if k == 'NO_PROXY':
for k in list(os.environ): if 'proxy' not in k.lower():
def setUp(self): # Records changes to env vars self.env = support.EnvironmentVarGuard() # Delete all proxy related env vars for k in os.environ.keys(): if k == 'NO_PROXY': self.env.unset(k)
b3a88b5c421fe867e1f5ecdd179b190614729933 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8546/b3a88b5c421fe867e1f5ecdd179b190614729933/test_urllib.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 24292, 12, 2890, 4672, 468, 27390, 3478, 358, 1550, 4153, 365, 18, 3074, 273, 2865, 18, 5494, 1537, 16709, 1435, 468, 2504, 777, 2889, 3746, 1550, 4153, 364, 417, 316, 666, 12, 538, 18, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 24292, 12, 2890, 4672, 468, 27390, 3478, 358, 1550, 4153, 365, 18, 3074, 273, 2865, 18, 5494, 1537, 16709, 1435, 468, 2504, 777, 2889, 3746, 1550, 4153, 364, 417, 316, 666, 12, 538, 18, ...
if rec.getAttribute("forcecreate"): pass else:
if not self.nodeattr2bool(rec, 'forcecreate', True):
def _tag_record(self, cr, rec, data_node=None): rec_model = rec.getAttribute("model").encode('ascii') model = self.pool.get(rec_model) assert model, "The model %s does not exist !" % (rec_model,) rec_id = rec.getAttribute("id").encode('ascii') self._test_xml_id(rec_id)
db718a92361b34385fa8ad00d126f99f1c44bc47 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/12853/db718a92361b34385fa8ad00d126f99f1c44bc47/convert.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 2692, 67, 3366, 12, 2890, 16, 4422, 16, 1950, 16, 501, 67, 2159, 33, 7036, 4672, 1950, 67, 2284, 273, 1950, 18, 588, 1499, 2932, 2284, 20387, 3015, 2668, 9184, 6134, 938, 273, 365...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 2692, 67, 3366, 12, 2890, 16, 4422, 16, 1950, 16, 501, 67, 2159, 33, 7036, 4672, 1950, 67, 2284, 273, 1950, 18, 588, 1499, 2932, 2284, 20387, 3015, 2668, 9184, 6134, 938, 273, 365...
country=attrs['country'].encode('utf-8'),
country=eval(attrs['country'].encode('utf-8')),
def importNySMAPProject(self, param, id, attrs, content, properties, discussion, objects): #this method is called during the import process try: param = abs(int(param)) except: param = 0 if param == 3: #just try to delete the object try: self.manage_delObjects([id]) except: pass else: ob = self._getOb(id, None) if param in [0, 1] or (param==2 and ob is None): if param == 1: #delete the object if exists try: self.manage_delObjects([id]) except: pass addNySMAPProject(self, id=id, sortorder=attrs['sortorder'].encode('utf-8'), main_issues=attrs['main_issues'].encode('utf-8'), country=attrs['country'].encode('utf-8'), tools=attrs['tools'].encode('utf-8'), budget=attrs['budget'].encode('utf-8'), timeframe=attrs['timeframe'].encode('utf-8'), priority_area=attrs['priority_area'].encode('utf-8'), focus=attrs['focus'].encode('utf-8'), contributor=self.utEmptyToNone(attrs['contributor'].encode('utf-8')), discussion=abs(int(attrs['discussion'].encode('utf-8')))) ob = self._getOb(id) for property, langs in properties.items(): [ ob._setLocalPropValue(property, lang, langs[lang]) for lang in langs if langs[lang]!='' ] ob.approveThis(approved=abs(int(attrs['approved'].encode('utf-8'))), approved_by=self.utEmptyToNone(attrs['approved_by'].encode('utf-8'))) if attrs['releasedate'].encode('utf-8') != '': ob.setReleaseDate(attrs['releasedate'].encode('utf-8')) ob.import_comments(discussion) self.recatalogNyObject(ob)
d56aa47ecf359f47910ec4f0b4b9404ccebd0c2b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3287/d56aa47ecf359f47910ec4f0b4b9404ccebd0c2b/NySMAPProject.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1930, 50, 93, 7303, 2203, 4109, 12, 2890, 16, 579, 16, 612, 16, 3422, 16, 913, 16, 1790, 16, 14716, 16, 2184, 4672, 468, 2211, 707, 353, 2566, 4982, 326, 1930, 1207, 775, 30, 579, 27...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1930, 50, 93, 7303, 2203, 4109, 12, 2890, 16, 579, 16, 612, 16, 3422, 16, 913, 16, 1790, 16, 14716, 16, 2184, 4672, 468, 2211, 707, 353, 2566, 4982, 326, 1930, 1207, 775, 30, 579, 27...
if isinstance(collectionName, unicode): collectionName = str(collectionName)
collectionName = str(collectionName)
def __init__(self, url, collectionName, sendToList, account=None):
926120ca68a681763a888f931107c39774549c3e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/9228/926120ca68a681763a888f931107c39774549c3e/sharing.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 880, 16, 17137, 16, 1366, 25772, 16, 2236, 33, 7036, 4672, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 880, 16, 17137, 16, 1366, 25772, 16, 2236, 33, 7036, 4672, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -10...
return 10
return defaultevents
def get_events(): maxevents = 0 try: openfile = open('/proc/sys/kernel/threads-max', 'r') except IOError: return 10 #TODO--- #Should I catch the file read to make sure its an int? maxevents = int(openfile.read()) openfile.close() return maxevents
cb66dc22787e671d413da76e0eefbe3bf920fb9d /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/7995/cb66dc22787e671d413da76e0eefbe3bf920fb9d/Linux_resources.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 336, 67, 5989, 13332, 943, 5989, 273, 374, 225, 775, 30, 1696, 768, 273, 1696, 2668, 19, 9381, 19, 9499, 19, 8111, 19, 12495, 17, 1896, 2187, 296, 86, 6134, 1335, 8340, 30, 327, 805, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 336, 67, 5989, 13332, 943, 5989, 273, 374, 225, 775, 30, 1696, 768, 273, 1696, 2668, 19, 9381, 19, 9499, 19, 8111, 19, 12495, 17, 1896, 2187, 296, 86, 6134, 1335, 8340, 30, 327, 805, ...
config = app.upload_set_config.get(setname)
config = current_app.upload_set_config.get(setname)
def uploaded_file(setname, filename): config = app.upload_set_config.get(setname) if config is None: abort(404) send_from_directory(config.destination, filename)
b2daec58c4319cac779b169c832b4d22a7e5b748 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/798/b2daec58c4319cac779b169c832b4d22a7e5b748/uploads.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 9140, 67, 768, 12, 542, 529, 16, 1544, 4672, 642, 273, 783, 67, 2910, 18, 6327, 67, 542, 67, 1425, 18, 588, 12, 542, 529, 13, 309, 642, 353, 599, 30, 6263, 12, 11746, 13, 1366, 67,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 9140, 67, 768, 12, 542, 529, 16, 1544, 4672, 642, 273, 783, 67, 2910, 18, 6327, 67, 542, 67, 1425, 18, 588, 12, 542, 529, 13, 309, 642, 353, 599, 30, 6263, 12, 11746, 13, 1366, 67,...
return getText
return getText
def post_get_convert(self, site, getText): if site.lang == 'eo':
d80769d86c1fc35eac60949d3ec116329b831997 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/4404/d80769d86c1fc35eac60949d3ec116329b831997/wikipedia_family.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1603, 67, 588, 67, 6283, 12, 2890, 16, 2834, 16, 6701, 4672, 309, 2834, 18, 4936, 422, 296, 4361, 4278, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1603, 67, 588, 67, 6283, 12, 2890, 16, 2834, 16, 6701, 4672, 309, 2834, 18, 4936, 422, 296, 4361, 4278, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, ...
exp_column_name = comp_file_info['Exp_Col_Name']
exp_column_name = comp_file_info['Exp_Col_Name'].strip()
def extract_comp_data(comp_file_info): ## Read in from config file and Process data from Source .csv files. exp_data_dict = {} mod_data_dict = {} #List of variables from configuration file column names. exp_data_filename = comp_file_info['Exp_Filename'] #String of filename exp_column_name = comp_file_info['Exp_Col_Name'] #Experimental Data Column Name exp_column_name_row_num = int(comp_file_info['Exp_Col_Name_Row'])-1 #Experimental Data Column Name Row Number exp_data_row_num = int(comp_file_info['Exp_Data_Row'])-1 #Experimental Data Starting Row Number mod_data_filename = comp_file_info['Mod_Filename'] #String of filename mod_column_name = comp_file_info['Mod_Col_Name'] #Modeling Data Column Name mod_column_name_row_num = int(comp_file_info['Mod_Col_Name_Row'])-1 #Modeling Data Column Name Row Number mod_data_row_num = int(comp_file_info['Mod_Data_Row'])-1 #Modeling Data Starting Row Number scatter_data_label = comp_file_info['Quantity']+"-"+comp_file_info['Dataname']+"-"+comp_file_info['Exp_Col_Name'] exp_start_time_data_val = comp_file_info['Exp_Start_(min.)'] #String in minutes to start exp plot data exp_stop_time_data_val = comp_file_info['Exp_End_(min.)'] #String in minutes to stop exp plot data exp_start_time_comp_val = comp_file_info['Exp_Comp_Start_(min.)'] #String in minutes to start exp compare data exp_stop_time_comp_val = comp_file_info['Exp_Comp_End_(min.)'] #String in minutes to start exp compare data exp_initial_value = comp_file_info['Exp_Intitial_Value'] #Initial Value for Quantity mod_start_time_data_val = comp_file_info['Mod_Start_(min.)'] #String in minutes to start mod plot data mod_stop_time_data_val = comp_file_info['Mod_End_(min.)'] #String in minutes to stop mod plot data mod_start_time_comp_val = comp_file_info['Mod_Comp_Start_(min.)'] #String in minutes to start mod compare data mod_stop_time_comp_val = comp_file_info['Mod_Comp_End_(min.)'] #String in minutes to start mod compare data mod_initial_value = 
comp_file_info['Mod_Intitial_Value'] #Initial Value for Quantity min_max = comp_file_info['max/min'] #String indicating if min or max value is required. def find_start_stop_index(data_dict,col_name,start_time_data,stop_time_data,start_time_comp,stop_time_comp): #This function is used to find index numbers for start and stop points in plotting and min-max values. rowcounter1 = 0 for time_value1 in data_dict[col_name]: if time_value1 >= (float(start_time_data)*60): #print "Set #1" #print "Time Starts at row #:", str(rowcounter1) #print "With a value of:", str(time_value1) time_start_index = rowcounter1 break rowcounter1 += 1 rowcounter2 = 0 for time_value2 in data_dict[col_name]: if float(data_dict[col_name][(len(data_dict[col_name])-1)]) < (float(stop_time_data)*60): #print "Specified end of plot time is greater than end of time in the data set. \nUsing last value in the time column.\n" #print "Time used is: "+str(float(data_dict[col_name][(len(data_dict[col_name])-1)]))+"\n" time_end_index = (len(data_dict[col_name])-1) break else: row_number2 = (rowcounter2 - 1) #print "Set #2" #print "Time Ends at row #: "+str(row_number2) #print "With a value of: "+str(data_dict[col_name][row_number2]) time_end_index = row_number2 break if time_value2 < (float(stop_time_data)*60): rowcounter2 += 1 rowcounter3 = 0 for time_value3 in data_dict[col_name]: if time_value3 >= (float(start_time_comp)*60): #print "Set #3" #print "Comparison Time Starts at row #:", str(rowcounter3) #print "With a value of:", str(time_value3) minmax_start_index = rowcounter3 break rowcounter3 += 1 rowcounter4 = 0 for time_value4 in data_dict[col_name]: if float(data_dict[col_name][(len(data_dict[col_name])-1)]) < (float(stop_time_comp)*60): #print "Specified end of comparison time is greater than end of time in the data set. \nUsing last value in the time column." 
#print "Time used is: "+str(float(data_dict[col_name][(len(data_dict[col_name])-1)]))+"\n" minmax_end_index = (len(data_dict[col_name])-1) break if time_value4 < (float(stop_time_data)*60): rowcounter4 += 1 else: row_number4 = (rowcounter4 - 1) #print "Set #4" #print "Comparison Time Ends at row #: "+str(row_number4) #print "With a value of: "+str(data_dict[col_name][row_number4]) minmax_end_index = row_number4 break return (time_start_index, time_end_index, minmax_start_index, minmax_end_index) exp_file_object = open(data_directory+exp_data_filename, "rb") mod_file_object = open(data_directory+mod_data_filename, "rb") ## Start File Processing #Read in experimental data and flip lists from rows to columns. print "Reading in:", exp_data_filename exp_data_cols = zip(*csv.reader(exp_file_object)) #Convert tuples to lists. exp_data_list = [list(sublist) for sublist in exp_data_cols] #Pull the Time column name out and strip whitespace from ends of string. exp_time_col_name = (exp_data_list[0][exp_column_name_row_num]).strip() #Build Experimental Data Dictionary for exp_list in exp_data_list: exp_data_dict[(exp_list[exp_column_name_row_num]).strip()] = map(float, exp_list[exp_data_row_num:]) #print "Exp. Data Dict:", exp_data_dict[(exp_list[exp_column_name_row_num]).strip()] #Read in model data and flip lists from rows to columns. print "Reading in:", mod_data_filename mod_data_cols = zip(*csv.reader(mod_file_object)) #Convert tuples to lists. mod_data_list = [list(sublist) for sublist in mod_data_cols] #Pull the Time column name out and strip whitespace from ends of string. 
mod_time_col_name = (mod_data_list[0][mod_column_name_row_num]).strip() #Build Prediction/Model Data Dictionary for mod_list in mod_data_list: mod_data_dict[(mod_list[mod_column_name_row_num]).strip()] = map(float, mod_list[mod_data_row_num:]) #print "Model Data Dict:", mod_data_dict[(mod_list[mod_column_name_row_num]).strip()] exp_comp_ranges = find_start_stop_index(exp_data_dict,exp_time_col_name,exp_start_time_data_val,exp_stop_time_data_val,exp_start_time_comp_val,exp_stop_time_comp_val) #print exp_comp_ranges mod_comp_ranges = find_start_stop_index(mod_data_dict,mod_time_col_name,mod_start_time_data_val,mod_stop_time_data_val,mod_start_time_comp_val,mod_stop_time_comp_val) #print mod_comp_ranges ##Find max or min values. #Exp min_max value exp_data_values_comp = exp_data_dict[exp_column_name][exp_comp_ranges[2]:exp_comp_ranges[3]] if min_max == 'max': #print min_max, str(max(exp_data_values_comp)) exp_peak_value = max(exp_data_values_comp) elif min_max == 'min': #print min_max, str(min(exp_data_values_comp)) exp_peak_value = min(exp_data_values_comp) else: print "Min or Max is undefined in the input file." #Mod min_max value mod_data_values_comp = mod_data_dict[mod_column_name][mod_comp_ranges[2]:mod_comp_ranges[3]] #print mod_data_values_comp if min_max == 'max': #print min_max, str(max(mod_data_values_comp)) mod_peak_value = max(mod_data_values_comp) elif min_max == 'min': #print min_max, str(min(mod_data_values_comp)) mod_peak_value = min(mod_data_values_comp) else: print "Min or Max is undefined in the input file." #print mod_peak_value #print mod_initial_value #print exp_peak_value #print exp_initial_value relative_difference = compute_difference(mod_peak_value,mod_initial_value,exp_peak_value,exp_initial_value) #Append Min_Max Values to Global Scatter Data Dictionary. 
scatter_data_dict[scatter_data_label] = [exp_peak_value,mod_peak_value,relative_difference] #Create data lists based on specified ranges exp_data_seconds = zip(exp_data_dict[exp_time_col_name][exp_comp_ranges[0]:exp_comp_ranges[1]], exp_data_dict[exp_column_name][exp_comp_ranges[0]:exp_comp_ranges[1]]) #print exp_data_seconds mod_data_seconds = zip(mod_data_dict[mod_time_col_name][mod_comp_ranges[0]:mod_comp_ranges[1]], mod_data_dict[mod_column_name][mod_comp_ranges[0]:mod_comp_ranges[1]]) #print mod_data_seconds #Convert time to minutes from seconds. exp_data = [[x[0] / 60, x[1]] for x in exp_data_seconds] #print exp_data mod_data = [[x[0] / 60, x[1]] for x in mod_data_seconds] #print mod_data # Return list of X,Y lists. #exp_data=[[exp_time,exp_quantity_value],[exp_time,exp_quantity_value]] #mod_data=[[mod_time,mod_quantity_value],[mod_time,mod_quantity_value]] return [exp_data,mod_data]
93e8fdd038707661dcc82494e49803ca362e12a7 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/12/93e8fdd038707661dcc82494e49803ca362e12a7/Validation_Data_Processor.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2608, 67, 2919, 67, 892, 12, 2919, 67, 768, 67, 1376, 4672, 7541, 2720, 316, 628, 642, 585, 471, 4389, 501, 628, 4998, 263, 6715, 1390, 18, 225, 1329, 67, 892, 67, 1576, 273, 2618, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2608, 67, 2919, 67, 892, 12, 2919, 67, 768, 67, 1376, 4672, 7541, 2720, 316, 628, 642, 585, 471, 4389, 501, 628, 4998, 263, 6715, 1390, 18, 225, 1329, 67, 892, 67, 1576, 273, 2618, 6...
macs = self.proxi.get_device_list()
macs = [] try: macs = self.proxi.get_device_list() except: macs = [['', _('Sorry, the bluetooth device is busy connecting.\nPlease enter a correct mac address or no address at all\nfor the config that is not connecting and try again later.')]]
def cb_btnScan_clicked(self): #Idle callback to show the watch cursor while scanning (HIG) self.tmpMac = self.proxi.dev_mac self.proxi.dev_mac = '' self.proxi.kill_connection() macs = self.proxi.get_device_list() self.proxi.dev_mac = tmpMac self.model.clear() for mac in macs: self.model.append([mac[0], mac[1]]) self.window.window.set_cursor(None) self.setSensitiveConfigManagement(True)
8aae9f10adc270617c9265375716f9c0ef1eb095 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/61/8aae9f10adc270617c9265375716f9c0ef1eb095/proximity.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2875, 67, 11898, 7972, 67, 7475, 329, 12, 2890, 4672, 468, 13834, 1348, 358, 2405, 326, 4267, 3347, 1323, 21138, 261, 44, 3047, 13, 365, 18, 5645, 9990, 273, 365, 18, 20314, 77, 18, 52...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2875, 67, 11898, 7972, 67, 7475, 329, 12, 2890, 4672, 468, 13834, 1348, 358, 2405, 326, 4267, 3347, 1323, 21138, 261, 44, 3047, 13, 365, 18, 5645, 9990, 273, 365, 18, 20314, 77, 18, 52...
name = m.group(1)
name = m.group(1) + " " + m.group(2)
def postprocess_loop(loop, loops, memo): if loop in memo: return memo.add(loop) if loop is None: return m = re.search("debug_merge_point\('<code object (.*?)>", loop.content) if m is None: name = '?' else: name = m.group(1) opsno = loop.content.count("\n") lastline = loop.content[loop.content.rfind("\n", 0, len(loop.content) - 2):] m = re.search('descr=<Loop(\d+)', lastline) if m is not None: assert isinstance(loop, FinalBlock) loop.target = loops[int(m.group(1))] bcodes = loop.content.count('debug_merge_point') loop.linksource = "loop" + str(loop.no) loop.header = "%s loop%d\n%d operations\n%d opcodes" % (name, loop.no, opsno, bcodes) loop.header += "\n" * (opsno / 100) if bcodes == 0: loop.ratio = opsno else: loop.ratio = float(opsno) / bcodes loop.content = "Logfile at %d" % loop.startlineno loop.postprocess(loops, memo)
d83e5c680c37f76d4288f3d92542d307e6d2f489 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/6934/d83e5c680c37f76d4288f3d92542d307e6d2f489/otherviewer.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1603, 2567, 67, 6498, 12, 6498, 16, 14075, 16, 11063, 4672, 309, 2798, 316, 11063, 30, 327, 11063, 18, 1289, 12, 6498, 13, 309, 2798, 353, 599, 30, 327, 312, 273, 283, 18, 3072, 2932, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1603, 2567, 67, 6498, 12, 6498, 16, 14075, 16, 11063, 4672, 309, 2798, 316, 11063, 30, 327, 11063, 18, 1289, 12, 6498, 13, 309, 2798, 353, 599, 30, 327, 312, 273, 283, 18, 3072, 2932, ...
log.info("changing mode of %s to %o", file, mode)
log.info("changing mode of %s", file)
def run (self): if not self.skip_build: self.run_command('build_scripts') self.outfiles = self.copy_tree(self.build_dir, self.install_dir) if os.name == 'posix': # Set the executable bits (owner, group, and world) on # all the scripts we just installed. for file in self.get_outputs(): if self.dry_run: log.info("changing mode of %s to %o", file, mode) else: mode = ((os.stat(file)[ST_MODE]) | 0111) & 07777 log.info("changing mode of %s to %o", file, mode) os.chmod(file, mode)
3a29840c9a5f255a2a7327f3c0fbcff25f1eb65e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3187/3a29840c9a5f255a2a7327f3c0fbcff25f1eb65e/install_scripts.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1086, 261, 2890, 4672, 309, 486, 365, 18, 7457, 67, 3510, 30, 365, 18, 2681, 67, 3076, 2668, 3510, 67, 12827, 6134, 365, 18, 659, 2354, 273, 365, 18, 3530, 67, 3413, 12, 2890, 18, 35...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1086, 261, 2890, 4672, 309, 486, 365, 18, 7457, 67, 3510, 30, 365, 18, 2681, 67, 3076, 2668, 3510, 67, 12827, 6134, 365, 18, 659, 2354, 273, 365, 18, 3530, 67, 3413, 12, 2890, 18, 35...
box.pack_start(sw)
box.pack_start(canvas)
def make_root(self): conversation = hippo.CanvasBox( spacing=4, background_color=COLOR_WHITE.get_int()) self.conversation = conversation
2b0b8453335f8041fa5a2d1b057ee1dcc148fbba /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/7190/2b0b8453335f8041fa5a2d1b057ee1dcc148fbba/pippy_app.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1221, 67, 3085, 12, 2890, 4672, 10039, 273, 366, 625, 1631, 18, 12971, 3514, 12, 13259, 33, 24, 16, 5412, 67, 3266, 33, 10989, 67, 16861, 18, 588, 67, 474, 10756, 365, 18, 25131, 273, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1221, 67, 3085, 12, 2890, 4672, 10039, 273, 366, 625, 1631, 18, 12971, 3514, 12, 13259, 33, 24, 16, 5412, 67, 3266, 33, 10989, 67, 16861, 18, 588, 67, 474, 10756, 365, 18, 25131, 273, ...
value = instance[field.getName()]
if field.type in ('file', 'image', 'object'): continue accessor = field.getEditAccessor(instance) if not accessor: continue kw = {'raw':1, 'field': field.__name__} value = mapply(accessor, **kw)
def marshall(self, instance, **kwargs): p = instance.getPrimaryField() body = p and instance[p.getName()] or '' pname = p and p.getName() or None content_type = length = None # Gather/Guess content type if IBaseUnit.isImplementedBy(body): content_type = str(body.getContentType()) body = body.getRaw() else: if p and hasattr(p, 'getContentType'): content_type = p.getContentType(instance) or 'text/plain' else: content_type = body and guess_content_type(body) or 'text/plain'
66f09fba2dbfa86ec8cd2b388046a650d8134455 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12165/66f09fba2dbfa86ec8cd2b388046a650d8134455/Marshall.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1833, 12, 2890, 16, 791, 16, 2826, 4333, 4672, 293, 273, 791, 18, 588, 6793, 974, 1435, 1417, 273, 293, 471, 791, 63, 84, 18, 17994, 1435, 65, 578, 875, 19952, 273, 293, 471, 293, 18...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1833, 12, 2890, 16, 791, 16, 2826, 4333, 4672, 293, 273, 791, 18, 588, 6793, 974, 1435, 1417, 273, 293, 471, 791, 63, 84, 18, 17994, 1435, 65, 578, 875, 19952, 273, 293, 471, 293, 18...
def readfp(self, fp): """Read a single mime.types-format file.""" map = self.types_map
def readfp(self, fp, strict=True): """ Read a single mime.types-format file. If strict is true, information will be added to list of standard types, else to the list of non-standard types. """
def readfp(self, fp): """Read a single mime.types-format file.""" map = self.types_map while 1: line = fp.readline() if not line: break words = line.split() for i in range(len(words)): if words[i][0] == '#': del words[i:] break if not words: continue type, suffixes = words[0], words[1:] for suff in suffixes: map['.' + suff] = type
7642f98edc75ce0af326000834761c8280cae057 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3187/7642f98edc75ce0af326000834761c8280cae057/mimetypes.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 855, 7944, 12, 2890, 16, 4253, 16, 5490, 33, 5510, 4672, 3536, 2720, 279, 2202, 4892, 18, 2352, 17, 2139, 585, 18, 225, 971, 5490, 353, 638, 16, 1779, 903, 506, 3096, 358, 666, 434, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 855, 7944, 12, 2890, 16, 4253, 16, 5490, 33, 5510, 4672, 3536, 2720, 279, 2202, 4892, 18, 2352, 17, 2139, 585, 18, 225, 971, 5490, 353, 638, 16, 1779, 903, 506, 3096, 358, 666, 434, ...
def _recode_to_utf8(space, text, encoding): return space.str_w(app_recode_to_utf8(space, space.wrap(text),
def recode_to_utf8(space, text, encoding): return space.str_w(_recode_to_utf8(space, space.wrap(text),
def _recode_to_utf8(space, text, encoding): return space.str_w(app_recode_to_utf8(space, space.wrap(text), space.wrap(encoding)))
01d07179a3f59ade58abef40f5a78a592ad09ac7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/6934/01d07179a3f59ade58abef40f5a78a592ad09ac7/pythonparse.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 283, 710, 67, 869, 67, 3158, 28, 12, 2981, 16, 977, 16, 2688, 4672, 327, 3476, 18, 701, 67, 91, 24899, 266, 710, 67, 869, 67, 3158, 28, 12, 2981, 16, 3476, 18, 4113, 12, 955, 3631,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 283, 710, 67, 869, 67, 3158, 28, 12, 2981, 16, 977, 16, 2688, 4672, 327, 3476, 18, 701, 67, 91, 24899, 266, 710, 67, 869, 67, 3158, 28, 12, 2981, 16, 3476, 18, 4113, 12, 955, 3631,...
raise "\nERROR: Invalid Tree Item. "
raise Exception("\nERROR: Invalid Tree Item. ")
def GetItemFont(self, item): """Returns the item font."""
e1463b9df091ad0e9d76292564d4389882b28de1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12725/e1463b9df091ad0e9d76292564d4389882b28de1/customtreectrl.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 968, 1180, 5711, 12, 2890, 16, 761, 4672, 3536, 1356, 326, 761, 3512, 12123, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 968, 1180, 5711, 12, 2890, 16, 761, 4672, 3536, 1356, 326, 761, 3512, 12123, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, ...
z.constructors(lambda x: len(x.arguments) > 1).exclude() for t in ('train', 'find_nearest'):
z.constructors(lambda x: 'CvMat' in x.decl_string).exclude() for t in ('find_nearest', 'train'):
def _KLASS__repr__(self): return "KLASS(min_val=" + repr(self.min_val) + ", max_val=" + repr(self.max_val) \ + ", step=" + repr(self.step) + ")"
9b2ab83ba27bd4e10b8892602561bd714980c15b /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4310/9b2ab83ba27bd4e10b8892602561bd714980c15b/ml_h.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 47, 3979, 972, 12715, 972, 12, 2890, 4672, 327, 315, 47, 3979, 12, 1154, 67, 1125, 1546, 397, 8480, 12, 2890, 18, 1154, 67, 1125, 13, 397, 3104, 943, 67, 1125, 1546, 397, 8480, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 47, 3979, 972, 12715, 972, 12, 2890, 4672, 327, 315, 47, 3979, 12, 1154, 67, 1125, 1546, 397, 8480, 12, 2890, 18, 1154, 67, 1125, 13, 397, 3104, 943, 67, 1125, 1546, 397, 8480, ...
if not boo.online: raise RuntimeError('media offline')
def __get_data_3(self, cr, uid, boo, ira, context): if not boo.online: raise RuntimeError('media offline') if boo.type == 'filestore': if not ira.store_fname: # On a migrated db, some files may have the wrong storage type # try to fix their directory. if ira.file_size: self._doclog.warning( "ir.attachment #%d does not have a filename, but is at filestore, fix it!" % ira.id) return None fpath = os.path.join(boo.path, ira.store_fname) return file(fpath, 'rb').read() elif boo.type == 'db64': # TODO: we need a better api for large files if ira.db_datas: out = base64.decodestring(ira.db_datas) else: out = '' return out elif boo.type == 'db': # We do an explicit query, to avoid type transformations. cr.execute('SELECT db_datas FROM ir_attachment WHERE id = %s', (ira.id,)) res = cr.fetchone() if res: return res[0] else: return '' elif boo.type == 'realstore': if not ira.store_fname: # On a migrated db, some files may have the wrong storage type # try to fix their directory. if ira.file_size: self._doclog.warning("ir.attachment #%d does not have a filename, trying the name." %ira.id) sfname = ira.name fpath = os.path.join(boo.path,ira.store_fname or ira.name) if os.path.exists(fpath): return file(fpath,'rb').read() elif not ira.store_fname: return None else: raise IOError("File not found: %s" % fpath)
26935b22e6e045097f3c67418c982392e9fa3a9e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/26935b22e6e045097f3c67418c982392e9fa3a9e/document_storage.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 588, 67, 892, 67, 23, 12, 2890, 16, 4422, 16, 4555, 16, 800, 83, 16, 277, 354, 16, 819, 4672, 309, 800, 83, 18, 723, 422, 296, 7540, 25626, 4278, 309, 486, 277, 354, 18, 2233...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 588, 67, 892, 67, 23, 12, 2890, 16, 4422, 16, 4555, 16, 800, 83, 16, 277, 354, 16, 819, 4672, 309, 800, 83, 18, 723, 422, 296, 7540, 25626, 4278, 309, 486, 277, 354, 18, 2233...
if len(args) == 0:
if not arg.strip():
def command_topic(self, arg): """ /topic [new topic] """ args = arg.split() room = self.current_room() if len(args) == 0: self.add_message_to_room(room, _("The subject of the room is: %s") % room.topic) return subject = ' '.join(args) if not room.joined or room.name == "Info": return muc.change_subject(self.xmpp, room.name, subject)
efc7b22bdb316633fb50af3b674ee117aefc8619 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9814/efc7b22bdb316633fb50af3b674ee117aefc8619/gui.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1296, 67, 10476, 12, 2890, 16, 1501, 4672, 3536, 342, 10476, 306, 2704, 3958, 65, 3536, 833, 273, 1501, 18, 4939, 1435, 7725, 273, 365, 18, 2972, 67, 13924, 1435, 309, 486, 1501, 18, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1296, 67, 10476, 12, 2890, 16, 1501, 4672, 3536, 342, 10476, 306, 2704, 3958, 65, 3536, 833, 273, 1501, 18, 4939, 1435, 7725, 273, 365, 18, 2972, 67, 13924, 1435, 309, 486, 1501, 18, 6...
Accepts an incoming connection to a listening TCP socket.
Obtains an incoming message that was sent to an IP and port.
def getconnection(self): """ <Purpose> Accepts an incoming connection to a listening TCP socket.
cdc73d35527f2e8ba9aecc2ad1a44f2831f9c68c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7263/cdc73d35527f2e8ba9aecc2ad1a44f2831f9c68c/emulcomm.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 336, 4071, 12, 2890, 4672, 3536, 411, 10262, 4150, 34, 27158, 392, 6935, 1459, 358, 279, 13895, 9911, 2987, 18, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 336, 4071, 12, 2890, 4672, 3536, 411, 10262, 4150, 34, 27158, 392, 6935, 1459, 358, 279, 13895, 9911, 2987, 18, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, ...
cwd=None, input=None, enter_chroot=False): """Runs a shell command.
cwd=None, input=None, enter_chroot=False, shell=False): """Runs a command.
def RunCommand(cmd, print_cmd=True, error_ok=False, error_message=None, exit_code=False, redirect_stdout=False, redirect_stderr=False, cwd=None, input=None, enter_chroot=False): """Runs a shell command. Keyword arguments: cmd - cmd to run. Should be input to subprocess.POpen. If a string, converted to an array using split(). print_cmd -- prints the command before running it. error_ok -- does not raise an exception on error. error_message -- prints out this message when an error occurrs. exit_code -- returns the return code of the shell command. redirect_stdout -- returns the stdout. redirect_stderr -- holds stderr output until input is communicated. cwd -- the working directory to run this cmd. input -- input to pipe into this command through stdin. enter_chroot -- this command should be run from within the chroot. If set, cwd must point to the scripts directory. Raises: Exception: Raises generic exception on error with optional error_message. """ # Set default for variables. stdout = None stderr = None stdin = None output = '' # Modify defaults based on parameters. if redirect_stdout: stdout = subprocess.PIPE if redirect_stderr: stderr = subprocess.PIPE if input: stdin = subprocess.PIPE if enter_chroot: cmd = ['./enter_chroot.sh', '--'] + cmd # Print out the command before running. if print_cmd: Info('RunCommand: %s' % ' '.join(cmd)) try: proc = subprocess.Popen(cmd, cwd=cwd, stdin=stdin, stdout=stdout, stderr=stderr) (output, error) = proc.communicate(input) if exit_code: return proc.returncode if not error_ok and proc.returncode: raise Exception('Command "%s" failed.\n' % (' '.join(cmd)) + (error_message or error or output or '')) except Exception,e: if not error_ok: raise else: Warning(str(e)) return output
2e79fe48ce361b93d726cab4d55fbef17aa1d9c9 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9626/2e79fe48ce361b93d726cab4d55fbef17aa1d9c9/cros_build_lib.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1939, 2189, 12, 4172, 16, 1172, 67, 4172, 33, 5510, 16, 555, 67, 601, 33, 8381, 16, 555, 67, 2150, 33, 7036, 16, 2427, 67, 710, 33, 8381, 16, 3136, 67, 10283, 33, 8381, 16, 3136, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1939, 2189, 12, 4172, 16, 1172, 67, 4172, 33, 5510, 16, 555, 67, 601, 33, 8381, 16, 555, 67, 2150, 33, 7036, 16, 2427, 67, 710, 33, 8381, 16, 3136, 67, 10283, 33, 8381, 16, 3136, 6...
paths = [os.path.normpath(os.path.abspath(path)) for path in paths]
paths = [os.path.abspath(path) for path in paths]
def __init__(self, book, fonts, options, logger, paths): ''' Convert HTML files at C{paths} and add to C{book}. After creating the object, you must call L{self.writeto} to output the LRF/S file. @param book: The LRF book @type book: L{libprs500.lrf.pylrs.Book} @param fonts: dict specifying the font families to use ''' # Defaults for various formatting tags object.__setattr__(self, 'options', options) self.logger = logger self.fonts = fonts #: dict specifying font families to use # Memory self.scaled_images = {} #: Temporary files with scaled version of images self.rotated_images = {} #: Temporary files with rotated version of images self.text_styles = []#: Keep track of already used textstyles self.block_styles = []#: Keep track of already used blockstyles self.images = {} #: Images referenced in the HTML document self.targets = {} #: <a name=...> and id elements self.links = deque() #: <a href=...> elements self.processed_files = [] self.extra_toc_entries = [] #: TOC entries gleaned from semantic information self.image_memory = [] self.id_counter = 0 self.unused_target_blocks = [] #: Used to remove extra TextBlocks self.link_level = 0 #: Current link level self.memory = [] #: Used to ensure that duplicate CSS unhandled erros are not reported self.tops = {} #: element representing the top of each HTML file in the LRF file self.previous_text = '' #: Used to figure out when to lstrip self.preserve_block_style = False #: Used so that <p> tags in <blockquote> elements are handles properly # Styles self.blockquote_style = book.create_block_style(sidemargin=60, topskip=20, footskip=20) self.unindented_style = book.create_text_style(parindent=0) self.in_table = False # List processing self.list_level = 0 self.list_indent = 20 self.list_counter = 1 self.book = book #: The Book object representing a BBeB book self.override_css = {} self.override_pcss = {} if self._override_css is not None: if os.access(self._override_css, os.R_OK): src = open(self._override_css, 
'rb').read() else: src = self._override_css match = self.PAGE_BREAK_PAT.search(src) if match and not re.match('avoid', match.group(1), re.IGNORECASE): self.page_break_found = True ncss, npcss = self.parse_css(src) if ncss: update_css(ncss, self.override_css) if npcss: update_css(npcss, self.override_pcss) paths = [os.path.normpath(os.path.abspath(path)) for path in paths] while len(paths) > 0 and self.link_level <= self.link_levels: for path in paths: if path in self.processed_files: continue try: self.add_file(path) except KeyboardInterrupt: raise except: if self.link_level == 0: # Die on errors in the first level raise for link in self.links: if link['path'] == path: self.links.remove(link) break self.logger.warn('Could not process '+path) if self.verbose: self.logger.exception(' ') self.links = self.process_links() self.link_level += 1 paths = [link['path'] for link in self.links] for text, tb in self.extra_toc_entries: ascii_text = text.encode('ascii', 'ignore') self.book.addTocEntry(ascii_text, tb)
d2eba932cadff1972624ee4b3a3e03097c6aa553 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/9125/d2eba932cadff1972624ee4b3a3e03097c6aa553/convert_from.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 6978, 16, 16450, 16, 702, 16, 1194, 16, 2953, 4672, 9163, 4037, 3982, 1390, 622, 385, 95, 4481, 97, 471, 527, 358, 385, 95, 3618, 5496, 7360, 4979, 326, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 2738, 972, 12, 2890, 16, 6978, 16, 16450, 16, 702, 16, 1194, 16, 2953, 4672, 9163, 4037, 3982, 1390, 622, 385, 95, 4481, 97, 471, 527, 358, 385, 95, 3618, 5496, 7360, 4979, 326, ...
class Taggable(object):
class _Taggable(object):
def _getCachedInfo(self, *key_names): """Returns the cached collection of info regarding this object If not available in cache, it will be downloaded first. """ if not self._cached_info: self._cached_info = self._getInfo() if not self._cached_info: return None value_or_container = self._cached_info for key in key_names: if not len(value_or_container): return None value_or_container = value_or_container[key] return value_or_container
903c9b1622fe56617e5099f20abdc69f2090f8e0 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/9926/903c9b1622fe56617e5099f20abdc69f2090f8e0/pylast.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 588, 9839, 966, 12, 2890, 16, 380, 856, 67, 1973, 4672, 3536, 1356, 326, 3472, 1849, 434, 1123, 29012, 333, 733, 971, 486, 2319, 316, 1247, 16, 518, 903, 506, 13549, 1122, 18, 353...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 588, 9839, 966, 12, 2890, 16, 380, 856, 67, 1973, 4672, 3536, 1356, 326, 3472, 1849, 434, 1123, 29012, 333, 733, 971, 486, 2319, 316, 1247, 16, 518, 903, 506, 13549, 1122, 18, 353...
>>> triple_quoted_incomplete("'''")
>>> triple_quoted_incomplete("a('''")
def triple_quoted_incomplete(line): """ Test if line contains an incomplete triple-quoted string. >>> triple_quoted_incomplete("'''") True >>> triple_quoted_incomplete("''''''") False >>> triple_quoted_incomplete("'''''''''") True """ if line.count('"""'): return bool(line.count('"""') % 2) if line.count("'''"): return bool(line.count("'''") % 2) return False
adda59986218256322a485e046eba115a24e34d1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3111/adda59986218256322a485e046eba115a24e34d1/pep8.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 14543, 67, 15179, 67, 17624, 12, 1369, 4672, 3536, 7766, 309, 980, 1914, 392, 14715, 14543, 17, 15179, 533, 18, 225, 4080, 14543, 67, 15179, 67, 17624, 2932, 69, 2668, 11, 4970, 13, 1053...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 14543, 67, 15179, 67, 17624, 12, 1369, 4672, 3536, 7766, 309, 980, 1914, 392, 14715, 14543, 17, 15179, 533, 18, 225, 4080, 14543, 67, 15179, 67, 17624, 2932, 69, 2668, 11, 4970, 13, 1053...
"""%(more_attr, original, default, table, table, table, more_attr))
"""%(more_attr, original, default, table_name, table_name, table_name, more_attr))
def add_column(self, table, col_name, attr_dict, default='NULL'): """ Takes a while, thanks to SQLite... """ # Check input: if not self.__skeleton__.has_key(table): raise ValueError("Database has no table %s."%table) if self.__skeleton__[table].has_key(col_name): raise ValueError("Table %s already has column %s."%(table,col_name)) attr_dict = verify_column(attr_dict) # Get an ordered list: cur_list = skel_to_col_attr_list(self.__skeleton__[table]) # Update the skeleton: self.__skeleton__[table][col_name] = attr_dict original = '' for col in cur_list: original += col[0] +', ' original = original.rstrip(', ') more = original + ', ' + col_name more_attr = '' for col in cur_list: if col[2]: # If primary key: more_attr += col[0] + ' ' + col[1] + ' primary key, ' else: more_attr += col[0] + ' ' + col[1] + ', ' more_attr += col_name + ' ' + attr_dict['sql'] # ROBERT: Look at the new fun way to do this... # executescript runs a begin transaction and commit so this # should speed things up for even large amounts of data # Silly SQLite -- we have to make a temp table to hold info... self.__connection__.executescript(""" create temporary table spam(%s); insert into spam select %s, %s from %s; drop table %s; create table %s (%s); """%(more_attr, original, default, table, table, table, more_attr)) # Update indices in new table new_table_set_col_attr(self.__connection__, table, self.__skeleton__[table]) # Now we can plop our data into the *new* table: self.__connection__.executescript(""" insert into %s select %s from spam; drop table spam; """%(table, more)) self.vacuum()
ccd462ba82c8e638768d9aaffb11ed63f3c9a76d /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/9890/ccd462ba82c8e638768d9aaffb11ed63f3c9a76d/database.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 527, 67, 2827, 12, 2890, 16, 1014, 16, 645, 67, 529, 16, 1604, 67, 1576, 16, 805, 2218, 8560, 11, 4672, 3536, 23004, 279, 1323, 16, 286, 19965, 358, 16192, 2777, 225, 3536, 468, 2073, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 527, 67, 2827, 12, 2890, 16, 1014, 16, 645, 67, 529, 16, 1604, 67, 1576, 16, 805, 2218, 8560, 11, 4672, 3536, 23004, 279, 1323, 16, 286, 19965, 358, 16192, 2777, 225, 3536, 468, 2073, ...
resource = open(file, "w")
resource = open(fixed_filename, "w")
def write_file(filename, dst_type, data, **kwargs): """ This function performs the steps necessary to write an output file. One can pass a data filename or an output filename. If a data filename is passed, the data file extension, output file extension and the replace keyword must be passed. The expected data object to write to the file is a SOM. Parameters: ---------- -> filename is a string containing the name of the data file from which the output is generated or the name of an output file -> dst_type is a string containing the MIME type of the output formatter -> data is a SOM that contains the output to be written to file -> kwargs is a list of key word arguments that the function accepts: message=<string> This is part of the message that will be printed to STDOUT if verbose is switched on. The default message is \"output file\" data_ext=<string> This is the extension on the data file. This is used in conjunction with output_ext and replace to convert the data filename into an output filename. The default value is \"nxs\". output_ext=<string> This is the extension to be used for the output file. The default value is \"txt\". verbose=<True or False> This determines whether or not the print statement is executed. The default value is False. 
replace=<True or False> This determines whether or not the parameter filename is modifed to produce the output filename """ import os import DST import hlr_utils try: message = kwargs["message"] except KeyError: message = "output file" try: data_ext = kwargs["data_ext"] except KeyError: data_ext = "nxs" try: output_ext = kwargs["output_ext"] except KeyError: output_ext = "txt" try: verbose = kwargs["verbose"] except KeyError: verbose = False try: replace = kwargs["replace"] except KeyError: replace = True if replace: file = os.path.basename(filename) path = os.path.join(os.getcwd(), file) file = hlr_utils.ext_replace(path, data_ext, output_ext) else: file = filename resource = open(file, "w") output_dst = DST.getInstance(dst_type, resource) if verbose: print "Writing %s" % message output_dst.writeSOM(data) output_dst.release_resource()
aed9fc6677d3ad19b72ea3d7da2fc9d6d0cf6bf8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/763/aed9fc6677d3ad19b72ea3d7da2fc9d6d0cf6bf8/hlr_driver_helper.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1045, 67, 768, 12, 3459, 16, 3046, 67, 723, 16, 501, 16, 2826, 4333, 4672, 3536, 1220, 445, 11199, 326, 6075, 4573, 358, 1045, 392, 876, 585, 18, 6942, 848, 1342, 279, 501, 1544, 578, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1045, 67, 768, 12, 3459, 16, 3046, 67, 723, 16, 501, 16, 2826, 4333, 4672, 3536, 1220, 445, 11199, 326, 6075, 4573, 358, 1045, 392, 876, 585, 18, 6942, 848, 1342, 279, 501, 1544, 578, ...
fullname = Utils.canonstr(fullname)
fullname = Utils.canonstr(fullname, lang)
def sigterm_handler(signum, frame, mlist=mlist): mlist.Unlock() sys.exit(0)
7b07a42bfbbee598b73a790b53bec7f8dcd3ce3d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/2120/7b07a42bfbbee598b73a790b53bec7f8dcd3ce3d/confirm.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3553, 6408, 67, 4176, 12, 2977, 379, 16, 2623, 16, 312, 1098, 33, 781, 376, 4672, 312, 1098, 18, 7087, 1435, 2589, 18, 8593, 12, 20, 13, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3553, 6408, 67, 4176, 12, 2977, 379, 16, 2623, 16, 312, 1098, 33, 781, 376, 4672, 312, 1098, 18, 7087, 1435, 2589, 18, 8593, 12, 20, 13, 2, -100, -100, -100, -100, -100, -100, -100, ...
def serve(port, callback=None, finalizer=None):
def serve(port, callback=None, completer=None):
def serve(port, callback=None, finalizer=None): import BaseHTTPServer, SocketServer, mimetools, select # Patch up mimetools.Message so it doesn't break if rfc822 is reloaded. class Message(mimetools.Message): def __init__(self, fp, seekable=1): Message = self.__class__ Message.__bases__[0].__bases__[0].__init__(self, fp, seekable) self.encodingheader = self.getheader('content-transfer-encoding') self.typeheader = self.getheader('content-type') self.parsetype() self.parseplist() class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler): def send_document(self, title, contents): try: self.send_response(200) self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write(html.page(title, contents)) except IOError: pass def do_GET(self): path = self.path if path[-5:] == '.html': path = path[:-5] if path[:1] == '/': path = path[1:] if path and path != '.': try: obj = locate(path) except ErrorDuringImport, value: self.send_document(path, html.escape(str(value))) return if obj: self.send_document(describe(obj), html.document(obj, path)) else: self.send_document(path,
cdd3c5fa22c1696864e701e1c4335969c936cd77 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3187/cdd3c5fa22c1696864e701e1c4335969c936cd77/pydoc.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 12175, 12, 655, 16, 1348, 33, 7036, 16, 31848, 33, 7036, 4672, 1930, 3360, 3693, 2081, 16, 8758, 2081, 16, 20369, 278, 8192, 16, 2027, 225, 468, 12042, 731, 20369, 278, 8192, 18, 1079, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 12175, 12, 655, 16, 1348, 33, 7036, 16, 31848, 33, 7036, 4672, 1930, 3360, 3693, 2081, 16, 8758, 2081, 16, 20369, 278, 8192, 16, 2027, 225, 468, 12042, 731, 20369, 278, 8192, 18, 1079, ...
return
return
def consolidate(self, result, batch_result): log.debug("batch result is %s" , batch_result) try: output, testsRun, failures, errors, errorClasses = batch_result except ValueError: log.debug("result in unexpected format %s", batch_result) failure.Failure(*sys.exc_info())(result) return self.stream.write(output) result.testsRun += testsRun result.failures.extend(failures) result.errors.extend(errors) for key, (storage, label, isfail) in errorClasses.items(): if key not in result.errorClasses: # Ordinarily storage is result attribute # but it's only processed through the errorClasses # dict, so it's ok to fake it here result.errorClasses[key] = ([], label, isfail) mystorage, _junk, _junk = result.errorClasses[key] mystorage.extend(storage) log.debug("Ran %s tests (%s)", testsRun, result.testsRun)
45b0ee9c7bf08aa4dd3972193f83aa9b5d9d465b /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/11821/45b0ee9c7bf08aa4dd3972193f83aa9b5d9d465b/multiprocess.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 21785, 340, 12, 2890, 16, 563, 16, 2581, 67, 2088, 4672, 613, 18, 4148, 2932, 5303, 563, 353, 738, 87, 6, 269, 2581, 67, 2088, 13, 775, 30, 876, 16, 7434, 1997, 16, 11720, 16, 1334, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 21785, 340, 12, 2890, 16, 563, 16, 2581, 67, 2088, 4672, 613, 18, 4148, 2932, 5303, 563, 353, 738, 87, 6, 269, 2581, 67, 2088, 13, 775, 30, 876, 16, 7434, 1997, 16, 11720, 16, 1334, ...
return self._executeSingleStorageElementFunction(storageElementName,storageDirectory,'removeDirectory',argsDict={'recursive':recursive}) else: return self._executeStorageElementFunction(storageElementName,storageDirectory,'removeDirectory',argsDict={'recursive':recursive}) class StorageInterface(StorageFile,StorageDirectory):
return self._executeSingleStorageElementFunction( storageElementName, storageDirectory, 'removeDirectory', argsDict={'recursive':recursive} ) else: return self._executeStorageElementFunction( storageElementName, storageDirectory, 'removeDirectory', argsDict={'recursive':recursive} ) class StorageInterface( StorageFile, StorageDirectory ):
def removeStorageDirectory(self,storageDirectory,storageElementName,recursive=False,singleDirectory=False): """ Revove a directory from the storage element 'storageDirectory' is the pfn(s) directory to be removed 'storageElementName' is the Storage Element """ if singleDirectory: return self._executeSingleStorageElementFunction(storageElementName,storageDirectory,'removeDirectory',argsDict={'recursive':recursive}) else: return self._executeStorageElementFunction(storageElementName,storageDirectory,'removeDirectory',argsDict={'recursive':recursive})
9fabceb719d19d46d8b75011d2932552dbe360f9 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12864/9fabceb719d19d46d8b75011d2932552dbe360f9/ReplicaManager.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1206, 3245, 2853, 12, 2890, 16, 5697, 2853, 16, 5697, 30584, 16, 10543, 33, 8381, 16, 7526, 2853, 33, 8381, 4672, 3536, 14477, 841, 279, 1867, 628, 326, 2502, 930, 225, 296, 5697, 2853, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1206, 3245, 2853, 12, 2890, 16, 5697, 2853, 16, 5697, 30584, 16, 10543, 33, 8381, 16, 7526, 2853, 33, 8381, 4672, 3536, 14477, 841, 279, 1867, 628, 326, 2502, 930, 225, 296, 5697, 2853, ...
if self.parser.elementInScope(name):
if self.parser.elementInScope(name, True):
def endTagTableCell(self, name): if self.parser.elementInScope(name): self.parser.generateImpliedEndTags(name) if self.parser.openElements[-1].name != name: self.parser.parseError() node = self.parser.openElements.pop() while node.name != name: node = self.parser.openElements.pop() self.parser.clearActiveFormattingElements() self.parser.switchInsertionMode("inRow") else: self.parser.parseError()
314930f356a6a5ba3005a20cc8267d4533fc779d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/9368/314930f356a6a5ba3005a20cc8267d4533fc779d/parser.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 29765, 1388, 4020, 12, 2890, 16, 508, 4672, 309, 365, 18, 4288, 18, 2956, 382, 3876, 12, 529, 16, 1053, 4672, 365, 18, 4288, 18, 7163, 2828, 2092, 1638, 3453, 12, 529, 13, 309, 365, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 29765, 1388, 4020, 12, 2890, 16, 508, 4672, 309, 365, 18, 4288, 18, 2956, 382, 3876, 12, 529, 16, 1053, 4672, 365, 18, 4288, 18, 7163, 2828, 2092, 1638, 3453, 12, 529, 13, 309, 365, ...
class ModelFirst2(BaseModel): name = u"A→B, C→D" def getInitialParameters(self, time): return ([10 / (time[len(time) / 2] - time[0]), 3 / (time[len(time) / 2] - time[0])], [True, True]) def rcalc(self, k, a_0, t, y): """ Function used by ngml, but not by ngml2. Parameter k (=parameter) is a column vector. Parameter a_0 is a number. Parameter t is a list of time values. Parameter y is a column vector of measured values. Returns tuple of three values: - first value is a list of residuals 'r' - second value is a matrix of concentrations 'c' - third value is a matrix 'a' """ c0 = a_0 * matlib.exp(-k[0,0] * t) c1 = a_0 * matlib.exp(-k[1,0] * t) c = matlib.hstack((c0, c1)) a = numpy.matrix(nonneglstsq.nonneglstsq(c.getA(), y.getA1())[0]).T r = y - matlib.dot(c, a) return (r, c, a)
def rcalc(self, k, a_0, t, y): """ Function used by ngml, but not by ngml2. Parameter k (=parameter) is a column vector. Parameter a_0 is a number. Parameter t is a list of time values. Parameter y is a column vector of measured values. Returns tuple of three values: - first value is a list of residuals 'r' - second value is a matrix of concentrations 'c' - third value is a matrix 'a' """ # First column of C contains concentrations of A c = a_0 * matlib.exp(-k[0,0] * t)
bbe3f10e4c957a215ad479cad9a3a0f1cc9edd6e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10129/bbe3f10e4c957a215ad479cad9a3a0f1cc9edd6e/ngml.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 436, 12448, 12, 2890, 16, 417, 16, 279, 67, 20, 16, 268, 16, 677, 4672, 3536, 4284, 1399, 635, 10944, 781, 16, 1496, 486, 635, 10944, 781, 22, 18, 5498, 417, 261, 33, 6775, 13, 353, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 436, 12448, 12, 2890, 16, 417, 16, 279, 67, 20, 16, 268, 16, 677, 4672, 3536, 4284, 1399, 635, 10944, 781, 16, 1496, 486, 635, 10944, 781, 22, 18, 5498, 417, 261, 33, 6775, 13, 353, ...
if self._parent:
if self._parent is not None:
def _reverseParents(self): if self._parent: for parent in self._parent._reverseParents(): yield parent yield self._parent
d6d83c745583cf0150544c0c0286863a29fa1c3f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8747/d6d83c745583cf0150544c0c0286863a29fa1c3f/use.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 9845, 13733, 12, 2890, 4672, 309, 365, 6315, 2938, 353, 486, 599, 30, 364, 982, 316, 365, 6315, 2938, 6315, 9845, 13733, 13332, 2824, 982, 2824, 365, 6315, 2938, 2, 0, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 9845, 13733, 12, 2890, 4672, 309, 365, 6315, 2938, 353, 486, 599, 30, 364, 982, 316, 365, 6315, 2938, 6315, 9845, 13733, 13332, 2824, 982, 2824, 365, 6315, 2938, 2, -100, -100, -100...
self._language_cache[lang][model][record_id]
language_cache[lang][model][record_id]
def setLang(self, lang): self._context = self._context.copy() prev_lang = self._context.get('language') or 'en_US' self._context['language'] = lang for model in self._cache: for record_id in self._cache[model]: self._language_cache.setdefault(prev_lang, {}).setdefault(model, {})[record_id] = \ self._cache[model][record_id] if lang in self._language_cache \ and model in self._language_cache[lang] \ and record_id in self._language_cache[lang][model]: self._cache[model][record_id] = \ self._language_cache[lang][model][record_id] else: self._cache[model][record_id] = {'id': record_id}
25adb9916575fbcb4348b167f6e70aa3d1da28fe /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/9266/25adb9916575fbcb4348b167f6e70aa3d1da28fe/browse.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 444, 7275, 12, 2890, 16, 3303, 4672, 365, 6315, 2472, 273, 365, 6315, 2472, 18, 3530, 1435, 2807, 67, 4936, 273, 365, 6315, 2472, 18, 588, 2668, 4923, 6134, 578, 296, 275, 67, 3378, 11...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 444, 7275, 12, 2890, 16, 3303, 4672, 365, 6315, 2472, 273, 365, 6315, 2472, 18, 3530, 1435, 2807, 67, 4936, 273, 365, 6315, 2472, 18, 588, 2668, 4923, 6134, 578, 296, 275, 67, 3378, 11...
if dataLen < 5: return (None, data)
def _UnpackOneTerm(data): dataLen = len(data) if len(data) == 0: return (None, data) data0 = ord(data[0]) if data0 == MAGIC_SMALL_INTEGER: if dataLen < 2: return (None, data) n = _ReadInt1(data[1]) return (ErlNumber(n), data[2:]) elif data0 == MAGIC_INTEGER: if dataLen < 5: return (None, data) n = _ReadInt4(data[1:5]) return (ErlNumber(n), data[5:]) elif data0 == MAGIC_FLOAT: if dataLen < 32: return (None, data) floatData = data[1:32] try: nullIndex = string.index(floatData, chr(0)) floatStr = floatData[0:nullIndex] except ValueError: floatStr = floatData f = string.atof(floatStr) return (ErlNumber(f), data[32:]) elif data0 == MAGIC_ATOM: if dataLen < 3: return (None, data) atomLen = _ReadInt2(data[1:3]) if dataLen < 3 + atomLen: return (None, data) atomText = data[3:3 + atomLen] return (ErlAtom(atomText), data[3 + atomLen:]) elif data0 == MAGIC_REFERENCE: (node, remainingData) = _UnpackOneTerm(data[1:]) if node == None: return (None, data) if len(remainingData) < 5: return (None, data) id = _ReadId(remainingData[0:4]) creation = _ReadCreation(remainingData[4]) return (ErlRef(node, id, creation), remainingData[5:]) elif data0 == MAGIC_PORT: (node, remainingData) = _UnpackOneTerm(data[1:]) if node == None: return (None, data) if len(remainingData) < 5: return (None, data) id = _ReadId(remainingData[0:4]) creation = _ReadCreation(remainingData[4]) return (ErlPort(node, id, creation), remainingData[5:]) elif data0 == MAGIC_PID: (node, remainingData) = _UnpackOneTerm(data[1:]) if node == None: return (None, data) if len(remainingData) < 9: return (None, data) id = _ReadId(remainingData[0:4], 15) serial = _ReadInt4(remainingData[4:8]) creation = _ReadCreation(remainingData[8]) return (ErlPid(node, id, serial, creation), remainingData[9:]) elif data0 == MAGIC_SMALL_TUPLE: if dataLen < 2: return (None, data) arity = _ReadInt1(data[1]) (elements, remainingData) = _UnpackTermSeq(arity, data[2:]) if elements == None: return (None, data) return (ErlTuple(elements), 
remainingData) elif data0 == MAGIC_LARGE_TUPLE: if dataLen < 5: return (None, data) arity = _ReadInt4(data[1:5]) (elements, remainingData) = _UnpackTermSeq(arity, data[5:]) if elements == None: return (None, data) return (ErlTuple(elements), remainingData) elif data0 == MAGIC_NIL: return (ErlList([]), data[1:]) elif data0 == MAGIC_STRING: if dataLen < 3: return (None, data) strlen = _ReadInt2(data[1:3]) if dataLen < 3 + strlen: return (None, data) s = data[3:3 + strlen] return (ErlString(s), data[3 + strlen:]) elif data0 == MAGIC_LIST: if dataLen < 5: return (None, data) arity = _ReadInt4(data[1:5]) (elements, remainingData) = _UnpackTermSeq(arity, data[5:]) if elements == None: return (None, data) return (ErlList(elements), remainingData[1:]) # skip MAGIC_NIL elif data0 == MAGIC_BINARY: if dataLen < 5: return (None, data) binlen = _ReadInt4(data[1:5]) if dataLen < 5 + binlen: return (None, data) s = data[5:5 + binlen] return (ErlBinary(s), data[5 + binlen:]) elif data0 == MAGIC_SMALL_BIG: if dataLen < 2: return (None, data) n = _ReadInt1(data[1]) if dataLen < 2 + 1 + n: return (None, data) sign = _ReadInt1(data[2]) bignum = 0L for i in range(n): d = _ReadInt1(data[3 + n - i - 1]) bignum = bignum * 256L + long(d) if sign: bignum = bignum * -1L return (ErlNumber(bignum), data[3 + n:]) elif data0 == MAGIC_LARGE_BIG: if dataLen < 5: return (None, data) n = _ReadInt4(data[1:5]) if dataLen < 5 + 1 + n: return (None, data) sign = _ReadInt1(data[5]) bignum = 0L for i in range(n): d = _ReadInt1(data[6 + n - i - 1]) bignum = bignum * 256L + long(d) if sign: bignum = bignum * -1L return (ErlNumber(bignum), data[6 + n:]) elif data0 == MAGIC_NEW_CACHE: if dataLen < 4: return (None, data) index = _ReadInt1(data[1]) atomLen = _ReadInt2(data[2:4]) if dataLen < 4 + atomLen: return (None, data) atomText = data[4:4 + atomLen] return (ErlAtom(atomText, cache=index), data[4 + atomLen:]) elif data0 == MAGIC_CACHED_ATOM: if dataLen < 2: return (None, data) index = _ReadInt1(data[1]) 
return (ErlAtom(None, cache=index), data[2:]) elif data0 == MAGIC_NEW_REFERENCE: if dataLen < 3: return (None, data) idLen = _ReadInt2(data[1:3]) (node, remainingData) = _UnpackOneTerm(data[3:]) if node == None: return (None, data) nprim = 4 * idLen if len(remainingData) < 1 + nprim: return (None, data) creation = _ReadCreation(remainingData[0]) remainingData = remainingData[1:] id0 = _ReadId(remainingData[0:4]) ids = [id0] remainingData = remainingData[4:] for i in range(idLen-1): id = _ReadInt4(remainingData[0:4]) remainingData = remainingData[4:] ids.append(id) return (ErlRef(node, ids, creation), remainingData) elif data0 == MAGIC_FUN: if dataLen < 5: return (None, data) freevarsLen = _ReadInt4(data[1:5]) (pid, remainingData1) = _UnpackOneTerm(data[5:]) if pid == None: return (None, data) (module, remainingData2) = _UnpackOneTerm(remainingData1) if module == None: return (None, data) (index, remainingData3) = _UnpackOneTerm(remainingData2) if index == None: return (None, data) (uniq, remainingData4) = _UnpackOneTerm(remainingData3) if uniq == None: return (None, data) (freeVars, remainingData5) = _UnpackTermSeq(freevarsLen,remainingData4) if freeVars == None: return (None, data) print "MAGIC_FUN" print pid print module print index print uniq print freeVars return (ErlFun(pid, module, index, uniq, freeVars), remainingData5) else: print "Bad tag %s" % `data0` return (None, data)
cadc8319939cd691f548c4b841a357df13bff542 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/7565/cadc8319939cd691f548c4b841a357df13bff542/erl_term.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 23649, 3335, 4065, 12, 892, 4672, 501, 2891, 273, 562, 12, 892, 13, 225, 309, 562, 12, 892, 13, 422, 374, 30, 327, 261, 7036, 16, 501, 13, 225, 501, 20, 273, 4642, 12, 892, 63...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 389, 23649, 3335, 4065, 12, 892, 4672, 501, 2891, 273, 562, 12, 892, 13, 225, 309, 562, 12, 892, 13, 422, 374, 30, 327, 261, 7036, 16, 501, 13, 225, 501, 20, 273, 4642, 12, 892, 63...
if not os.path.exists(env.prefs[nanohive_path_prefs_key]):
if not os.path.exists(env.prefs[povray_path_prefs_key]):
def raytrace_scene_using_povray(assy, pov, width, height, output_type="png"): '''Render the POV-Ray scene file <pov>. The output image is placed next to <pov> with the extension based on <output_type>. <width>, <height> are the width and height of the rendered image. (int) <output_type> is the extension of the output image (currently only 'png' and 'bmp' are supported). Return values: 0 = successful 1 = POV-Ray plug-in not enabled 2 = POV-Ray plug-in path is empty 3 = POV-Ray plug-in path points to a file that does not exist 4 = POV-Ray plug-in is not Version 3.6 or higher (not currently supported) - Mark 060529. 5 = Unsupported output image format. 6 = POV-Ray failed for some reason. ''' errmsgs = ["Error: POV-Ray plug-in not enabled.", "Error: POV-Ray Plug-in path is empty.", "Error: POV-Ray plug-in path points to a file that does not exist.", "Error: POV-Ray plug-in is not version 3.6", "Error: Unsupported output image format: ", "Error: POV-Ray failed."] # Validate that the POV-Ray plug-in is enabled. if not env.prefs[povray_enabled_prefs_key]: r = activate_povray_plugin(assy.w) if r: return 1, errmsgs[0] # POV-Ray plug-in not enabled. povray_exe = env.prefs[povray_path_prefs_key] if not povray_exe: return 2, errmsgs[1] # POV-Ray plug-in path is empty if not os.path.exists(env.prefs[nanohive_path_prefs_key]): return 3, errmsgs[2] # POV-Ray plug-in path points to a file that does not exist #r = verify_povray_program() # Not yet sure how to verify POV-Ray program. Mark 060529. #if r: # return 4, errmsgs[3] # POV-Ray plug-in is not Version 3.6 program = "\""+povray_exe+"\"" # POV-Ray (pvengine.exe) or MegaPOV (mpengine.exe) # POV-Ray has a special feature introduced in v3.5 called "I/O Restrictions" which attempts # to at least partially protect a machine running POV-Ray from having files read or written # outside of a given set of directories. This is a problem since we want POV-Ray (*.pov) # and image (*.png) files to be placed in $HOME/Nanorex/POV-Ray. 
# # There are at least three ways around POV-Ray's "I/O Restrictions" feature: # # 1. read/write files from/to the current directory (which is allowed), then move the files where # you want them. # 2. change dir (cd) to the working directory (i.e. $HOME/Nanorex/POV-Ray), start POV-Ray # and return to original directory. # 3. Create a POV-Ray "INI file" and have POV-Ray use it instead of command-line options. This # is probably the best long-term solution, but I need to investigate it more. To learn more about # this, search for "INI Files" in the POV-Ray Help Documentation. # # I went with option 2. Option 3 may be something to look into later. # # To learn more about this, search for "I/O Restrictions" in the POV-Ray Help Documentation. # Mark 060529. if output_type == 'png': output_ext = '.png' pov_commandline_filetype = 'N' # 'N' = PNG (portable network graphics) format elif output_type == 'bmp': output_ext = '.bmp' pov_commandline_filetype = 'S' # 'S' = System-specific such as Mac Pict or Windows BMP else: return 5, errmsgs[4] + output_type # tmp_pov and tmp_out are the basenames of the pov and image output file. # Later we'll cd to the pov file's directory and use these filenames in the POV-Ray command-line. # This helps us get around POV-Ray's I/O Restrictions. Mark 060529. workdir, tmp_pov = os.path.split(pov) base, ext = os.path.splitext(tmp_pov) tmp_out = base + output_ext # POV-Ray command-line options. input_fn = "Input_File_Name=\'%s\'" % tmp_pov output_fn = "Output_File_Name=\'%s\'" % tmp_out w = "+W%d" % width h = "+H%d" % height aa="+A" filetype = "+F%s" % pov_commandline_filetype # Other POV-Ray command-line options (currently not used): lib = "+L\'C:/Program Files/POV-Ray for Windows v3.6/include\'" # Needed when program='megapov.exe', but not pvengine.exe or mgengine.exe. # megapov.exe provides a way to render a scene without invoking the POV-Ray GUI on Windows. 
# megapov.exe may be the way to go on Windows when I figure out how to direct output # to the GLPane or a separate window. Mark 060529. exit = "/EXIT" # This switch is available for Windows only. It causes the POV-Ray GUI to exit as soon as it finished # rendering the image. The problem with this is that the picture exits, too. Mark 060529. # Render scene. try: args = [program] + [input_fn] + [output_fn] + [w] + [h] + [aa] + [filetype] print "Launching POV-Ray: \n povray_exe=", povray_exe, "\n args are %r" % (args,) arguments = QStringList() for arg in args: if arg != "": arguments.append(arg) p = QProcess() p.setArguments(arguments) wd = QDir(workdir) p.setWorkingDirectory(wd) # This gets us around POV-Ray's 'I/O Restrictions' feature. #QApplication.setOverrideCursor( QCursor(Qt.WaitCursor) ) # For later. p.start() except: from debug import print_compact_traceback print_compact_traceback( "exception in raytrace_scene_using_povray(): " ) return 6, errmsgs[5] # QApplication.restoreOverrideCursor() # Restore the cursor. Later. return 0, ''
285e3ebb185d7adf9722dc2d1ef78b356995ee77 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/11221/285e3ebb185d7adf9722dc2d1ef78b356995ee77/povray.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 14961, 5129, 67, 23694, 67, 9940, 67, 84, 1527, 435, 12, 428, 93, 16, 293, 1527, 16, 1835, 16, 2072, 16, 876, 67, 723, 1546, 6446, 6, 4672, 9163, 3420, 326, 13803, 58, 17, 54, 528, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 14961, 5129, 67, 23694, 67, 9940, 67, 84, 1527, 435, 12, 428, 93, 16, 293, 1527, 16, 1835, 16, 2072, 16, 876, 67, 723, 1546, 6446, 6, 4672, 9163, 3420, 326, 13803, 58, 17, 54, 528, ...
if new_confd_hmac_key or not os.path.exists(constants.CONFD_HMAC_KEY): logging.debug("Writing new confd HMAC key to %s", constants.CONFD_HMAC_KEY) GenerateHmacKey(constants.CONFD_HMAC_KEY)
if new_confd_hmac_key or not os.path.exists(hmackey_file): logging.debug("Writing new confd HMAC key to %s", hmackey_file) GenerateHmacKey(hmackey_file)
def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_confd_hmac_key, rapi_cert_pem=None): """Updates the cluster certificates, keys and secrets. @type new_cluster_cert: bool @param new_cluster_cert: Whether to generate a new cluster certificate @type new_rapi_cert: bool @param new_rapi_cert: Whether to generate a new RAPI certificate @type new_confd_hmac_key: bool @param new_confd_hmac_key: Whether to generate a new HMAC key @type rapi_cert_pem: string @param rapi_cert_pem: New RAPI certificate in PEM format """ # noded SSL certificate cluster_cert_exists = os.path.exists(constants.NODED_CERT_FILE) if new_cluster_cert or not cluster_cert_exists: if cluster_cert_exists: utils.CreateBackup(constants.NODED_CERT_FILE) logging.debug("Generating new cluster certificate at %s", constants.NODED_CERT_FILE) GenerateSelfSignedSslCert(constants.NODED_CERT_FILE) # confd HMAC key if new_confd_hmac_key or not os.path.exists(constants.CONFD_HMAC_KEY): logging.debug("Writing new confd HMAC key to %s", constants.CONFD_HMAC_KEY) GenerateHmacKey(constants.CONFD_HMAC_KEY) # RAPI rapi_cert_exists = os.path.exists(constants.RAPI_CERT_FILE) if rapi_cert_pem: # Assume rapi_pem contains a valid PEM-formatted certificate and key logging.debug("Writing RAPI certificate at %s", constants.RAPI_CERT_FILE) utils.WriteFile(constants.RAPI_CERT_FILE, data=rapi_cert_pem, backup=True) elif new_rapi_cert or not rapi_cert_exists: if rapi_cert_exists: utils.CreateBackup(constants.RAPI_CERT_FILE) logging.debug("Generating new RAPI certificate at %s", constants.RAPI_CERT_FILE) GenerateSelfSignedSslCert(constants.RAPI_CERT_FILE)
aeefe835be955a154e5d02b354d5e229201fdf5c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7542/aeefe835be955a154e5d02b354d5e229201fdf5c/bootstrap.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 6654, 3629, 18048, 12, 2704, 67, 7967, 67, 7593, 16, 394, 67, 1266, 77, 67, 7593, 16, 394, 67, 3923, 72, 67, 19820, 67, 856, 16, 767, 7259, 67, 7593, 67, 20313, 33, 7036, 4672, 3536,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 6654, 3629, 18048, 12, 2704, 67, 7967, 67, 7593, 16, 394, 67, 1266, 77, 67, 7593, 16, 394, 67, 3923, 72, 67, 19820, 67, 856, 16, 767, 7259, 67, 7593, 67, 20313, 33, 7036, 4672, 3536,...
self.end_current_para()
if self.current_para.contents: self.current_block.append(self.current_para) self.current_para = Paragraph()
def update_css(ncss): for key in ncss.keys(): if self.css.has_key(key): self.css[key].update(ncss[key]) else: self.css[key] = ncss[key]
e39dc4223f880d6b721e75d35aaa36ac5ad96971 /local1/tlutelli/issta_data/temp/all_python//python/2007_temp/2007/9125/e39dc4223f880d6b721e75d35aaa36ac5ad96971/convert_from.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1089, 67, 5212, 12, 82, 5212, 4672, 364, 498, 316, 290, 5212, 18, 2452, 13332, 309, 365, 18, 5212, 18, 5332, 67, 856, 12, 856, 4672, 365, 18, 5212, 63, 856, 8009, 2725, 12, 82, 5212,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1089, 67, 5212, 12, 82, 5212, 4672, 364, 498, 316, 290, 5212, 18, 2452, 13332, 309, 365, 18, 5212, 18, 5332, 67, 856, 12, 856, 4672, 365, 18, 5212, 63, 856, 8009, 2725, 12, 82, 5212,...
........ >>> import numpy as N
-------- >>> import numpy as NP
def build_rotate_matrix(img_data_parms): """ rot_matrix = reg.build_rotate_matrix(img_data_parms) takes the 6 element vector (3 angles, 3 translations) and build the 4x4 mapping matrix Parameters ....... img_data_parms : {nd_array} this is the current (6-dim) array with 3 angles and 3 translations. Returns ....... rot_matrix: {nd_array} the 4x4 mapping matrix Examples ........ >>> import numpy as N >>> import _registration as reg >>> imdata = reg.build_structs() >>> x = N.zeros(6, dtype=N.float64) >>> M = reg.build_rotate_matrix(x) >>> M array([[ 1., 0., 0., 0.], [ 0., 1., 0., 0.], [ 0., 0., 1., 0.], [ 0., 0., 0., 1.]]) """ R1 = N.zeros([4,4], dtype=N.float64); R2 = N.zeros([4,4], dtype=N.float64); R3 = N.zeros([4,4], dtype=N.float64); T = N.eye(4, dtype=N.float64); alpha = math.radians(img_data_parms[0]) beta = math.radians(img_data_parms[1]) gamma = math.radians(img_data_parms[2]) R1[0][0] = 1.0 R1[1][1] = math.cos(alpha) R1[1][2] = math.sin(alpha) R1[2][1] = -math.sin(alpha) R1[2][2] = math.cos(alpha) R1[3][3] = 1.0 R2[0][0] = math.cos(beta) R2[0][2] = math.sin(beta) R2[1][1] = 1.0 R2[2][0] = -math.sin(beta) R2[2][2] = math.cos(beta) R2[3][3] = 1.0 R3[0][0] = math.cos(gamma) R3[0][1] = math.sin(gamma) R3[1][0] = -math.sin(gamma) R3[1][1] = math.cos(gamma) R3[2][2] = 1.0 R3[3][3] = 1.0 T[0][0] = 1.0 T[1][1] = 1.0 T[2][2] = 1.0 T[3][3] = 1.0 T[0][3] = img_data_parms[3] T[1][3] = img_data_parms[4] T[2][3] = img_data_parms[5] rot_matrix = N.dot(T, R1); rot_matrix = N.dot(rot_matrix, R2); rot_matrix = N.dot(rot_matrix, R3); return rot_matrix
de681f926e982b0212f2f58bf637917926737f89 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/12971/de681f926e982b0212f2f58bf637917926737f89/_registration.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1361, 67, 20342, 67, 5667, 12, 6081, 67, 892, 67, 29927, 4672, 3536, 4168, 67, 5667, 273, 960, 18, 3510, 67, 20342, 67, 5667, 12, 6081, 67, 892, 67, 29927, 13, 225, 5530, 326, 1666, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1361, 67, 20342, 67, 5667, 12, 6081, 67, 892, 67, 29927, 4672, 3536, 4168, 67, 5667, 273, 960, 18, 3510, 67, 20342, 67, 5667, 12, 6081, 67, 892, 67, 29927, 13, 225, 5530, 326, 1666, ...
res = self.getStorageFileExists(storageDirectory,storageElement,singleFile=True) if not res['OK']: gLogger.error("Failed to obtain existance of directory",res['Message'])
res = self.getStorageFileExists( storageDirectory, storageElement, singleFile=True ) if not res['OK']: gLogger.error( "Failed to obtain existance of directory", res['Message'] )
def __removeStorageDirectory(self,directory,storageElement): gLogger.info('Removing the contents of %s at %s' % (directory,storageElement)) res = self.getPfnForLfn([directory],storageElement) if not res['OK']: gLogger.error("Failed to get PFN for directory",res['Message']) return res for directory, error in res['Value']['Failed'].items(): gLogger.error('Failed to obtain directory PFN from LFN','%s %s' % (directory,error)) if res['Value']['Failed']: return S_ERROR('Failed to obtain directory PFN from LFNs') storageDirectory = res['Value']['Successful'].values()[0] res = self.getStorageFileExists(storageDirectory,storageElement,singleFile=True) if not res['OK']: gLogger.error("Failed to obtain existance of directory",res['Message']) return res exists = res['Value'] if not exists: gLogger.info("The directory %s does not exist at %s " % (directory,storageElement)) return S_OK() res = self.removeStorageDirectory(storageDirectory,storageElement,recursive=True,singleDirectory=True) if not res['OK']: gLogger.error("Failed to remove storage directory",res['Message']) return res gLogger.info("Successfully removed %d files from %s at %s" % (res['Value']['FilesRemoved'],directory,storageElement)) return S_OK()
9fabceb719d19d46d8b75011d2932552dbe360f9 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12864/9fabceb719d19d46d8b75011d2932552dbe360f9/ReplicaManager.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 4479, 3245, 2853, 12, 2890, 16, 5149, 16, 5697, 1046, 4672, 314, 3328, 18, 1376, 2668, 18939, 326, 2939, 434, 738, 87, 622, 738, 87, 11, 738, 261, 5149, 16, 5697, 1046, 3719, 400...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 4479, 3245, 2853, 12, 2890, 16, 5149, 16, 5697, 1046, 4672, 314, 3328, 18, 1376, 2668, 18939, 326, 2939, 434, 738, 87, 622, 738, 87, 11, 738, 261, 5149, 16, 5697, 1046, 3719, 400...
cty.c_double],
cty.c_double],
def flimage_color_to_pixel(pImage, p2, p3, p4, p5): """ flimage_color_to_pixel(pImage, p2, p3, p4, p5) -> num. """ retval = _flimage_color_to_pixel(pImage, p2, p3, p4, p5) return retval
9942dac8ce2b35a1e43615a26fd8e7054ef805d3 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/2429/9942dac8ce2b35a1e43615a26fd8e7054ef805d3/xformslib.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1183, 2730, 67, 3266, 67, 869, 67, 11743, 12, 84, 2040, 16, 293, 22, 16, 293, 23, 16, 293, 24, 16, 293, 25, 4672, 3536, 1183, 2730, 67, 3266, 67, 869, 67, 11743, 12, 84, 2040, 16, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1183, 2730, 67, 3266, 67, 869, 67, 11743, 12, 84, 2040, 16, 293, 22, 16, 293, 23, 16, 293, 24, 16, 293, 25, 4672, 3536, 1183, 2730, 67, 3266, 67, 869, 67, 11743, 12, 84, 2040, 16, ...
AKICK = alist(bot, 'akick', a_kick)
AKICK = alist(bot, 'akick', a_kick, True) AVISITOR = alist(bot, 'avisitor', a_visitor, True)
def a_moderator(item, reason): item.room.moderate('nick', item.nick, 'role', 'moderator', '')
bab40efca50c4e0d03eaefce7b8f5193f06e5b64 /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/2729/bab40efca50c4e0d03eaefce7b8f5193f06e5b64/alists.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 279, 67, 1711, 7385, 12, 1726, 16, 3971, 4672, 761, 18, 13924, 18, 1711, 12600, 2668, 17091, 2187, 761, 18, 17091, 16, 296, 4615, 2187, 296, 1711, 7385, 2187, 28707, 225, 2, 0, 0, 0, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 279, 67, 1711, 7385, 12, 1726, 16, 3971, 4672, 761, 18, 13924, 18, 1711, 12600, 2668, 17091, 2187, 761, 18, 17091, 16, 296, 4615, 2187, 296, 1711, 7385, 2187, 28707, 225, 2, -100, -100, ...
raise TracError, 'Unsupported version control system "%s"' \ % self.repository_type
raise TracError('Unsupported version control system "%s"' % self.repository_type)
def get_repository(self, authname): if not self._connector: candidates = [] for connector in self.connectors: for repos_type_, prio in connector.get_supported_types(): if self.repository_type != repos_type_: continue heappush(candidates, (-prio, connector)) if not candidates: raise TracError, 'Unsupported version control system "%s"' \ % self.repository_type self._connector = heappop(candidates)[1] return self._connector.get_repository(self.repository_type, self.repository_dir, authname)
0091f73d9ddb2e28de9da1a025ba0c0cfaf2b458 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/9317/0091f73d9ddb2e28de9da1a025ba0c0cfaf2b458/api.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 336, 67, 9071, 12, 2890, 16, 1357, 529, 4672, 309, 486, 365, 6315, 23159, 30, 7965, 273, 5378, 364, 8703, 316, 365, 18, 4646, 18886, 30, 364, 13686, 67, 723, 67, 16, 14705, 83, 316, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 336, 67, 9071, 12, 2890, 16, 1357, 529, 4672, 309, 486, 365, 6315, 23159, 30, 7965, 273, 5378, 364, 8703, 316, 365, 18, 4646, 18886, 30, 364, 13686, 67, 723, 67, 16, 14705, 83, 316, ...
colBtn.SetForegroundColour(wxColour(255, 255, 255))
colBtn.SetForegroundColour(wx.Colour(255, 255, 255))
def editColTCProp(self, colCb, colBtn, prop, val=None): if val is None: colStr = colCb.GetValue() else: colStr = val if colStr: col = strToCol(colStr%self.commonDefs) if self.editProp(colStr!='', prop, colStr): if colStr: colBtn.SetForegroundColour(wxColour(0, 0, 0)) colBtn.SetBackgroundColour(col) else: colBtn.SetForegroundColour(wxColour(255, 255, 255)) colBtn.SetBackgroundColour(\ wxSystemSettings_GetSystemColour(wxSYS_COLOUR_BTNFACE))
1e19321bfb8d6ff348cd85bdad3a1a4c2f517fa0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12725/1e19321bfb8d6ff348cd85bdad3a1a4c2f517fa0/STCStyleEditor.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3874, 914, 15988, 4658, 12, 2890, 16, 645, 15237, 16, 645, 20541, 16, 2270, 16, 1244, 33, 7036, 4672, 309, 1244, 353, 599, 30, 645, 1585, 273, 645, 15237, 18, 967, 620, 1435, 469, 30, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3874, 914, 15988, 4658, 12, 2890, 16, 645, 15237, 16, 645, 20541, 16, 2270, 16, 1244, 33, 7036, 4672, 309, 1244, 353, 599, 30, 645, 1585, 273, 645, 15237, 18, 967, 620, 1435, 469, 30, ...
s = [] s.append(document.header_to_unicode()) s.extend(self.process(document.root_element, stack, repeat))
s = self.process(document.root_element, stack, repeat)
def __call__(self, namespace={}): # XXX Rewrite with traverse2.
1001013c120160c2a94a44e51b7515d52b41d6ba /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12681/1001013c120160c2a94a44e51b7515d52b41d6ba/STL.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 1991, 972, 12, 2890, 16, 1981, 12938, 4672, 468, 11329, 17851, 598, 10080, 22, 18, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1001, 1991, 972, 12, 2890, 16, 1981, 12938, 4672, 468, 11329, 17851, 598, 10080, 22, 18, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, ...
while(len(recv_sockets + send_sockets) > 0): try: read_list, write_list, error = \ select.select(recv_sockets, send_sockets, [], 20) except select.error, e: if e[0] != errno.EINTR: log.error("%s: select() error (testing stage): %s", router.nickname, e[0]) continue else: raise if len(read_list + write_list) == 0: log.debug("%s: select() timeout (test data stage)!", router.nickname) break if read_list: for read_sock in read_list: try: ip, source_port = read_sock.getpeername() my_ip, port = read_sock.getsockname() except socket.error, e: if e.errno == errno.ENOTCONN: continue if router.actual_ip and router.actual_ip != ip: log.debug("%s: multiple IP addresses, %s and %s (%s advertised)!", router.nickname, ip, router.actual_ip, router.ip) router.actual_ip = ip data = test_data[port] data_received = read_sock.recv(len(data)) if(data_received < len(data)): if data_received == data[:len(data_received)]: test_data[port] = data[len(data_received):] log.debug("incomplete response! continuing") continue if(data == data_received): log.debug("(%s, %d): test succeeded!", router.nickname, port) router.working_ports.add(port) else: log.debug("(%s, %d): test failed! Expected %s, got %s.", router.nickname, port, data, data_received) recv_sockets.remove(read_sock) done.append(read_sock) if write_list: for write_sock in write_list: ip, port = write_sock.getpeername() log.debug("(%s, %d): writing test data.", router.nickname, port) try: write_sock.send(test_data[port]) except socket.error, e: if e.errno == errno.ECONNRESET: log.debug("(%s, %d): Connection reset by peer.", router.nickname, port) send_sockets.remove(write_sock) continue send_sockets.remove(write_sock) done.append(write_sock) log.debug("Closing sockets!") for sock in done: sock.close() router.failed_ports = test_ports - router.working_ports test_completed = time.time() router.last_tested = int(test_completed) router.last_test_length = (test_completed - test_started) self.close_test_circuit(router) return True
def exit_test(self, router): """ Perform port and IP tests on router. Will block until all port tests are finished. Can raise the following errors: socket.error - errno == errno.ECONNREFUSED: Tor refused our SOCKS connected. """ test_data = {} test_ports = set() recv_sockets = [] listen_sockets = [] self.test_exit = router
921dc4f6d0461873a6adb949ad90974a0718ad25 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9986/921dc4f6d0461873a6adb949ad90974a0718ad25/torbel.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2427, 67, 3813, 12, 2890, 16, 4633, 4672, 3536, 11217, 1756, 471, 2971, 7434, 603, 4633, 18, 9980, 1203, 3180, 777, 1756, 7434, 854, 6708, 18, 4480, 1002, 326, 3751, 1334, 30, 2987, 18, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2427, 67, 3813, 12, 2890, 16, 4633, 4672, 3536, 11217, 1756, 471, 2971, 7434, 603, 4633, 18, 9980, 1203, 3180, 777, 1756, 7434, 854, 6708, 18, 4480, 1002, 326, 3751, 1334, 30, 2987, 18, ...
if self.__defaultLayout.layoutID != layoutID:
try: if self.__defaultLayout.layoutID != layoutID: self.__defaultLayout = XiboLayout(layoutID,True) except:
def run(self): while self.running: self.interval = 300 # Find out how long we should wait between updates. try: self.interval = int(config.get('Main','xmdsUpdateInterval')) except: # self.interval has been set to a sensible default in this case. log.log(0,"warning",_("No XMDS Update Interval specified in your configuration")) log.log(0,"warning",_("Please check your xmdsUpdateInterval configuration option")) log.log(0,"warning",_("A default value has been used:") + " " + str(self.interval) + " " + _("seconds")) # Call schedule on the webservice schedule = '<schedule/>' try: schedule = self.xmds.Schedule() log.log(5,"audit",_("XmdsScheduler: XMDS Schedule() returned ") + str(schedule)) f = open(config.get('Main','libraryDir') + os.sep + 'schedule.xml','w') f.write(schedule) f.close() except IOError: log.log(0,"error",_("Error trying to cache Schedule to disk")) except XMDSException: log.log(0,"warning",_("XMDS RequiredFiles threw an exception")) try: try: f = open(config.get('Main','libraryDir') + os.sep + 'schedule.xml') schedule = f.read() finally: f.close() except: # Couldn't read or file doesn't exist. Either way, return the default blank schedule. pass scheduleText = "" # Process the received schedule # If the schedule hasn't changed, do nothing. if self.previousSchedule != schedule: doc = minidom.parseString(schedule) tmpLayouts = doc.getElementsByTagName('layout') defaultLayout = doc.getElementsByTagName('default')
62c3b46cd6d729c0e22feacc836fc900a1e0fc24 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5464/62c3b46cd6d729c0e22feacc836fc900a1e0fc24/XiboClient.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1086, 12, 2890, 4672, 1323, 365, 18, 8704, 30, 365, 18, 6624, 273, 11631, 225, 468, 4163, 596, 3661, 1525, 732, 1410, 2529, 3086, 4533, 18, 775, 30, 365, 18, 6624, 273, 509, 12, 1425, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 1086, 12, 2890, 4672, 1323, 365, 18, 8704, 30, 365, 18, 6624, 273, 11631, 225, 468, 4163, 596, 3661, 1525, 732, 1410, 2529, 3086, 4533, 18, 775, 30, 365, 18, 6624, 273, 509, 12, 1425, ...
return [self.service.manager.getMessage(self.post_id).addCallback(self._cbDetailData)]
return [self.manager.getMessage(self.post_id).addCallback(self._cbDetailData)]
def display(self, request): self.request = request self.post_id = int(request.args.get('post_id',[0])[0]) self.forum_id = int(request.args.get('forum_id',[0])[0]) self.thread_id = int(request.args.get('thread_id',[0])[0]) return [self.service.manager.getMessage(self.post_id).addCallback(self._cbDetailData)]
bd6102d819612a9b6694f73898b35fa12f91d912 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12595/bd6102d819612a9b6694f73898b35fa12f91d912/gadgets.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2562, 12, 2890, 16, 590, 4672, 365, 18, 2293, 273, 590, 365, 18, 2767, 67, 350, 273, 509, 12, 2293, 18, 1968, 18, 588, 2668, 2767, 67, 350, 2187, 63, 20, 5717, 63, 20, 5717, 365, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2562, 12, 2890, 16, 590, 4672, 365, 18, 2293, 273, 590, 365, 18, 2767, 67, 350, 273, 509, 12, 2293, 18, 1968, 18, 588, 2668, 2767, 67, 350, 2187, 63, 20, 5717, 63, 20, 5717, 365, 1...
try: user = os.environ["HGUSER"] except: user = os.environ["LOGNAME"] + '@' + socket.getfqdn()
user = (os.environ.get("HGUSER") or os.environ.get("EMAIL") or os.environ.get("LOGNAME", "unknown") + '@' + socket.getfqdn())
def add(self, manifest, list, desc, transaction, p1=None, p2=None): try: user = os.environ["HGUSER"] except: user = os.environ["LOGNAME"] + '@' + socket.getfqdn() date = "%d %d" % (time.time(), time.timezone) list.sort() l = [hex(manifest), user, date] + list + ["", desc] text = "\n".join(l) return self.addrevision(text, transaction, self.count(), p1, p2)
3c7585009f39461dd1a7ded89e60d9cbdec46822 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/11312/3c7585009f39461dd1a7ded89e60d9cbdec46822/hg.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 527, 12, 2890, 16, 5643, 16, 666, 16, 3044, 16, 2492, 16, 293, 21, 33, 7036, 16, 293, 22, 33, 7036, 4672, 729, 273, 261, 538, 18, 28684, 18, 588, 2932, 44, 43, 4714, 7923, 578, 114...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 527, 12, 2890, 16, 5643, 16, 666, 16, 3044, 16, 2492, 16, 293, 21, 33, 7036, 16, 293, 22, 33, 7036, 4672, 729, 273, 261, 538, 18, 28684, 18, 588, 2932, 44, 43, 4714, 7923, 578, 114...
_sybase.builder()(*args, **kw)
return _sybase.builder()(*args, **kw)
def SybaseConnection(*args, **kw): _warn('SybaseConnection is deprecated; use connectionForURI("sybase://...") or "from sqlobject.sybase import builder; SybaseConnection = builder()"') _sybase.builder()(*args, **kw)
014eeb8d3d7c0716aa5bcee8199f0e6399edfd82 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/6718/014eeb8d3d7c0716aa5bcee8199f0e6399edfd82/__init__.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 16455, 1969, 1952, 30857, 1968, 16, 2826, 9987, 4672, 389, 8935, 2668, 10876, 1969, 1952, 353, 6849, 31, 999, 1459, 1290, 3098, 2932, 9009, 1969, 2207, 7070, 13, 578, 315, 2080, 4744, 383,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 16455, 1969, 1952, 30857, 1968, 16, 2826, 9987, 4672, 389, 8935, 2668, 10876, 1969, 1952, 353, 6849, 31, 999, 1459, 1290, 3098, 2932, 9009, 1969, 2207, 7070, 13, 578, 315, 2080, 4744, 383,...
K = NumberField_quadratic(polynomial, name, check, embedding, latex_name=latex_name)
K = NumberField_quadratic(polynomial, name, latex_name, check, embedding)
def NumberField(polynomial, name=None, check=True, names=None, cache=True, embedding=None, latex_name=None): r""" Return *the* number field defined by the given irreducible polynomial and with variable with the given name. If check is True (the default), also verify that the defining polynomial is irreducible and over `\QQ`. INPUT: - ``polynomial`` - a polynomial over `\QQ` or a number field, or a list of polynomials. - ``name`` - a string (default: 'a'), the name of the generator - ``check`` - bool (default: True); do type checking and irreducibility checking. - ``embedding`` - image of the generator in an ambient field (default: None) EXAMPLES:: sage: z = QQ['z'].0 sage: K = NumberField(z^2 - 2,'s'); K Number Field in s with defining polynomial z^2 - 2 sage: s = K.0; s s sage: s*s 2 sage: s^2 2 Constructing a relative number field:: sage: K.<a> = NumberField(x^2 - 2) sage: R.<t> = K[] sage: L.<b> = K.extension(t^3+t+a); L Number Field in b with defining polynomial t^3 + t + a over its base field sage: L.absolute_field('c') Number Field in c with defining polynomial x^6 + 2*x^4 + x^2 - 2 sage: a*b a*b sage: L(a) a sage: L.lift_to_base(b^3 + b) -a Constructing another number field:: sage: k.<i> = NumberField(x^2 + 1) sage: R.<z> = k[] sage: m.<j> = NumberField(z^3 + i*z + 3) sage: m Number Field in j with defining polynomial z^3 + i*z + 3 over its base field Number fields are globally unique:: sage: K.<a>= NumberField(x^3-5) sage: a^3 5 sage: L.<a>= NumberField(x^3-5) sage: K is L True Having different defining polynomials makes the fields different:: sage: x = polygen(QQ, 'x'); y = polygen(QQ, 'y') sage: k.<a> = NumberField(x^2 + 3) sage: m.<a> = NumberField(y^2 + 3) sage: k Number Field in a with defining polynomial x^2 + 3 sage: m Number Field in a with defining polynomial y^2 + 3 One can also define number fields with specified embeddings, may be used for arithmetic and deduce relations with other number fields which would not be valid for an abstract number 
field:: sage: K.<a> = NumberField(x^3-2, embedding=1.2) sage: RR.coerce_map_from(K) Composite map: From: Number Field in a with defining polynomial x^3 - 2 To: Real Field with 53 bits of precision Defn: Generic morphism: From: Number Field in a with defining polynomial x^3 - 2 To: Real Lazy Field Defn: a -> 1.259921049894873? then Conversion via _mpfr_ method map: From: Real Lazy Field To: Real Field with 53 bits of precision sage: RR(a) 1.25992104989487 sage: 1.1 + a 2.35992104989487 sage: b = 1/(a+1); b 1/3*a^2 - 1/3*a + 1/3 sage: RR(b) 0.442493334024442 sage: L.<b> = NumberField(x^6-2, embedding=1.1) sage: L(a) b^2 sage: a + b b^2 + b Note that the image only needs to be specified to enough precision to distinguish roots, and is exactly computed to any needed precision:: sage: RealField(200)(a) 1.2599210498948731647672106072782283505702514647015079800820 One can embed into any other field:: sage: K.<a> = NumberField(x^3-2, embedding=CC.gen()-0.6) sage: CC(a) -0.629960524947436 + 1.09112363597172*I sage: L = Qp(5) sage: f = polygen(L)^3 - 2 sage: K.<a> = NumberField(x^3-2, embedding=f.roots()[0][0]) sage: a + L(1) 4 + 2*5^2 + 2*5^3 + 3*5^4 + 5^5 + 4*5^6 + 2*5^8 + 3*5^9 + 4*5^12 + 4*5^14 + 4*5^15 + 3*5^16 + 5^17 + 5^18 + 2*5^19 + O(5^20) sage: L.<b> = NumberField(x^6-x^2+1/10, embedding=1) sage: K.<a> = NumberField(x^3-x+1/10, embedding=b^2) sage: a+b b^2 + b sage: CC(a) == CC(b)^2 True sage: K.coerce_embedding() Generic morphism: From: Number Field in a with defining polynomial x^3 - x + 1/10 To: Number Field in b with defining polynomial x^6 - x^2 + 1/10 Defn: a -> b^2 The ``QuadraticField`` and ``CyclotomicField`` constructors create an embedding by default unless otherwise specified. 
:: sage: K.<zeta> = CyclotomicField(15) sage: CC(zeta) 0.913545457642601 + 0.406736643075800*I sage: L.<sqrtn3> = QuadraticField(-3) sage: K(sqrtn3) 2*zeta^5 + 1 sage: sqrtn3 + zeta 2*zeta^5 + zeta + 1 An example involving a variable name that defines a function in PARI:: sage: theta = polygen(QQ, 'theta') sage: M.<z> = NumberField([theta^3 + 4, theta^2 + 3]); M Number Field in z0 with defining polynomial theta^3 + 4 over its base field TESTS:: sage: x = QQ['x'].gen() sage: y = ZZ['y'].gen() sage: K = NumberField(x^3 + x + 3, 'a'); K Number Field in a with defining polynomial x^3 + x + 3 sage: K.defining_polynomial().parent() Univariate Polynomial Ring in x over Rational Field :: sage: L = NumberField(y^3 + y + 3, 'a'); L Number Field in a with defining polynomial y^3 + y + 3 sage: L.defining_polynomial().parent() Univariate Polynomial Ring in y over Rational Field :: sage: sage.rings.number_field.number_field._nf_cache = {} sage: K.<x> = CyclotomicField(5)[] sage: W.<a> = NumberField(x^2 + 1); W Number Field in a with defining polynomial x^2 + 1 over its base field sage: sage.rings.number_field.number_field._nf_cache = {} sage: W1 = NumberField(x^2+1,'a') sage: K.<x> = CyclotomicField(5)[] sage: W.<a> = NumberField(x^2 + 1); W Number Field in a with defining polynomial x^2 + 1 over its base field """ if name is None and names is None: raise TypeError, "You must specify the name of the generator." 
if not names is None: name = names if isinstance(polynomial, (list, tuple)): return NumberFieldTower(polynomial, name) name = sage.structure.parent_gens.normalize_names(1, name) if not isinstance(polynomial, polynomial_element.Polynomial): try: polynomial = polynomial.polynomial(QQ) except (AttributeError, TypeError): raise TypeError, "polynomial (=%s) must be a polynomial."%repr(polynomial) # convert ZZ to QQ R = polynomial.base_ring() Q = polynomial.parent().base_extend(R.fraction_field()) polynomial = Q(polynomial) if cache: key = (polynomial, polynomial.base_ring(), name, embedding, embedding.parent() if embedding is not None else None) if _nf_cache.has_key(key): K = _nf_cache[key]() if not K is None: return K if isinstance(R, NumberField_generic): S = R.extension(polynomial, name, check=check) if cache: _nf_cache[key] = weakref.ref(S) return S if polynomial.degree() == 2: K = NumberField_quadratic(polynomial, name, check, embedding, latex_name=latex_name) else: K = NumberField_absolute(polynomial, name, None, check, embedding, latex_name=latex_name) if cache: _nf_cache[key] = weakref.ref(K) return K
a1a9ef727e26796ae9f7e10297c4e3fe3ebf2d52 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9417/a1a9ef727e26796ae9f7e10297c4e3fe3ebf2d52/number_field.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3588, 974, 12, 3915, 13602, 16, 508, 33, 7036, 16, 866, 33, 5510, 16, 1257, 33, 7036, 16, 1247, 33, 5510, 16, 15853, 33, 7036, 16, 25079, 67, 529, 33, 7036, 4672, 436, 8395, 2000, 38...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 3588, 974, 12, 3915, 13602, 16, 508, 33, 7036, 16, 866, 33, 5510, 16, 1257, 33, 7036, 16, 1247, 33, 5510, 16, 15853, 33, 7036, 16, 25079, 67, 529, 33, 7036, 4672, 436, 8395, 2000, 38...
c.appendExten("s","Set(CALLERID(name)=%s)" % self.clid)
def createIncomingContext(self):
    """Write the incoming "in-<name>" dialplan context for this trunk
    into extensions.conf.

    Tags CDRs with the trunk name on both the "s" and "_X." extensions,
    optionally forces the caller-id name, then routes the call to the
    configured destination (phone, ivr or pbx) according to
    self.contextin.
    """
    c = AstConf("extensions.conf")
    contextin = "in-%s" % self.name
    c.setSection(contextin)
    # Record which trunk the call came in on, for both entry patterns.
    c.appendExten("s", "Set(CDR(intrunk)=%s)" % self.name)
    c.appendExten("_X.", "Set(CDR(intrunk)=%s)" % self.name)
    if self.clid:
        # Forcing CALLERID(name) requires the func_callerid module.
        needModule("func_callerid")
        c.appendExten("s", "Set(CALLERID(name)=%s)" % self.clid)
        c.appendExten("_X.", "Set(CALLERID(name)=%s)" % self.clid)
    global configlet_tree
    if self.contextin == 'phone' and self.phone:
        obj = configlet_tree.getConfigletByName(self.phone)
        try:
            pbx = obj.pbx
            c.appendExten("s", "Goto(%s,%s,1)" % (pbx, self.phone))
            c.appendExten("_X.", "Goto(%s,%s,1)" % (pbx, self.phone))
        except AttributeError:
            # Target configlet has no pbx attribute; leave unrouted.
            pass
    if self.contextin == 'ivr' and self.ivr:
        c.appendExten("s", "Goto(%s,s,1)" % self.ivr)
        c.appendExten("_X.", "Goto(%s,s,1)" % self.ivr)
    if self.contextin == 'pbx' and self.pbx:
        c.appendExten("s", "Goto(%s,s,1)" % self.pbx)
        # BUG FIX: this branch is guarded by self.pbx and must route the
        # dialed extension into the pbx context; the original interpolated
        # self.ivr here (copy-paste from the ivr branch above).
        c.appendExten("_X.", "Goto(%s,${EXTEN},1)" % self.pbx)
c16011697405cd9c1de5d4fe32312f4b1556e6dc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/2627/c16011697405cd9c1de5d4fe32312f4b1556e6dc/configlets.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 752, 20370, 1042, 12, 2890, 4672, 276, 273, 16614, 3976, 2932, 9489, 18, 3923, 7923, 819, 267, 273, 315, 267, 6456, 87, 6, 738, 365, 18, 529, 276, 18, 542, 5285, 12, 2472, 267, 13, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 752, 20370, 1042, 12, 2890, 4672, 276, 273, 16614, 3976, 2932, 9489, 18, 3923, 7923, 819, 267, 273, 315, 267, 6456, 87, 6, 738, 365, 18, 529, 276, 18, 542, 5285, 12, 2472, 267, 13, 2...
deps = extract_deps(file, legal_deps) for hdr in deps.keys(): deps.update(h_deps.get(hdr, {}))
group = [ '$(OBJECTS_all)' ]
def main():
    """Generate build-outputs.mk from build.conf.

    Emits one make rule per .c file (object -> source + header deps),
    then the OBJECTS / HEADERS / SOURCE_DIRS / BUILD_DIRS variables and
    the .make.dirs stamp rule.  Python 2 code (string.join, ConfigParser).
    """
    parser = ConfigParser.ConfigParser()
    parser.read('build.conf')
    dirs = { }
    files = get_files(parser.get('options', 'paths'))
    headers = get_files(parser.get('options', 'headers'))

    # compute the relevant headers, along with the implied includes
    legal_deps = { }
    for fname in headers:
        legal_deps[os.path.basename(fname)] = fname
    # Per-header dependency maps; resolve_deps presumably closes them
    # transitively -- TODO confirm against resolve_deps' definition.
    h_deps = { }
    for fname in headers:
        h_deps[os.path.basename(fname)] = extract_deps(fname, legal_deps)
    resolve_deps(h_deps)

    f = open('build-outputs.mk', 'w')
    f.write('# DO NOT EDIT. AUTOMATICALLY GENERATED.\n\n')

    objects = [ ]
    # NOTE(review): `file` shadows the Python 2 builtin; kept as-is.
    for file in files:
        assert file[-2:] == '.c'
        obj = file[:-2] + '.lo'
        objects.append(obj)
        dirs[os.path.dirname(file)] = None

        # what headers does this file include, along with the implied headers
        deps = extract_deps(file, legal_deps)
        for hdr in deps.keys():
            deps.update(h_deps.get(hdr, {}))

        # Rule: object depends on its source, the dir stamp, and headers.
        f.write('%s: %s .make.dirs %s\n' % (obj, file, string.join(deps.values())))

    f.write('\nOBJECTS = %s\n\n' % string.join(objects))
    f.write('HEADERS = $(top_srcdir)/%s\n\n' % string.join(headers, ' $(top_srcdir)/'))
    f.write('SOURCE_DIRS = %s $(EXTRA_SOURCE_DIRS)\n\n' % string.join(dirs.keys()))

    # Build a list of all necessary directories in build tree
    alldirs = { }
    for dir in dirs.keys():
        d = dir
        # Walk each path up to the root, collecting every ancestor dir.
        while d:
            alldirs[d] = None
            d = os.path.dirname(d)

    # Sort so 'foo' is before 'foo/bar'
    keys = alldirs.keys()
    keys.sort()
    f.write('BUILD_DIRS = %s\n\n' % string.join(keys))

    # Stamp rule: create every build dir, then touch the timestamp.
    f.write('.make.dirs: $(srcdir)/build-outputs.mk\n' \
            '\t@for d in $(BUILD_DIRS); do test -d $$d || mkdir $$d; done\n' \
            '\t@echo timestamp > $@\n')
125666d12bf070a1454f737b429ae5b2aca0f0d3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/7752/125666d12bf070a1454f737b429ae5b2aca0f0d3/gen-build.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2774, 13332, 2082, 273, 25076, 18, 809, 2678, 1435, 2082, 18, 896, 2668, 3510, 18, 3923, 6134, 225, 7717, 273, 288, 289, 1390, 273, 336, 67, 2354, 12, 4288, 18, 588, 2668, 2116, 2187, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2774, 13332, 2082, 273, 25076, 18, 809, 2678, 1435, 2082, 18, 896, 2668, 3510, 18, 3923, 6134, 225, 7717, 273, 288, 289, 1390, 273, 336, 67, 2354, 12, 4288, 18, 588, 2668, 2116, 2187, ...
mixer.setLineinVolume(0) self.thread.app.write(config.TV_SETTINGS + ' ' + 'fine_minus' + '\n')
mixer.setLineinVolume( 0 ) mixer.setIgainVolume( 0 ) self.thread.app.write( config.TV_SETTINGS + ' ' + 'fine_plus' + '\n' )
def EventHandler(self, event):
    """Handle remote-control events for the TV/VCR app.

    MENU/STOP/SELECT/PLAY_END stop the app; CHUP/CHDOWN change channel;
    LEFT/RIGHT fine-tune.  Every tuner operation mutes line-in first,
    issues the tuner command, waits 0.1s, then restores the volume to 90
    to avoid audible tuning noise -- the ordering is deliberate.
    Python 2 code (print statement).  Channel/tune events are ignored in
    'vcr' mode.
    """
    print '%s app got %s event' % (self.mode, event)
    if event == rc.MENU or event == rc.STOP or event == rc.SELECT or event == rc.PLAY_END:
        # Tear down: stop playback, release the rc focus, redraw menu.
        self.Stop()
        rc.app = None
        menuwidget.refresh()
    elif event == rc.CHUP:
        if self.mode == 'vcr':
            return
        # Go to the next channel in the list
        self.TunerNextChannel()
        tuner_channel = self.TunerGetChannel()
        mixer.setLineinVolume(0)
        self.thread.app.write(config.TV_SETTINGS + ' ' + tuner_channel + '\n')
        time.sleep(0.1)
        mixer.setLineinVolume(90)
    elif event == rc.CHDOWN:
        if self.mode == 'vcr':
            return
        # Go to the previous channel in the list
        self.TunerPrevChannel()
        mixer.setLineinVolume(0)
        tuner_channel = self.TunerGetChannel()
        self.thread.app.write(config.TV_SETTINGS + ' ' + tuner_channel + '\n')
        time.sleep(0.1)
        mixer.setLineinVolume(90)
    elif event == rc.LEFT:
        if self.mode == 'vcr':
            return
        # Finetune minus
        mixer.setLineinVolume(0)
        self.thread.app.write(config.TV_SETTINGS + ' ' + 'fine_minus' + '\n')
        time.sleep(0.1)
        mixer.setLineinVolume(90)
    elif event == rc.RIGHT:
        if self.mode == 'vcr':
            return
        # Finetune plus (original comment said "minus" -- copy-paste)
        mixer.setLineinVolume(0)
        self.thread.app.write(config.TV_SETTINGS + ' ' + 'fine_plus' + '\n')
        time.sleep(0.1)
        mixer.setLineinVolume(90)
288493038f35f8d2cf5f29dd7706d18eb5ab81b7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/11399/288493038f35f8d2cf5f29dd7706d18eb5ab81b7/v4l1tv.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 26012, 12, 2890, 16, 871, 4672, 1172, 1995, 87, 595, 2363, 738, 87, 871, 11, 738, 261, 2890, 18, 3188, 16, 871, 13, 309, 871, 422, 4519, 18, 29227, 578, 871, 422, 4519, 18, 17513, 57...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 26012, 12, 2890, 16, 871, 4672, 1172, 1995, 87, 595, 2363, 738, 87, 871, 11, 738, 261, 2890, 18, 3188, 16, 871, 13, 309, 871, 422, 4519, 18, 29227, 578, 871, 422, 4519, 18, 17513, 57...
x = datatype.encode(x)
if datatype is not None: x = datatype.encode(x)
def encode_query(query, schema=None):
    """Serialize *query* using the "application/x-www-form-urlencoded"
    content type (see
    http://www.w3.org/TR/REC-html40/interact/forms.html#h-17.13.4.1).

    Takes a mapping such as {'a': 1, 'b': 2} and returns a byte string
    such as "a=1&b=2".  A value of None produces a bare parameter
    ("?a&b=1"), list values produce one pair per item, and each value is
    encoded with its datatype from *schema* (default: String).
    """
    from itools.datatypes import String
    if schema is None:
        schema = {}
    pairs = []
    for raw_key, value in query.items():
        key = quote_plus(raw_key)
        # XXX Per the content type a parameter without a value is
        # redundant, but RFC2396's tests require preserving these
        # empty parameters, so emit the bare key.
        if value is None:
            pairs.append(key)
            continue
        # NOTE(review): the schema is looked up with the already-quoted
        # key (original behavior) -- confirm this is intended.
        datatype = schema.get(key, String)
        items = value if isinstance(value, list) else [value]
        for item in items:
            pairs.append('%s=%s' % (key, quote_plus(datatype.encode(item))))
    return '&'.join(pairs)
4dae59a25a2e3968740f64479a5748b31713e8cf /local1/tlutelli/issta_data/temp/all_python//python/2008_temp/2008/12681/4dae59a25a2e3968740f64479a5748b31713e8cf/generic.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2017, 67, 2271, 12, 2271, 16, 1963, 33, 7036, 4672, 3536, 2503, 707, 16834, 279, 843, 487, 2553, 635, 326, 315, 3685, 19, 92, 17, 5591, 17, 687, 17, 19690, 6, 913, 618, 261, 5946, 10...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 2017, 67, 2271, 12, 2271, 16, 1963, 33, 7036, 4672, 3536, 2503, 707, 16834, 279, 843, 487, 2553, 635, 326, 315, 3685, 19, 92, 17, 5591, 17, 687, 17, 19690, 6, 913, 618, 261, 5946, 10...
if old_device is None and not is_eraser(new_device): return
def is_eraser(device):
    """Return True when *device* is an eraser input device.

    A device counts as an eraser either by its GDK source type or by
    having "eraser" in its (case-insensitive) name.  None is never an
    eraser.
    """
    if device is not None:
        by_source = device.source == gdk.SOURCE_ERASER
        by_name = 'eraser' in device.name.lower()
        return by_source or by_name
    return False
b09feb597ad2e733b9dc5d84ecc853e2c5d12452 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7129/b09feb597ad2e733b9dc5d84ecc853e2c5d12452/drawwindow.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 353, 67, 12067, 264, 12, 5964, 4672, 309, 2346, 353, 599, 30, 327, 1083, 327, 2346, 18, 3168, 422, 314, 2883, 18, 6537, 67, 654, 37, 2123, 578, 296, 12067, 264, 11, 316, 2346, 18, 52...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 353, 67, 12067, 264, 12, 5964, 4672, 309, 2346, 353, 599, 30, 327, 1083, 327, 2346, 18, 3168, 422, 314, 2883, 18, 6537, 67, 654, 37, 2123, 578, 296, 12067, 264, 11, 316, 2346, 18, 52...
raise RuntimeException(RuntimeException.UNDEFINED_PREFIX, prefix=prefix)
raise XPathError(XPathError.UNDEFINED_PREFIX, prefix=prefix)
def match(self, context, node, principal_type=tree.element):
    """Node test: does *node* match this test's qualified name?

    Returns True/False for a principal-type node whose local name
    equals the test's, comparing the node's namespace against the
    binding of the test's prefix in context.namespaces; returns 0
    (falsy, kept as in the original) otherwise.

    Raises RuntimeException(UNDEFINED_PREFIX) when the prefix has no
    binding in the evaluation context.
    """
    if isinstance(node, principal_type):
        # name_key holds the (prefix, local-name) pair of this test.
        prefix, local_name = self.name_key
        if node.xml_local == local_name:
            try:
                return node.xml_namespace == context.namespaces[prefix]
            except KeyError:
                raise RuntimeException(RuntimeException.UNDEFINED_PREFIX, prefix=prefix)
    return 0
3add7a8b2e0b1b9eb92c39c76ca6ab2156088a07 /local1/tlutelli/issta_data/temp/all_python//python/2009_temp/2009/14078/3add7a8b2e0b1b9eb92c39c76ca6ab2156088a07/nodetests.py
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 845, 12, 2890, 16, 819, 16, 756, 16, 8897, 67, 723, 33, 3413, 18, 2956, 4672, 309, 1549, 12, 2159, 16, 8897, 67, 723, 4672, 1633, 16, 1191, 67, 529, 273, 365, 18, 529, 67, 856, 309...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
[ 1, 8585, 326, 22398, 316, 326, 981, 30, 1652, 845, 12, 2890, 16, 819, 16, 756, 16, 8897, 67, 723, 33, 3413, 18, 2956, 4672, 309, 1549, 12, 2159, 16, 8897, 67, 723, 4672, 1633, 16, 1191, 67, 529, 273, 365, 18, 529, 67, 856, 309...