query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
List the names of available readers Note that this will import all readers.
def names() -> Tuple[str, ...]: return plugins.list_all(package_name=__name__)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_readers():\n return all_readers", "def get_reader_funcs():\n return READERS", "def list_reads(cls) -> list:\n return [cls.FWREAD, cls.RVREAD];", "def namelist(self):\n return self._handle.getnames()", "def namelist(self):\n return self._handle.getnames()", "def list_con...
[ "0.7477495", "0.64536035", "0.63747376", "0.5518427", "0.5518427", "0.5415075", "0.5403663", "0.5381984", "0.5381984", "0.53160554", "0.5299087", "0.527088", "0.52304727", "0.52028954", "0.5191611", "0.5185727", "0.51809806", "0.5179094", "0.5153803", "0.51439035", "0.513742"...
0.47953343
78
Check whether the given reader exists
def exists(reader_name: str) -> bool: return plugins.exists(package_name=__name__, plugin_name=reader_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _haveReadLocks(self): \n readLockFileName = ReadLock.fileName\n for name in os.listdir(self.dir):\n if name.startswith(readLockFileName):\n return True\n return False", "def exists(identifier, network):\n foo = next(load(identifier, network), None)...
[ "0.6164184", "0.5904849", "0.58638656", "0.58638656", "0.5770025", "0.575396", "0.56898904", "0.5686118", "0.56144536", "0.56005126", "0.5580309", "0.5566034", "0.5542885", "0.55397743", "0.5537704", "0.55220896", "0.5509569", "0.54929805", "0.549026", "0.54900736", "0.547761...
0.66304976
0
Get one line documentation for readers If no readers are specified, documentation for all available readers are returned.
def short_docs(*readers: str) -> List[Tuple[str, str]]: if not readers: readers = names() return [(r, plugins.doc(__name__, r, long_doc=False)) for r in readers]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_readers():\n return all_readers", "def get_docs(self):\n return self.retrieve_docstring()", "def get_docs(self):\n return self.retrieve_docstring()", "def get_docs(self):\n return self.retrieve_docstring()", "def read_documentation(self, fid):\r\n\r\n lin = self.read_...
[ "0.6139211", "0.5529351", "0.5529351", "0.5529351", "0.5385095", "0.53439957", "0.52958757", "0.5215697", "0.5117727", "0.49581614", "0.48913068", "0.4863882", "0.48443574", "0.48011702", "0.47767526", "0.47586027", "0.47446275", "0.4685375", "0.46740708", "0.4643249", "0.462...
0.67184293
0
Read a bytes stream with a given reader If the reader is not specified, an attempt to guess at an appropriate reader is made. A NoReaderFound error is raised if no such appropriate reader is found.
def read_stream( input_stream: IO[bytes], reader_name: Optional[str] = None, **reader_args: Any ) -> Reader: if reader_name is None: reader_name = identify(input_stream) reader = plugins.call( package_name=__name__, plugin_name=reader_name, input_stream=input_stream, **reader_args, ) reader.read() return reader
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_reader(fn):\n if is_bed(fn):\n return BedReader(fn)\n elif is_vcf(fn):\n return VcfReader(fn)\n else:\n raise ValueError(\"Could not get reader for %s\" % fn)", "def read_file(\n file_path: Union[str, pathlib.Path],\n reader_name: Optional[str] = None,\n **reader_ar...
[ "0.6057923", "0.6019735", "0.5995071", "0.59576786", "0.5762868", "0.5712855", "0.5576423", "0.544311", "0.54237616", "0.54231197", "0.5403411", "0.53777176", "0.53776944", "0.52933985", "0.5291875", "0.52114725", "0.5191779", "0.5167444", "0.514691", "0.51206833", "0.510413"...
0.6454055
0
Read a file with a given reader If the reader is not specified, an attempt to guess at an appropriate reader is made. A NoReaderFound error is raised if no such appropriate reader is found.
def read_file( file_path: Union[str, pathlib.Path], reader_name: Optional[str] = None, **reader_args: Any, ) -> Reader: with open(file_path, mode="rb") as input_stream: return read_stream(input_stream, reader_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read(filePath, reader='infer'):\n if isinstance(reader, str):\n if reader == 'infer':\n loader = inferReader(filePath)\n else:\n if reader in READERS:\n loader = READERS[reader]\n else:\n raise SerpentToolsException(\n ...
[ "0.73507124", "0.6649994", "0.6541662", "0.6465156", "0.6096228", "0.5905837", "0.5818582", "0.5808917", "0.57055247", "0.5697342", "0.5619084", "0.5508928", "0.5467147", "0.5462562", "0.5446384", "0.5420934", "0.5397053", "0.5380353", "0.53490704", "0.5324267", "0.53047097",...
0.7362303
0
Identify a reader that can read a given file A NoReaderFound error is raised if no such appropriate reader is found.
def identify(input_stream: IO[bytes]) -> str: import IPython where_am_i = "readers.__init__.identify" IPython.embed() raise exceptions.NoReaderFound( f"Found no reader that can read {input_stream.name}" ) from None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_reader(fn):\n if is_bed(fn):\n return BedReader(fn)\n elif is_vcf(fn):\n return VcfReader(fn)\n else:\n raise ValueError(\"Could not get reader for %s\" % fn)", "def inferReader(filePath):\n for reg, reader in six.iteritems(REGEXES):\n match = re.match(reg, filePat...
[ "0.67069465", "0.6362347", "0.60579586", "0.5630345", "0.54816085", "0.5466467", "0.544671", "0.5360829", "0.53323615", "0.53214943", "0.5317366", "0.5301518", "0.5290404", "0.52551913", "0.52348626", "0.51969", "0.5128019", "0.5122039", "0.5115842", "0.50972366", "0.5093929"...
0.48551324
39
Shortcut for symbol creation to test "function" and "indexed" API.
def symbol(name, dimensions, value=0., mode='function'): assert(mode in ['function', 'indexed']) s = DenseData(name=name, dimensions=dimensions) s.data[:] = value return s.indexify() if mode == 'indexed' else s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def FakeSymbol(*args, _op, **kwargs):\n return symbol.Custom(*args, _op=_op, op_type=\"_fake\", **kwargs)", "def __call__(fun_name):", "def input_shortcut(symbol, name=None):\n def input_method(self, *args, **kwargs):\n return self.input(symbol, *args, **kwargs)\n input_method.__nam...
[ "0.6120692", "0.6099593", "0.5789169", "0.5777273", "0.5620227", "0.56047827", "0.55795795", "0.55412346", "0.55109686", "0.54903716", "0.5484314", "0.5438553", "0.5425026", "0.54227895", "0.5353313", "0.5350989", "0.53228265", "0.53207916", "0.5307697", "0.5287561", "0.52811...
0.0
-1
Tests basic pointwise arithmetic on twodimensional data
def test_flat(self, expr, result, mode): i, j = dimify('i j') a = symbol(name='a', dimensions=(i, j), value=2., mode=mode) b = symbol(name='b', dimensions=(i, j), value=3., mode=mode) fa = a.base.function if mode == 'indexed' else a fb = b.base.function if mode == 'indexed' else b eqn = eval(expr) Operator(eqn)(fa, fb) assert np.allclose(fa.data, result, rtol=1e-12)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_data():\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156,\n 0.225, 3.364], dtype=float)\n\n return x, y, z", "def Te...
[ "0.6385182", "0.6282518", "0.61600417", "0.61573863", "0.6152449", "0.61160505", "0.60762215", "0.6071223", "0.60207266", "0.6017882", "0.6017163", "0.59854347", "0.59580594", "0.59534204", "0.5947321", "0.591167", "0.5898243", "0.58920926", "0.58749056", "0.58674467", "0.586...
0.0
-1
Tests basic pointwise arithmetic on multidimensional data
def test_deep(self, expr, result, mode): i, j, k, l = dimify('i j k l') a = symbol(name='a', dimensions=(i, j, k, l), value=2., mode=mode) b = symbol(name='b', dimensions=(j, k), value=3., mode=mode) fa = a.base.function if mode == 'indexed' else a fb = b.base.function if mode == 'indexed' else b eqn = eval(expr) Operator(eqn)(fa, fb) assert np.allclose(fa.data, result, rtol=1e-12)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_multidimensional_operation(self):\n # start with something (1, 2, 3)\n data = np.array([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]])\n\n # split 1st dim (2, 2, 3)\n coefficients = np.ones((1, 2)) / 2\n expected = np.array(\n [[[0.0, 0.5, 1.0], [1.5, 2.0, 2.5]], [[0.0,...
[ "0.6647943", "0.6466135", "0.6279551", "0.62339604", "0.6172996", "0.61697084", "0.6152088", "0.6150274", "0.6137789", "0.6136748", "0.6109442", "0.6009509", "0.5945426", "0.591577", "0.5912449", "0.590353", "0.5883301", "0.58808935", "0.5876383", "0.587444", "0.58706623", ...
0.0
-1
Tests pointwise increments with stencil offsets in one dimension
def test_indexed_increment(self, expr, result): j, l = dimify('j l') a = symbol(name='a', dimensions=(j, l), value=2., mode='indexed').base fa = a.function fa.data[1:, 1:] = 0 eqn = eval(expr) Operator(eqn)(fa) assert np.allclose(fa.data, result, rtol=1e-12)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def coordination(c, stencil=nn_stencil):\n\n coordination = np.zeros_like(c, dtype=int)\n for dx, dy in stencil:\n tmp = np.array(c, dtype=bool, copy=True)\n if dx != 0:\n tmp = np.roll(tmp, dx, 0)\n if dy != 0:\n tmp = np.roll(tmp, dy, 1)\n coordination += t...
[ "0.65050405", "0.54784226", "0.5474315", "0.5348384", "0.5347258", "0.5336783", "0.53343135", "0.53305495", "0.5314463", "0.52894896", "0.5283575", "0.5209135", "0.52068275", "0.5183291", "0.5156076", "0.5149829", "0.51383203", "0.51378614", "0.5116575", "0.51146805", "0.5108...
0.0
-1
Test pointwise arithmetic with stencil offsets across two functions in indexed expression format
def test_indexed_stencil(self, expr, result): j, l = dimify('j l') a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base fa = a.function b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base fb = b.function eqn = eval(expr) Operator(eqn)(fa, fb) assert np.allclose(fa.data[1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_offsets():\n B = 100\n H = 20\n E = 210000\n sections = ((B, H, 0, E),)\n sections2 = ((B, H, 12.435, E),)\n EI, top, bot = bm.EI(sections, E)\n EI2, top2, bot2 = bm.EI(sections2, E)\n assert 0.99 < EI / EI2 < 1.01\n assert 0.99 < top / top2 < 1.01\n assert 0.99 < bot / bot2 ...
[ "0.54499704", "0.54236096", "0.54200417", "0.54183537", "0.53868484", "0.5386813", "0.5386671", "0.53676295", "0.53601813", "0.5349417", "0.53437924", "0.53436583", "0.5304866", "0.53009784", "0.5292884", "0.52877325", "0.5283028", "0.5282718", "0.5279577", "0.52767044", "0.5...
0.70558316
0
Test pointwise arithmetic with stencil offsets across a single functions with buffering dimension in indexed expression format
def test_indexed_buffered(self, expr, result): i, j, l = dimify('i j l') a = symbol(name='a', dimensions=(i, j, l), value=2., mode='indexed').base fa = a.function eqn = eval(expr) Operator(eqn)(fa) assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_indexed_stencil(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base\n fa = a.function\n b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base\n fb = b.function\n\n eqn = eval(expr)\n...
[ "0.70109046", "0.5731927", "0.5725207", "0.5534058", "0.5485873", "0.54508567", "0.5420271", "0.5402067", "0.53938526", "0.5361206", "0.53606236", "0.5359261", "0.5359202", "0.5320887", "0.5293664", "0.5291382", "0.5265721", "0.52385396", "0.523383", "0.52290106", "0.52282166...
0.5903518
1
Test pointwise arithmetic with stencil offsets and open loop boundaries in indexed expression format
def test_indexed_open_loops(self, expr, result): i, j, l = dimify('i j l') pushed = [d.size for d in [j, l]] j.size = None l.size = None a = DenseData(name='a', dimensions=(i, j, l), shape=(3, 5, 6)).indexed fa = a.function fa.data[0, :, :] = 2. eqn = eval(expr) Operator(eqn)(fa) assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12) j.size, l.size = pushed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_indexed_stencil(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base\n fa = a.function\n b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base\n fb = b.function\n\n eqn = eval(expr)\n...
[ "0.6950202", "0.58628595", "0.56040186", "0.5595086", "0.5519812", "0.54418904", "0.54108006", "0.533119", "0.53181374", "0.53098184", "0.5292402", "0.5292149", "0.5247795", "0.5246134", "0.523379", "0.5228938", "0.5216184", "0.51775336", "0.51391", "0.513825", "0.51371264", ...
0.6362784
1
Test that the calltime overriding of Operator arguments works
def test_override_cache_aliasing(self): i, j, k, l = dimify('i j k l') a = symbol(name='a', dimensions=(i, j, k, l), value=2., mode='indexed').base.function a1 = symbol(name='a', dimensions=(i, j, k, l), value=3., mode='indexed').base.function a2 = symbol(name='a', dimensions=(i, j, k, l), value=4., mode='indexed').base.function eqn = Eq(a, a+3) op = Operator(eqn) op() op(a=a1) op(a=a2) shape = [d.size for d in [i, j, k, l]] assert(np.allclose(a.data, np.zeros(shape) + 5)) assert(np.allclose(a1.data, np.zeros(shape) + 6)) assert(np.allclose(a2.data, np.zeros(shape) + 7))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(a, b):", "def _OverloadOperator(operator): # pylint: disable=invalid-name\n\n tensor_oper = getattr(ops.Tensor, operator)\n\n def _run_op(a, *args):\n # pylint: disable=protected-access\n value = a._AsTensor()\n return tensor_oper(value, *args)\n\n ...
[ "0.6805851", "0.6484445", "0.6379259", "0.6364742", "0.6364742", "0.6306603", "0.6228619", "0.61764985", "0.61253047", "0.61219794", "0.6098357", "0.60761136", "0.59945655", "0.5985499", "0.5953652", "0.5947286", "0.59347874", "0.59075207", "0.5906247", "0.590326", "0.5891721...
0.0
-1
Test calltime symbols overrides with other symbols
def test_override_symbol(self): i, j, k, l = dimify('i j k l') a = symbol(name='a', dimensions=(i, j, k, l), value=2.) a1 = symbol(name='a1', dimensions=(i, j, k, l), value=3.) a2 = symbol(name='a2', dimensions=(i, j, k, l), value=4.) op = Operator(Eq(a, a + 3)) op() op(a=a1) op(a=a2) shape = [d.size for d in [i, j, k, l]] assert(np.allclose(a.data, np.zeros(shape) + 5)) assert(np.allclose(a1.data, np.zeros(shape) + 6)) assert(np.allclose(a2.data, np.zeros(shape) + 7))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testSymbolHash(self):\n gScope = pykd.diaLoadPdb( str(target.module.pdb()) )\n symSet = set([ gScope[\"g_structTest\"], gScope[\"EnumWindowsProc1\"], gScope[\"g_structTest\"] ])\n self.assertEqual( 2, len(symSet) )\n self.assertTrue( gScope[\"g_structTest\"] in symSet )\n sel...
[ "0.6088717", "0.5891969", "0.586841", "0.57884955", "0.57511026", "0.5713668", "0.5713668", "0.57010543", "0.5664404", "0.5631575", "0.5563887", "0.55595315", "0.55191725", "0.5508196", "0.54970926", "0.54917103", "0.5480295", "0.54737866", "0.5444322", "0.54287255", "0.54046...
0.58977205
1
Test calltime symbols overrides with numpy arrays
def test_override_array(self): i, j, k, l = dimify('i j k l') shape = tuple(d.size for d in (i, j, k, l)) a = symbol(name='a', dimensions=(i, j, k, l), value=2.) a1 = np.zeros(shape=shape, dtype=np.float32) + 3. a2 = np.zeros(shape=shape, dtype=np.float32) + 4. op = Operator(Eq(a, a + 3)) op() op(a=a1) op(a=a2) shape = [d.size for d in [i, j, k, l]] assert(np.allclose(a.data, np.zeros(shape) + 5)) assert(np.allclose(a1, np.zeros(shape) + 6)) assert(np.allclose(a2, np.zeros(shape) + 7))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_TimeArray_repr():", "def test_format_signature_numpy():", "def test_TimeArray_convert_unit():", "def test_TimeArray_copyflag():\r\n\r\n #These two should both generate a TimeArray, with one picosecond.\r\n #This one holds time_unit='s'\r\n t1 = ts.TimeArray(np.array([1], dtype=np.int64), co...
[ "0.6313198", "0.6234536", "0.61531353", "0.60441715", "0.5897211", "0.58689487", "0.5791092", "0.5729955", "0.5682435", "0.5682435", "0.5633471", "0.55964816", "0.55935395", "0.5578403", "0.5577986", "0.5554115", "0.5551436", "0.555043", "0.554579", "0.5534603", "0.55257624",...
0.6475489
0
Test that the dimension sizes are being inferred correctly
def test_dimension_size_infer(self, nt=100): i, j, k = dimify('i j k') shape = tuple([d.size for d in [i, j, k]]) a = DenseData(name='a', shape=shape).indexed b = TimeData(name='b', shape=shape, save=True, time_dim=nt).indexed eqn = Eq(b[time, x, y, z], a[x, y, z]) op = Operator(eqn) _, op_dim_sizes = op.arguments() assert(op_dim_sizes[time.name] == nt)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_size_check(self):\n [x1, y1, s1, g1] = self.data.diffusion_data.shape\n [x2, y2, s2, g2] = module_05.run_module(self.data).diffusion_data.shape\n self.assertEqual(x1, x2)\n self.assertEqual(y1, y2)\n self.assertEqual(s1, s2)\n self.assertEqual(g1, g2)", "def dim...
[ "0.76725674", "0.75539386", "0.74125654", "0.7359512", "0.73051524", "0.72323257", "0.7225344", "0.7185799", "0.71210706", "0.6941943", "0.68519884", "0.6851906", "0.68469083", "0.68195313", "0.6812142", "0.67658305", "0.6746414", "0.6741667", "0.6688384", "0.66588515", "0.66...
0.76781756
0
Test explicit overrides for the leading time dimension
def test_dimension_size_override(self, nt=100): i, j, k = dimify('i j k') a = TimeData(name='a', dimensions=(i, j, k)) one = symbol(name='one', dimensions=(i, j, k), value=1.) op = Operator(Eq(a.forward, a + one)) # Test dimension override via the buffered dimenions a.data[0] = 0. op(a=a, t=6) assert(np.allclose(a.data[1], 5.)) # Test dimension override via the parent dimenions a.data[0] = 0. op(a=a, time=5) assert(np.allclose(a.data[0], 4.))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_general_subset_invalid_time():\n pass", "def test_fix_metadata_correct_time(self):\n fixed_cube = self.fix.fix_metadata([self.cube])[0]\n time_coord = fixed_cube.coord('time')\n np.testing.assert_allclose(time_coord.points, [0, 1])\n assert time_coord.bounds is None", "d...
[ "0.6225104", "0.61606276", "0.60171777", "0.59855527", "0.5899436", "0.5890807", "0.57126755", "0.56421477", "0.5499632", "0.5453681", "0.54009974", "0.53884566", "0.53690064", "0.534088", "0.5325772", "0.5315858", "0.530437", "0.5300429", "0.52890176", "0.5282546", "0.527451...
0.6123395
2
Emulates a potential implementation of boundary condition loops
def test_directly_indexed_expression(self, fa, ti0, t0, exprs): eqs = EVAL(exprs, ti0.base, t0) op = Operator(eqs, dse='noop', dle='noop') trees = retrieve_iteration_tree(op) assert len(trees) == 2 assert trees[0][-1].nodes[0].expr.rhs == eqs[0].rhs assert trees[1][-1].nodes[0].expr.rhs == eqs[1].rhs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def simulate_boundary(self,print_every=1000,do_F_bound=True):\n n_t = self.t_span.size\n self.n_t = n_t\n x = self.x0.copy()\n self._triangulate(x)\n self.assign_vertices()\n x = self.check_boundary(x)\n self.x = x.copy()\n sel...
[ "0.6390884", "0.633568", "0.63244265", "0.6194868", "0.6181194", "0.6133254", "0.5930542", "0.58581144", "0.58474624", "0.58428305", "0.5813989", "0.5792263", "0.57295126", "0.57068944", "0.5669466", "0.5665661", "0.5663369", "0.56552625", "0.56534404", "0.56359583", "0.56296...
0.0
-1
Test the generation of a specific box profile against a known result.
def test_box(): savedImg = galsim.fits.read(os.path.join(imgdir, "box_1.fits")) myImg = galsim.ImageF(savedImg.bounds, scale=0.2) myImg.setCenter(0,0) test_flux = 1.8 pixel = galsim.Pixel(scale=1, flux=1) pixel.drawImage(myImg, method="sb", use_true_center=False) np.testing.assert_array_almost_equal( myImg.array, savedImg.array, 5, err_msg="Using GSObject Pixel disagrees with expected result") np.testing.assert_array_equal( pixel.scale, 1, err_msg="Pixel scale returned wrong value") # Check with default_params pixel = galsim.Pixel(scale=1, flux=1, gsparams=default_params) pixel.drawImage(myImg, method="sb", use_true_center=False) np.testing.assert_array_almost_equal( myImg.array, savedImg.array, 5, err_msg="Using GSObject Pixel with default_params disagrees with expected result") pixel = galsim.Pixel(scale=1, flux=1, gsparams=galsim.GSParams()) pixel.drawImage(myImg, method="sb", use_true_center=False) np.testing.assert_array_almost_equal( myImg.array, savedImg.array, 5, err_msg="Using GSObject Pixel with GSParams() disagrees with expected result") # Use non-unity values. pixel = galsim.Pixel(flux=1.7, scale=2.3) gsp = galsim.GSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8) pixel2 = galsim.Pixel(flux=1.7, scale=2.3, gsparams=gsp) assert pixel2 != pixel assert pixel2 == pixel.withGSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8) # Test photon shooting. do_shoot(pixel,myImg,"Pixel") # Check picklability do_pickle(pixel, lambda x: x.drawImage(method='no_pixel')) do_pickle(pixel) do_pickle(galsim.Pixel(1)) # Check that non-square Box profiles work correctly scale = 0.2939 # Use a strange scale here to make sure that the centers of the pixels # never fall on the box edge, otherwise it gets a bit weird to know what # the correct SB value is for that pixel. 
im = galsim.ImageF(16,16, scale=scale) gsp = galsim.GSParams(maximum_fft_size = 30000) for (width,height) in [ (3,2), (1.7, 2.7), (2.2222, 3.1415) ]: box = galsim.Box(width=width, height=height, flux=test_flux, gsparams=gsp) check_basic(box, "Box with width,height = %f,%f"%(width,height)) do_shoot(box,im,"Box with width,height = %f,%f"%(width,height)) if __name__ == '__main__': # These are slow because they require a pretty huge fft. # So only do them if running as main. do_kvalue(box,im,"Box with width,height = %f,%f"%(width,height)) cen = galsim.PositionD(0, 0) np.testing.assert_equal(box.centroid, cen) np.testing.assert_almost_equal(box.kValue(cen), (1+0j) * test_flux) np.testing.assert_almost_equal(box.flux, test_flux) np.testing.assert_almost_equal(box.xValue(cen), box.max_sb) np.testing.assert_almost_equal(box.xValue(width/2.-0.001, height/2.-0.001), box.max_sb) np.testing.assert_almost_equal(box.xValue(width/2.-0.001, height/2.+0.001), 0.) np.testing.assert_almost_equal(box.xValue(width/2.+0.001, height/2.-0.001), 0.) np.testing.assert_almost_equal(box.xValue(width/2.+0.001, height/2.+0.001), 0.) 
np.testing.assert_array_equal( box.width, width, err_msg="Box width returned wrong value") np.testing.assert_array_equal( box.height, height, err_msg="Box height returned wrong value") gsp2 = galsim.GSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8) box2 = galsim.Box(width=width, height=height, flux=test_flux, gsparams=gsp2) assert box2 != box assert box2 == box.withGSParams(gsp2) assert box2 != box.withGSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8) assert box2.withGSParams(maximum_fft_size=30000) == box.withGSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8) # Check picklability do_pickle(box, lambda x: x.drawImage(method='no_pixel')) do_pickle(box) do_pickle(galsim.Box(1,1)) # Check sheared boxes the same way box = galsim.Box(width=3, height=2, flux=test_flux, gsparams=gsp) box = box.shear(galsim.Shear(g1=0.2, g2=-0.3)) check_basic(box, "Sheared Box", approx_maxsb=True) do_shoot(box,im, "Sheared Box") if __name__ == '__main__': do_kvalue(box,im, "Sheared Box") do_pickle(box, lambda x: x.drawImage(method='no_pixel')) do_pickle(box) cen = galsim.PositionD(0, 0) np.testing.assert_equal(box.centroid, cen) np.testing.assert_almost_equal(box.kValue(cen), (1+0j) * test_flux) np.testing.assert_almost_equal(box.flux, test_flux) np.testing.assert_almost_equal(box.xValue(cen), box.max_sb) # This is also a profile that may be convolved using real space convolution, so test that. if __name__ == '__main__': conv = galsim.Convolve(box, galsim.Pixel(scale=scale), real_space=True) check_basic(conv, "Sheared Box convolved with pixel in real space", approx_maxsb=True, scale=0.2) do_kvalue(conv,im, "Sheared Box convolved with pixel in real space") do_pickle(conv, lambda x: x.xValue(0.123,-0.456)) do_pickle(conv)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_game_boxscore(self):\n pass", "def testProfileCreation(self):\n small_tree1_equality = self.checkProfileEquality(self.profiles[0], self.small_profile1)\n small_tree2_equality = self.checkProfileEquality(self.profiles[1], self.small_profile2)\n known_tree1_equality = self....
[ "0.61851823", "0.58061886", "0.5779419", "0.5712594", "0.5674602", "0.5597677", "0.55736697", "0.5516804", "0.5508091", "0.5494304", "0.5471239", "0.5412909", "0.5381883", "0.5344296", "0.53197837", "0.531884", "0.53139627", "0.52807105", "0.52799165", "0.52634877", "0.525860...
0.5113333
37
Test the generation of a specific tophat profile against a known result.
def test_tophat(): savedImg = galsim.fits.read(os.path.join(imgdir, "tophat_101.fits")) myImg = galsim.ImageF(savedImg.bounds, scale=0.2) myImg.setCenter(0,0) test_flux = 1.8 # There are numerical issues with using radius = 1, since many points are right on the edge # of the circle. e.g. (+-1,0), (0,+-1), (+-0.6,+-0.8), (+-0.8,+-0.6). And in practice, some # of these end up getting drawn and not others, which means it's not a good choice for a unit # test since it wouldn't be any less correct for a different subset of these points to be # drawn. Using r = 1.01 solves this problem and makes the result symmetric. tophat = galsim.TopHat(radius=1.01, flux=1) tophat.drawImage(myImg, method="sb", use_true_center=False) np.testing.assert_array_almost_equal( myImg.array, savedImg.array, 5, err_msg="Using GSObject TopHat disagrees with expected result") np.testing.assert_array_equal( tophat.radius, 1.01, err_msg="TopHat radius returned wrong value") # Check with default_params tophat = galsim.TopHat(radius=1.01, flux=1, gsparams=default_params) tophat.drawImage(myImg, method="sb", use_true_center=False) np.testing.assert_array_almost_equal( myImg.array, savedImg.array, 5, err_msg="Using GSObject TopHat with default_params disagrees with expected result") tophat = galsim.TopHat(radius=1.01, flux=1, gsparams=galsim.GSParams()) tophat.drawImage(myImg, method="sb", use_true_center=False) np.testing.assert_array_almost_equal( myImg.array, savedImg.array, 5, err_msg="Using GSObject TopHat with GSParams() disagrees with expected result") # Use non-unity values. tophat = galsim.TopHat(flux=1.7, radius=2.3) gsp = galsim.GSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8) tophat2 = galsim.TopHat(flux=1.7, radius=2.3, gsparams=gsp) assert tophat2 != tophat assert tophat2 == tophat.withGSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8) # Test photon shooting. 
do_shoot(tophat,myImg,"TopHat") # Test shoot and kvalue scale = 0.2939 im = galsim.ImageF(16,16, scale=scale) # The choices of radius here are fairly specific. If the edge of the circle comes too close # to the center of one of the pixels, then the test will fail, since the Fourier draw method # will blur the edge a bit and give some flux to that pixel. for radius in [ 1.2, 0.93, 2.11 ]: tophat = galsim.TopHat(radius=radius, flux=test_flux) check_basic(tophat, "TopHat with radius = %f"%radius) do_shoot(tophat,im,"TopHat with radius = %f"%radius) do_kvalue(tophat,im,"TopHat with radius = %f"%radius) # This is also a profile that may be convolved using real space convolution, so test that. conv = galsim.Convolve(tophat, galsim.Pixel(scale=scale), real_space=True) check_basic(conv, "TopHat convolved with pixel in real space", approx_maxsb=True, scale=0.2) do_kvalue(conv,im, "TopHat convolved with pixel in real space") cen = galsim.PositionD(0, 0) np.testing.assert_equal(tophat.centroid, cen) np.testing.assert_almost_equal(tophat.kValue(cen), (1+0j) * test_flux) np.testing.assert_almost_equal(tophat.flux, test_flux) np.testing.assert_almost_equal(tophat.xValue(cen), tophat.max_sb) np.testing.assert_almost_equal(tophat.xValue(radius-0.001, 0.), tophat.max_sb) np.testing.assert_almost_equal(tophat.xValue(0., radius-0.001), tophat.max_sb) np.testing.assert_almost_equal(tophat.xValue(radius+0.001, 0.), 0.) np.testing.assert_almost_equal(tophat.xValue(0., radius+0.001), 0.) # Check picklability do_pickle(tophat, lambda x: x.drawImage(method='no_pixel')) do_pickle(tophat) do_pickle(galsim.TopHat(1)) # Check sheared tophat the same way tophat = galsim.TopHat(radius=1.2, flux=test_flux) # Again, the test is very sensitive to the choice of shear here. Most values fail because # some pixel center gets too close to the resulting ellipse for the fourier draw to match # the real-space draw at the required accuracy. 
tophat = tophat.shear(galsim.Shear(g1=0.15, g2=-0.33)) check_basic(tophat, "Sheared TopHat") do_shoot(tophat,im, "Sheared TopHat") do_kvalue(tophat,im, "Sheared TopHat") cen = galsim.PositionD(0, 0) np.testing.assert_equal(tophat.centroid, cen) np.testing.assert_almost_equal(tophat.kValue(cen), (1+0j) * test_flux) np.testing.assert_almost_equal(tophat.flux, test_flux) np.testing.assert_almost_equal(tophat.xValue(cen), tophat.max_sb) # Check picklability do_pickle(tophat, lambda x: x.drawImage(method='no_pixel')) do_pickle(tophat) # Check real-space convolution of the sheared tophat. conv = galsim.Convolve(tophat, galsim.Pixel(scale=scale), real_space=True) check_basic(conv, "Sheared TopHat convolved with pixel in real space", approx_maxsb=True, scale=0.2) do_kvalue(conv,im, "Sheared TopHat convolved with pixel in real space")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_run_profile(dpa_model_spec, dpa_init):\n test_times = np.array(\n DateTime(['2019:001:00:00:00', '2019:001:12:00:00', '2019:002:00:00:00', '2019:003:00:00:00']).secs)\n pitch = np.array([150, 90, 156, 156])\n roll = np.array([0, -5, 10, 0])\n test_schedule = {'pitch': pitch, 'roll': rol...
[ "0.60760397", "0.59960103", "0.578803", "0.57689196", "0.56904364", "0.5670949", "0.5598614", "0.5586479", "0.55442274", "0.5540673", "0.5539732", "0.5532514", "0.5515403", "0.55088836", "0.54860824", "0.54771906", "0.54092103", "0.5376669", "0.53727055", "0.53688246", "0.534...
0.0
-1
Test Box with photon shooting. Particularly the flux of the final image.
def test_box_shoot(): rng = galsim.BaseDeviate(1234) obj = galsim.Box(width=1.3, height=2.4, flux=1.e4) im = galsim.Image(100,100, scale=1) im.setCenter(0,0) added_flux, photons = obj.drawPhot(im, poisson_flux=False, rng=rng.duplicate()) print('obj.flux = ',obj.flux) print('added_flux = ',added_flux) print('photon fluxes = ',photons.flux.min(),'..',photons.flux.max()) print('image flux = ',im.array.sum()) assert np.isclose(added_flux, obj.flux) assert np.isclose(im.array.sum(), obj.flux) photons2 = obj.makePhot(poisson_flux=False, rng=rng) assert photons2 == photons, "Box makePhot not equivalent to drawPhot" obj = galsim.Pixel(scale=9.3, flux=1.e4) added_flux, photons = obj.drawPhot(im, poisson_flux=False, rng=rng.duplicate()) print('obj.flux = ',obj.flux) print('added_flux = ',added_flux) print('photon fluxes = ',photons.flux.min(),'..',photons.flux.max()) print('image flux = ',im.array.sum()) assert np.isclose(added_flux, obj.flux) assert np.isclose(im.array.sum(), obj.flux) photons2 = obj.makePhot(poisson_flux=False, rng=rng) assert photons2 == photons, "Pixel makePhot not equivalent to drawPhot" obj = galsim.TopHat(radius=4.7, flux=1.e4) added_flux, photons = obj.drawPhot(im, poisson_flux=False, rng=rng.duplicate()) print('obj.flux = ',obj.flux) print('added_flux = ',added_flux) print('photon fluxes = ',photons.flux.min(),'..',photons.flux.max()) print('image flux = ',im.array.sum()) assert np.isclose(added_flux, obj.flux) assert np.isclose(im.array.sum(), obj.flux) photons2 = obj.makePhot(poisson_flux=False, rng=rng) assert photons2 == photons, "TopHat makePhot not equivalent to drawPhot"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_box():\n savedImg = galsim.fits.read(os.path.join(imgdir, \"box_1.fits\"))\n myImg = galsim.ImageF(savedImg.bounds, scale=0.2)\n myImg.setCenter(0,0)\n test_flux = 1.8\n\n pixel = galsim.Pixel(scale=1, flux=1)\n pixel.drawImage(myImg, method=\"sb\", use_true_center=False)\n np.testing...
[ "0.6883019", "0.60247976", "0.5729917", "0.5662367", "0.5657805", "0.56022036", "0.5602087", "0.55716115", "0.5532589", "0.5502499", "0.5473831", "0.54660267", "0.54527724", "0.54423535", "0.5430135", "0.5401244", "0.5400176", "0.53518057", "0.53516215", "0.53498757", "0.5337...
0.72881216
0
Test base.py GSObjects for notequals.
def test_ne(): # Define some universal gsps gsp = galsim.GSParams(maxk_threshold=1.1e-3, folding_threshold=5.1e-3) # Pixel. Params include scale, flux, gsparams. # gsparams. # The following should all test unequal: gals = [galsim.Pixel(scale=1.0), galsim.Pixel(scale=1.1), galsim.Pixel(scale=1.0, flux=1.1), galsim.Pixel(scale=1.0, gsparams=gsp)] all_obj_diff(gals) # Box. Params include width, height, flux, gsparams. # gsparams. # The following should all test unequal: gals = [galsim.Box(width=1.0, height=1.0), galsim.Box(width=1.1, height=1.0), galsim.Box(width=1.0, height=1.1), galsim.Box(width=1.0, height=1.0, flux=1.1), galsim.Box(width=1.0, height=1.0, gsparams=gsp)] all_obj_diff(gals) # TopHat. Params include radius, flux, gsparams. # gsparams. # The following should all test unequal: gals = [galsim.TopHat(radius=1.0), galsim.TopHat(radius=1.1), galsim.TopHat(radius=1.0, flux=1.1), galsim.TopHat(radius=1.0, gsparams=gsp)] all_obj_diff(gals)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_not_equal_on_equal(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n self.assertFalse(a != b)\n self.assertFalse(b != a)", "def test_not_equal_on_not_equal_value(self):\n a =...
[ "0.74571276", "0.73648274", "0.73306084", "0.72958326", "0.7260039", "0.72194594", "0.7127766", "0.70569617", "0.7055979", "0.7028425", "0.70240885", "0.7012719", "0.6914279", "0.68538886", "0.68433744", "0.6823976", "0.6806015", "0.6802378", "0.6794009", "0.675546", "0.67511...
0.0
-1
Decide whether to enter hotspot mode or wifi mode and then do so
def set_wifi_mode(args): pass """+ try: if args['mode'] == 'hotspot': logger.info('will enter hotspot mode') #TODO - Need to capture the line that contains interface [some lan id] and uncomment it. change_file_line(path.join('/etc', 'dhcpcd.conf'), interface_l1_res, 'interface {}\n'.format() return True if args['silent'] else 'Ok' if args['mode'] == 'wi-fi': logger.info('will enter wi-fi mode') return True if args['silent'] else 'Ok' else: logger.error('Unknown wi-fi mode: {}'.format(args['mode'])) return False if args['silent'] else 'ERROR' except: logger.error('Exception in set_wifi_mode: {}, {}'.format(exc_info()[0], exc_info()[1])) return False if args['silent'] else 'ERROR' """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _cycle_wifi(mode=None):\n call(['ifdown', settings.WIFI_INTERFACE])\n if mode is not None:\n call(['iwconfig', settings.WIFI_INTERFACE, 'mode', mode])\n call(['ifup', settings.WIFI_INTERFACE])", "def switch_network(self,type = None):\n network_type = self.appconfig(type...
[ "0.6191428", "0.60795987", "0.60477906", "0.5971737", "0.59445924", "0.58588445", "0.57119495", "0.570681", "0.57007", "0.5689363", "0.5664943", "0.5628451", "0.5603096", "0.5597446", "0.55574036", "0.55567616", "0.5528155", "0.55259633", "0.5525236", "0.5521546", "0.5498724"...
0.71384984
0
Get a pointer for function name with provided argtypes and restype
def ptr(self, space, w_name, w_argtypes, w_restype, flags=FUNCFLAG_CDECL): resshape = unpack_resshape(space, w_restype) if resshape is None: w_resshape = space.w_None else: w_resshape = resshape argtypes_w = space.fixedview(w_argtypes) w_argtypes = space.newtuple(argtypes_w) w_key = space.newtuple([w_name, w_argtypes, w_resshape]) try: return space.getitem(self.w_cache, w_key) except OperationError as e: if e.match(space, space.w_KeyError): pass else: raise # Array arguments not supported directly (in C, an array argument # will be just a pointer). And the result cannot be an array (at all). argshapes = unpack_argshapes(space, w_argtypes) ffi_argtypes = [shape.get_basic_ffi_type() for shape in argshapes] if resshape is not None: ffi_restype = resshape.get_basic_ffi_type() else: ffi_restype = ffi_type_void if space.isinstance_w(w_name, space.w_text): name = space.text_w(w_name) try: ptr = self.cdll.getrawpointer(name, ffi_argtypes, ffi_restype, flags) except KeyError: raise oefmt(space.w_AttributeError, "No symbol %s found in library %s", name, self.name) except LibFFIError: raise got_libffi_error(space) elif (_MS_WINDOWS and space.isinstance_w(w_name, space.w_int)): ordinal = space.int_w(w_name) try: ptr = self.cdll.getrawpointer_byordinal(ordinal, ffi_argtypes, ffi_restype, flags) except KeyError: raise oefmt(space.w_AttributeError, "No symbol %d found in library %s", ordinal, self.name) except LibFFIError: raise got_libffi_error(space) else: raise oefmt(space.w_TypeError, "function name must be string or integer") w_funcptr = W_FuncPtr(space, ptr, argshapes, resshape) space.setitem(self.w_cache, w_key, w_funcptr) return w_funcptr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_func(name, argtypes=None, restype=c_int, lib=libDE):\n logger.debug(\"Getting NewWordFinder API function: 'name': '{}', 'argtypes': '{}',\"\n \" 'restype': '{}'.\".format(name, argtypes, restype))\n func = getattr(lib, name)\n if argtypes is not None:\n func.argtypes = argty...
[ "0.66590554", "0.61740726", "0.61581415", "0.59001285", "0.5855032", "0.58468574", "0.5705164", "0.5677757", "0.5639868", "0.5615557", "0.5614763", "0.56135756", "0.5601841", "0.55988264", "0.5483516", "0.54684794", "0.5429972", "0.53992546", "0.53571665", "0.53392667", "0.53...
0.6079239
3
Set up a root logger showing all entries in the console.
def logger(request): log = logging.getLogger() hdlr = logging.StreamHandler() fmt = '%(asctime)s %(name)s %(levelname)s %(message)s' formatter = logging.Formatter(fmt) hdlr.setFormatter(formatter) log.addHandler(hdlr) log.setLevel(logging.DEBUG) log.propagate = False return log
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_logger():\n root = logging.getLogger()\n root.setLevel(LOGGING_LEVEL)\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(LOGGING_LEVEL)\n ch.setFormatter(formatter)\n root.addHandler(ch)", "def setup_logging():\n f...
[ "0.7975957", "0.7763486", "0.7686616", "0.75928164", "0.7581792", "0.7508208", "0.73854136", "0.73814046", "0.73402363", "0.7313796", "0.7308403", "0.73038954", "0.7274763", "0.7260364", "0.72556174", "0.7249487", "0.7241462", "0.72125137", "0.71615887", "0.71491545", "0.7109...
0.0
-1
Set up a mongo connection reset and ready to roll.
def mongodb(request): from pp.user.model import db as mongo log = get_log('mongodb') db_name = "testingdb-{}".format(uuid.uuid4().hex) mongo.init(dict(db_name=db_name)) db = mongo.db() db.hard_reset() log.info('database ready for testing "{}"'.format(db_name)) def db_teardown(x=None): db.hard_reset() log.warn('teardown database for testing "{}"'.format(db_name)) request.addfinalizer(db_teardown) return db
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mongodb_init(cls, host=\"127.0.0.1\", port=27017, username=\"\", password=\"\", dbname=\"admin\"):\n if username and password:\n uri = \"mongodb://{username}:{password}@{host}:{port}/{dbname}\".format(username=quote_plus(username),\n ...
[ "0.70171124", "0.68669546", "0.6740658", "0.6647585", "0.6619428", "0.6484099", "0.6464825", "0.6440319", "0.6433522", "0.6415763", "0.6397156", "0.6346596", "0.6345212", "0.62915", "0.6224574", "0.61763144", "0.61705923", "0.6150122", "0.6147837", "0.613134", "0.60872823", ...
0.6147428
19
Open a tab delimited file and return links and time
def parser(list_of_text): # Youtube link regex yt_link = re.compile(r"http(s)?:\/\/www\.youtu.*") pron_link = re.compile(r".*pornhub.*") pic_link = re.compile(r"^http(s)?:\/\/.*jpg.*") pics = [link.split() for link in list_of_text if re.match(pic_link, link)] found_yt_links = [line.split() for line in list_of_text if re.match(yt_link, line)] found_pron = [line.split() for line in list_of_text if re.match(pron_link, line)] joined_links = found_yt_links + found_pron return joined_links, pics
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readTab(file_name):\n data = []\n meta = []\n l=0\n for line in open(file_name):\n if l<3:\n meta.append(line.strip(\"\\n\").split(\"\\t\"))\n else:\n if len(line.strip(\"\\n\").split(\"\\t\")) == len(meta[0]):\n data.append(line.strip(\"\\n\").spl...
[ "0.6074462", "0.5697214", "0.55812126", "0.55209875", "0.5477201", "0.54759246", "0.544581", "0.5430003", "0.54228246", "0.53985965", "0.5344914", "0.53337824", "0.5330407", "0.53107756", "0.5296276", "0.5268797", "0.5236354", "0.52124465", "0.52045405", "0.51980215", "0.5169...
0.0
-1
Function that returns true if a string contains a number
def hasNumbers(inputString): return any(char.isdigit() for char in inputString)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __has_numbers(self, input_string):\n return bool(re.search(r'\\d', input_string))", "def has_number(any_string):\n return any(char.isdigit() for char in any_string)", "def has_num(text):\n return any(str.isdigit(c) for c in text)", "def is_number(s):\r\n try:\r\n int(s)\r\n ...
[ "0.86861813", "0.83668447", "0.82383734", "0.7930955", "0.79304606", "0.7924505", "0.78930366", "0.78647584", "0.77653086", "0.7743263", "0.7714202", "0.7706238", "0.77058214", "0.77046204", "0.77044505", "0.7690902", "0.766317", "0.7639565", "0.7527607", "0.7527464", "0.7508...
0.8393111
1
Small function to change time format. Used for make_time func
def tedoius_time(time_string): start = ['start', 'begin', 'beginning', 'head', 'first'] end = ['slut', 'end', 'tail', 'finish', 'finito', 'fin', 'done', 'finished'] if time_string.lower() in start: time_string = "00:00:00" # We need this exact string for later elif time_string.lower() in end: return time_string elif len(time_string) == 1: time_string = f"00:00:0{time_string}" elif len(time_string) == 2: time_string = f"00:00:{time_string}" elif len(time_string) == 3: time_string = f"00:00{time_string}" elif len(time_string) == 4: time_string = f"00:0{time_string}" elif len(time_string) == 5: time_string = f"00:{time_string}" elif len(time_string) == 6: time_string = f"00{time_string}" elif len(time_string) == 7: time_string = f"0{time_string}" elif len(time_string) > 8: raise('Time string too long!') return time_string
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format_time(self, time):\n hh = time[0:2]\n mm = time[2:4]\n ss = time[4:]\n return \"%s:%s:%s UTC\" % (hh,mm,ss)", "def time_hack(self):\n now = datetime.datetime.now()\n monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',\n 'jul', 'aug', 'sep', 'oct', ...
[ "0.7315664", "0.72966367", "0.7183788", "0.7177541", "0.71764636", "0.7169046", "0.7167851", "0.7160244", "0.7139336", "0.7091297", "0.7036572", "0.70216924", "0.6875871", "0.6864549", "0.6824766", "0.6812519", "0.68091184", "0.680905", "0.68039894", "0.67950267", "0.6790418"...
0.0
-1
Function that saves the return_list from make_time to a file called yt_vids.txt Optional, default False
def save_link_time(return_list, path_to_download): # Opens a new file and writes lines to it and saves it at the spot provided with open(os.path.join(path_to_download, "yt_vids.txt"), "w") as w: w.write('\n'.join('{} {} {}'.format( x[0], x[1][0], x[1][1]) for x in return_list))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_save_list_to_file(self):\n task_list = TaskList()\n task1 = Task()\n output_file_path = self.testing_files[0]\n task1.set_name('Test Task One')\n task1.set_minutes(30)\n task1.set_notes('This is a great test task')\n task_list.add_task(task1)\n\n sel...
[ "0.5606279", "0.54776007", "0.5325414", "0.527905", "0.5232085", "0.5220749", "0.5214294", "0.5194305", "0.5157583", "0.5149266", "0.51426595", "0.51125914", "0.50968117", "0.50955224", "0.50598377", "0.50564367", "0.5056409", "0.5055913", "0.50520384", "0.5035325", "0.503340...
0.7020351
0
Function that downloads a whole video when no interval is supplied Downloaded to the same place where yt_vids is saved to (from save_link_time func)
def download_whole(no_interval): print(os.getcwd()) SAVE_PATH = 'tmp' ydl_opts = {"nocheckcertificate": True, "noplaylist": True, 'outtmpl': f'{SAVE_PATH}/%(title)s.%(ext)s'} with youtube_dl.YoutubeDL(ydl_opts) as ydl: for video in range(len(no_interval)): try: ydl.download([no_interval[video]]) except youtube_dl.utils.ExtractorError or youtube_dl.utils.DownloadError: print(f"Couldn't download {no_interval[video]}") continue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download(df_shorter,folderName):\n os.mkdir(str(folderName))\n path = os.getcwd()+'\\\\'+str(folderName)+'\\\\'\n #add column with video link generated from IDs\n df_shorter['urls'] = df_shorter['id'].apply(lambda x: generateLinkFromId(x))\n vid_dl = []\n i = 1\n for url in df_shorter['url...
[ "0.73994005", "0.72689897", "0.7202489", "0.7171504", "0.7060031", "0.6903402", "0.6774965", "0.67710614", "0.67318517", "0.6675659", "0.66156524", "0.66117424", "0.6610987", "0.6610455", "0.6599049", "0.6573128", "0.6570862", "0.6523697", "0.651439", "0.65080476", "0.6456765...
0.74180853
0
Function to download videos in specified intervals Takes a list (interval_list) and a path as inputs
def download_interval(interval_list): start = ['start', 'begin', 'beginning', 'head', 'first'] end = ['slut', 'end', 'tail', 'finish', 'finito', 'fin', 'done', 'finished'] # Iterate over the list for link in range(len(interval_list)): try: video = pafy.new(interval_list[link][0], ydl_opts={ 'nocheckcertificate': True, "noplaylist": True}) # Only downloads the video if the video hasn't been downloaded before if not os.path.exists(os.path.join("tmp", f"{video.title}.mp4")): video_s = video.getbestvideo() # TODO: add a way to get the second best stream (third etc.) when an error occurs using Pafy.videostreams and going through the list video_a = video.getbestaudio() # Checks if the end point is a string if interval_list[link][1][1].lower() in end: # Where is the stream, where should we start, how long should it run mp4_vid = ffmpeg.input( video_s.url, ss=interval_list[link][1][0], t=video.duration) mp4_aud = ffmpeg.input( video_a.url, ss=interval_list[link][1][0], t=video.duration) else: # Where is the stream, where should we start, how long should it run mp4_vid = ffmpeg.input( video_s.url, ss=interval_list[link][1][0], t=interval_list[link][1][1]) mp4_aud = ffmpeg.input( video_a.url, ss=interval_list[link][1][0], t=interval_list[link][1][1]) # Do the processing try: ( ffmpeg .concat( # Specify what you want from the streams (v for video and a for audio) mp4_vid['v'], mp4_aud['a'], # One video stream and one audio stream v=1, a=1 ) # Output is title of video with mp4 ending .output(os.path.join("tmp", f'{video.title}.mp4')) .run() ) except TypeError as e: print(f"An error occurred e 0: {e}") except ffmpeg._run.Error as e: print(f"An error occurred e 1: {e}") except Exception as e: print(f"I couldn't download {interval_list[link]} due to: {e}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_whole(no_interval):\n print(os.getcwd())\n SAVE_PATH = 'tmp'\n ydl_opts = {\"nocheckcertificate\": True, \"noplaylist\": True,\n 'outtmpl': f'{SAVE_PATH}/%(title)s.%(ext)s'}\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n for video in range(len(no_interval)):\n ...
[ "0.6634631", "0.6432747", "0.63896453", "0.6305958", "0.60365444", "0.59619087", "0.5885181", "0.5815725", "0.56896555", "0.5573953", "0.5560245", "0.5557694", "0.55472976", "0.5520894", "0.5493748", "0.5459092", "0.54333603", "0.54173565", "0.54105514", "0.54061747", "0.5402...
0.79695
0
Function to download pictures from the input sequence
def download_pics(pics_links): for link in range(len(pics_links)): r = requests.get(pics_links[link][0]) with open(os.path.join("tmp", f"{link}.jpg"), "wb") as dl: dl.write(r.content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def downloadLocal(url_list,path):\n print(\"You are downloading {} images\".format(parser_arguments().limit),end=\" \");print(\"of {} class.\".format(parser_arguments().classes))\n print(\"Please, be patient :)\")\n for i in range(len(url_list)):\n filename= url_list[i].split(\"/\")[-1] # name of t...
[ "0.66714674", "0.6665392", "0.662957", "0.65895194", "0.6587865", "0.6502355", "0.64724034", "0.64450634", "0.64316654", "0.6391709", "0.6382724", "0.6302078", "0.6292136", "0.6259905", "0.6259636", "0.6252637", "0.6219414", "0.6212664", "0.61892736", "0.61513776", "0.6140734...
0.6981115
0
r""" Calculate drainage curve based on the image produced by the ``porosimetry`` function. Returns
def get_drainage_data(self): im = self.result sizes = sp.unique(im) R = [] Snwp = [] Vp = sp.sum(im > 0) for r in sizes[1:]: R.append(r) Snwp.append(sp.sum(im >= r)) Snwp = [s/Vp for s in Snwp] data = namedtuple('xy_data', ('radius', 'saturation')) return data(R, Snwp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_drainage_efficiency(self):#, PLOT, FIGURE, DISTRIBUTION):\n\n print ' Reading drainage efficiency'\n\n self.drainage_efficiency = {}\n\n drainage = np.zeros(self.ATTM_nrows * self.ATTM_ncols)\n\n for i in range(0, self.ATTM_nrows * self.ATTM_ncols):\n if self.ATTM_Total_Fractional_Ar...
[ "0.5753122", "0.56378067", "0.55967945", "0.55486226", "0.5391032", "0.53572434", "0.5309457", "0.52709997", "0.5266623", "0.5236764", "0.5235034", "0.52270055", "0.520521", "0.51976097", "0.5173026", "0.51659435", "0.5153408", "0.51311153", "0.51155776", "0.51090014", "0.510...
0.54940647
4
computes the sigmoid of z z z can be a matrix, vector or scalar sigmoid
def sigmoid(z): g = np.zeros(z.shape) # ====================== YOUR CODE HERE ====================== # Instructions: Compute the sigmoid of each value of z (z can be a matrix, # vector or scalar). return 1/(1+np.exp(-z))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sigmoid(z):\r\n \r\n return vSigmoid(z);", "def sigmoid(z): \n return 1/(1 + np.e**(-z))", "def sigmoid(z):\r\n \r\n return 1.0 / (1.0+np.exp(-z))", "def sigmoid(z):\n\treturn 1.0/(1.0+np.exp(-z))", "def sigmoid(z):\n return 1 / (1 + np.exp(-1 * z))", "def sigmoid(z):\n\n s = 1...
[ "0.9156451", "0.89855045", "0.89530945", "0.88890535", "0.88774705", "0.8873134", "0.88591874", "0.88549364", "0.8827887", "0.88264954", "0.88236195", "0.8823586", "0.8804889", "0.8797912", "0.87720233", "0.8767488", "0.87623954", "0.87474865", "0.8720549", "0.8660843", "0.86...
0.8674715
19
Parse vistrone image and annotation file
def parse_anno(self, img_path, anno_path) -> dict: anno_obj = dict() img = cv2.imread(img_path) if len(img.shape) == 3: h, w, d = img.shape[:3] anno_obj['size'] = (w, h, d) else: h, w = img.shape[:2] anno_obj['size'] = (w, h, 1) anno_array = np.loadtxt(anno_path, dtype=np.str, delimiter=',') objects = list() if len(anno_array.shape) == 1: # Just one annotation object obj = self.create_anno(anno_array) if obj: objects.append(obj) else: for anno_line in anno_array: obj = self.create_anno(anno_line) if obj: objects.append(obj) if len(objects) == 0: return dict() anno_obj['objects'] = objects return anno_obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unpack_annotation(path):\n buffer = []\n with open(path, 'r') as file:\n lines = file.read()\n\n lines = lines.splitlines()\n for line in lines:\n if not line.startswith('#') and line:\n buffer.append(line)\n\n # Filename to match annotation with photo\n filename = ''...
[ "0.66137487", "0.6137905", "0.6088444", "0.60218704", "0.6018169", "0.6017987", "0.6013739", "0.59796065", "0.5911451", "0.58495057", "0.584628", "0.5830778", "0.58306414", "0.5818035", "0.58085626", "0.5796074", "0.57908374", "0.5716362", "0.57042414", "0.5697437", "0.569646...
0.6034063
3
recourse through an attribute chain to get the ultimate value.
def safe_chain_getattr(obj, attr): return reduce(getattr, attr.split('.'), obj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next(self):\n nextattr = self.iterobj.next()\n return (nextattr.name, self.attrs[nextattr.name])", "def _get_effective_attribute(self, attribute_name):\n result = []\n tmp = self[attribute_name]\n if tmp != None:\n result.append( tmp )\n if tmp == None or ...
[ "0.594284", "0.59209895", "0.5796574", "0.54958975", "0.54589653", "0.54450005", "0.5416079", "0.53792626", "0.5287629", "0.52547", "0.5249592", "0.5235394", "0.5232", "0.522848", "0.52224046", "0.520934", "0.52058285", "0.51851255", "0.51851255", "0.51851255", "0.51851255", ...
0.5363558
8
Get chain attribute for an object.
def chain_getattr(obj, attr, value=None): try: return _resolve_value(safe_chain_getattr(obj, attr)) except AttributeError: return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def safe_chain_getattr(obj, attr):\n return reduce(getattr, attr.split('.'), obj)", "def chained_getattr(obj, path):\n target = obj\n for attr in path:\n target = corner_case_getattr(target, attr)\n return target", "def deepgetattr(obj, attr):\n\t\treturn reduce(getattr, attr.split('.'), obj)", "def...
[ "0.6868703", "0.65075165", "0.6485146", "0.64567274", "0.62154573", "0.61727905", "0.61658573", "0.6000796", "0.5964722", "0.5916216", "0.5916216", "0.5876859", "0.5874207", "0.5869589", "0.5869589", "0.5850275", "0.58463216", "0.58114004", "0.5795329", "0.57904404", "0.57904...
0.7126414
0
trim the list to make total length no more than limit.If split specified,a string is return.
def trim_iterable(iterable, limit, *, split=None, prefix='', postfix=''): if split is None: sl = 0 join = False else: sl = len(split) join = True result = [] rl = 0 for element in iterable: element = prefix + element + postfix el = len(element) if len(result) > 0: el += sl rl += el if rl <= limit: result.append(element) else: break if join: result = split.join(result) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trim(self, input_words_list):\n def to_be_trimmed(x):\n if len(x) < 3:\n return False\n else:\n return True\n self.trimmed_words_list = list(filter(to_be_trimmed, input_words_list))\n # print('the filtered words are:')\n # for word...
[ "0.61260587", "0.61242557", "0.60332996", "0.5976751", "0.59576887", "0.59357154", "0.59291357", "0.575979", "0.5731277", "0.56639487", "0.5637736", "0.5636854", "0.5605196", "0.5510604", "0.5455997", "0.54495543", "0.5449324", "0.544276", "0.54360414", "0.5429126", "0.542896...
0.66803694
0
It decrypts encrypted messages.
def test_decrypt_encrypted(self): encrypted = encrypt('message') decrypted = decrypt(encrypted) assert decrypted == 'message'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decrypt_message(encrypted_message):", "def decrypt(self, data):", "def decrypt_message(self):\r\n\r\n\t\t#Will not let user input useless messages that cannot be decrypted.\r\n\t\twhile True:\r\n\t\t\tself.message = input(\"Please enter a message you would like to decrypt. --> \")\r\n\t\t\tif self.message ...
[ "0.87886894", "0.7859907", "0.7717388", "0.7570975", "0.7548102", "0.74472094", "0.735235", "0.7315939", "0.73016477", "0.7261866", "0.7248655", "0.72426134", "0.71901", "0.7162575", "0.7121405", "0.7052611", "0.7006807", "0.6988077", "0.6984153", "0.6959311", "0.6911666", ...
0.72249025
12
It decrypts encoded messages as UTF8 strings.
def test_decrypt_encoding(self): encrypted = encrypt('méssåge') decrypted = decrypt(encrypted) assert decrypted == 'méssåge'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decrypt_message(encrypted_message):", "def decrypt_message(msg):\n with urllib.request.urlopen(format_url(main_url+\"decrypt.php\",msg)) as f:\n decryptedmessage = f.read().decode('utf-8',\"strict\")\n return decryptedmessage", "def decrypt(self, data):", "def _decrypt(self, msg):\r\n ...
[ "0.7515797", "0.72070503", "0.7080225", "0.70087695", "0.68919706", "0.6883082", "0.6702499", "0.6700832", "0.6643625", "0.6547494", "0.65426725", "0.65246123", "0.65195185", "0.6513772", "0.6512258", "0.6473068", "0.645004", "0.644082", "0.6412383", "0.63955086", "0.6384664"...
0.70483595
3
It raises an error when trying to decrypt a nonencrypted value.
def test_decrypt_format(self): with pytest.raises(EncryptionError): decrypt('message')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_decrypt_key_invalid(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=b'0' * 31)", "def test_decrypt_key_incorrect(self):\n right_key = b'0' * 32\n wrong_key = b'1' * 32\n\n encrypted = e...
[ "0.7508658", "0.73156184", "0.7232054", "0.6764023", "0.66845644", "0.657834", "0.6562093", "0.65003294", "0.6460941", "0.6459456", "0.6367024", "0.63215554", "0.6274202", "0.62535083", "0.62495065", "0.62276834", "0.6201334", "0.6190526", "0.618059", "0.61519164", "0.6145261...
0.76258427
0
It accepts a custom decryption key.
def test_decrypt_key(self): key = b'0' * 32 encrypted = encrypt('message', key=key) assert decrypt(encrypted, key=key) == 'message'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decrypt(ciphertext: str, key: str) -> str:\n return encrypt(ciphertext, key)", "def test_decryption(d, c):\n\n#\td = int(raw_input(\"\\nEnter d from public key\\n\"))\n#\tc = int(raw_input(\"\\nEnter c from public key\\n\"))\n\n x = int(raw_input(\"\\nEnter number to decrypt\\n\"))\n decode(endecryp...
[ "0.6593434", "0.6539305", "0.6523048", "0.65177274", "0.65156114", "0.6480838", "0.645648", "0.63991714", "0.63842076", "0.63480246", "0.634022", "0.6332533", "0.6332092", "0.631312", "0.62985706", "0.6246969", "0.6217749", "0.6211802", "0.6208556", "0.62013805", "0.6200206",...
0.69840544
0
It requires a 32byte key.
def test_decrypt_key_invalid(self): encrypted = encrypt('message', key=b'0' * 32) with pytest.raises(EncryptionError): decrypt(encrypted, key=b'0' * 31)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, key: bytes):\n\n if len(key) != 32:\n raise ValueError('Key must be 32 bytes long')\n self.key = key", "def create_key ():", "def __init__(self, key):\n self.bs = 16\n self.key = hashlib.sha256(key.encode()).digest()", "def load_key():", "def test_l...
[ "0.7998701", "0.7112175", "0.70670855", "0.70627075", "0.7016668", "0.6956229", "0.6922364", "0.69079906", "0.683496", "0.6793663", "0.6758785", "0.6736807", "0.6699168", "0.66464436", "0.66464436", "0.6582861", "0.6575875", "0.6575875", "0.6530098", "0.6464395", "0.64615387"...
0.0
-1
It raises an error when an incorrect key is provided.
def test_decrypt_key_incorrect(self): right_key = b'0' * 32 wrong_key = b'1' * 32 encrypted = encrypt('message', key=right_key) with pytest.raises(EncryptionError): decrypt(encrypted, key=wrong_key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_key(self, key):\n raise NotImplementedError", "def test_handle_key_error():\n\n @handle_key_error\n def get_item(key):\n data = {\"A\": 1, \"B\": 2}\n return data[key]\n\n value = get_item(\"A\")\n assert value == 1\n\n with pytest.raises(InvalidParameter) as exc:\n...
[ "0.7584052", "0.74336976", "0.7209498", "0.7197816", "0.71823823", "0.71537316", "0.70963115", "0.70610356", "0.7014894", "0.6917326", "0.6872797", "0.68682355", "0.6839676", "0.6820613", "0.6819844", "0.68171906", "0.68070513", "0.67946887", "0.67675406", "0.6766828", "0.668...
0.6123274
82
It gets its default key from settings.
def test_decrypt_key_default(self, settings): settings.CHITON_ENCRYPTION_KEY = b'0' * 32 encrypted = encrypt('message') assert decrypt(encrypted) == 'message' settings.CHITON_ENCRYPTION_KEY = b'1' * 32 with pytest.raises(EncryptionError): decrypt(encrypted)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def default(self, key):\r\n return self.inherited_settings[key.field_name]", "def get(self, key, default=None):\n return self.settings.get(key, default)", "def initial(self):\n from setman import settings\n return getattr(settings, self.name, self.default)", "def get_setting_defau...
[ "0.7820905", "0.76434904", "0.7315344", "0.7273974", "0.7266542", "0.7164663", "0.708993", "0.7069916", "0.7067126", "0.70258087", "0.70121825", "0.69252306", "0.686531", "0.6851818", "0.6849022", "0.6843253", "0.678574", "0.6693812", "0.66881984", "0.664332", "0.6612321", ...
0.0
-1
It encrypts messages as base64encoded strings.
def test_encrypt_encoding(self): encrypted = encrypt('message') assert encrypted assert encrypted != 'message' assert type(encrypted) == str
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encrypted(data: str) -> str:\n return b64encode(data.encode('ascii')).decode('ascii')", "def encrypt(strings):\r\n rd = ''.join(random.sample(upperchr+lowcarsechr+otherchr+numchr,10))\r\n output = base64.encodestring(strings+mselment+rd)\r\n print output", "def test_encryption_of_string(cipher)...
[ "0.69580686", "0.69413096", "0.6922633", "0.68996924", "0.67965823", "0.6779057", "0.677835", "0.6773155", "0.67529523", "0.67321116", "0.6706291", "0.6699885", "0.66843873", "0.66808355", "0.66681325", "0.6639239", "0.6632102", "0.6602281", "0.6573491", "0.65729386", "0.6543...
0.6409957
33
It accepts a custom encryption key.
def test_encrypt_key(self): encrypted = encrypt('message', key=b'0' * 32) assert encrypted assert encrypted != 'message'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:\n ...", "def set_encryption(key):\n global_scope['enc'] = Encryption(key.encode())", "def encryption_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"encryption_key\")", "def encryption_key(self) -> Optio...
[ "0.6910627", "0.6908906", "0.6589015", "0.6589015", "0.65658104", "0.64406216", "0.64385355", "0.6403656", "0.6396212", "0.6384059", "0.63731337", "0.6366203", "0.633905", "0.6292146", "0.6261021", "0.62500477", "0.62206835", "0.62103766", "0.61971515", "0.6182859", "0.616854...
0.6712674
2
It requires a 32byte key.
def test_encrypt_key_invalid(self): with pytest.raises(EncryptionError): encrypt('message', key=b'0' * 31)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, key: bytes):\n\n if len(key) != 32:\n raise ValueError('Key must be 32 bytes long')\n self.key = key", "def create_key ():", "def __init__(self, key):\n self.bs = 16\n self.key = hashlib.sha256(key.encode()).digest()", "def load_key():", "def test_l...
[ "0.79996747", "0.71131784", "0.70681524", "0.7063389", "0.70164996", "0.6956813", "0.69228464", "0.69088066", "0.6835613", "0.67935926", "0.6759126", "0.6737713", "0.6699715", "0.66468084", "0.66468084", "0.65837467", "0.65774214", "0.65774214", "0.65307164", "0.6464985", "0....
0.60107034
69
It gets its default key from settings.
def test_encrypt_key_default(self, settings): settings.CHITON_ENCRYPTION_KEY = None with pytest.raises(EncryptionError): encrypt('message')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def default(self, key):\r\n return self.inherited_settings[key.field_name]", "def get(self, key, default=None):\n return self.settings.get(key, default)", "def initial(self):\n from setman import settings\n return getattr(settings, self.name, self.default)", "def get_setting_defau...
[ "0.7820905", "0.76434904", "0.7315344", "0.7273974", "0.7266542", "0.7164663", "0.708993", "0.7069916", "0.7067126", "0.70258087", "0.70121825", "0.69252306", "0.686531", "0.6851818", "0.6849022", "0.6843253", "0.678574", "0.6693812", "0.66881984", "0.664332", "0.6612321", ...
0.0
-1
It does not produce the same message using the same key.
def test_encrypt_nonce(self): key = b'0' * 32 message = 'message' assert encrypt(message, key=key) != encrypt(message, key=key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_rekey(self):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n old_encrypted = encrypt('message', key=old_key)\n new_encrypted = rekey(old_encrypted, old_key=old_key, new_key=new_key)\n\n assert decrypt(new_encrypted, key=new_key) == 'message'", "def create_key(message, ...
[ "0.6353171", "0.6112065", "0.6050746", "0.60195196", "0.60014725", "0.5931781", "0.59107494", "0.58862585", "0.58743024", "0.5822409", "0.5816319", "0.5803703", "0.5773151", "0.57707894", "0.5769746", "0.57522786", "0.5739855", "0.56940407", "0.5691956", "0.5683916", "0.56509...
0.55720454
26
It reencrypts an encrypted message using a new key.
def test_rekey(self): old_key = b'0' * 32 new_key = b'1' * 32 old_encrypted = encrypt('message', key=old_key) new_encrypted = rekey(old_encrypted, old_key=old_key, new_key=new_key) assert decrypt(new_encrypted, key=new_key) == 'message'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_rekey_defaults(self, settings):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n settings.CHITON_ENCRYPTION_KEY = new_key\n settings.CHITON_PREVIOUS_ENCRYPTION_KEY = old_key\n\n encrypted = encrypt('message', key=old_key)\n rekeyed = rekey(encrypted)\n\n ass...
[ "0.7298857", "0.7298796", "0.71268785", "0.66500294", "0.6324676", "0.62568414", "0.6190534", "0.6128457", "0.61111814", "0.6098401", "0.60541093", "0.59321904", "0.5931756", "0.5884302", "0.587639", "0.58716655", "0.5832455", "0.58202076", "0.5819175", "0.5817855", "0.580720...
0.7975824
0
It raises an error when trying to rekey a nonencrypted value.
def test_rekey_non_encrypted(self): with pytest.raises(EncryptionError): rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_rekey_key_format(self):\n old_key = b'0' * 32\n encrypted = encrypt('message', key=old_key)\n\n with pytest.raises(EncryptionError):\n rekey(encrypted, old_key=old_key, new_key=b'1' * 31)", "def test_encrypt_key_invalid(self):\n with pytest.raises(EncryptionError):...
[ "0.7787494", "0.7165129", "0.7140279", "0.71330994", "0.69257516", "0.6686824", "0.6646899", "0.6348278", "0.6192973", "0.6057841", "0.60305434", "0.6001477", "0.5970502", "0.58401686", "0.5836722", "0.5806258", "0.5804228", "0.57949257", "0.57405263", "0.57361877", "0.571489...
0.82164097
0
It raises an error when given an invalid new key.
def test_rekey_key_format(self): old_key = b'0' * 32 encrypted = encrypt('message', key=old_key) with pytest.raises(EncryptionError): rekey(encrypted, old_key=old_key, new_key=b'1' * 31)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _newKey(self, key):\n pass", "def _check_key(self, key):\n raise NotImplementedError", "def test_rekey_non_encrypted(self):\n with pytest.raises(EncryptionError):\n rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)", "def test_set_invalid_key(test_file):\n md = OSX...
[ "0.71822304", "0.70056105", "0.69519156", "0.6922246", "0.6791061", "0.6753472", "0.6749198", "0.67076725", "0.66495234", "0.6584771", "0.6557083", "0.65199524", "0.65018636", "0.64907044", "0.6454335", "0.64416265", "0.6438558", "0.6431475", "0.6424363", "0.6396235", "0.6389...
0.7050168
1
It uses the settings for the default old and new key.
def test_rekey_defaults(self, settings): old_key = b'0' * 32 new_key = b'1' * 32 settings.CHITON_ENCRYPTION_KEY = new_key settings.CHITON_PREVIOUS_ENCRYPTION_KEY = old_key encrypted = encrypt('message', key=old_key) rekeyed = rekey(encrypted) assert decrypt(rekeyed) == 'message'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_key(self):\n self.__prev_key = self.__new_key", "def update_dict(new,old):", "def _newKey(self, key):\n pass", "def update_default_from_dict(self, key, value):\n pass", "def test_overwrite(self):\n set_default_for_missing_keys('hello world')\n set_default_for_m...
[ "0.65626293", "0.6550737", "0.6313369", "0.6242143", "0.61064005", "0.60898244", "0.6082216", "0.60713875", "0.5890576", "0.5830049", "0.58023095", "0.5726684", "0.5720197", "0.5707687", "0.56511974", "0.5633935", "0.5569308", "0.54979086", "0.5470096", "0.5455716", "0.545565...
0.62328917
4
Fetches domain by its name
def get_by_name(name): return database.get_all(Domain, name, field="name").all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_domain_for_name(self, name):\n domain = self.connection.lookupByName(name)\n return domain", "def lookup(self, domain_name, validate=True):\n try:\n domain = self.get_domain(domain_name, validate)\n except:\n domain = None\n return domain", "def...
[ "0.82388127", "0.7296342", "0.72872925", "0.71128905", "0.70768034", "0.7075855", "0.70597684", "0.69037956", "0.68828833", "0.6841402", "0.6727464", "0.65337366", "0.64684", "0.6401447", "0.63366103", "0.63004", "0.6292597", "0.62468135", "0.62434566", "0.62265456", "0.61929...
0.654201
11
Return True if domain is marked sensitive
def is_domain_sensitive(name): query = database.session_query(Domain) query = query.filter(and_(Domain.sensitive, Domain.name == name)) return database.find_all(query, Domain, {}).all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def indomain(url, domain):\n if url and domain:\n return url.startswith(domain)\n return False", "def is_secure_site_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_secure_site_enabled\")", "def is_secure_site_enabled(self) -> Optional[pulumi.Input[bool]]:\n ...
[ "0.5945297", "0.5943389", "0.5943389", "0.58801776", "0.5860374", "0.5823208", "0.58100474", "0.5775172", "0.57475305", "0.57442623", "0.57067436", "0.56709236", "0.56709236", "0.56709236", "0.56595856", "0.56003463", "0.55976045", "0.5583748", "0.5570339", "0.5522135", "0.55...
0.7324499
0
Create a new domain
def create(name, sensitive): domain = Domain(name=name, sensitive=sensitive) return database.create(domain)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_domain(DomainName=None):\n pass", "def create_domain(self, domain: str) -> Session:\n uri = f\"{self.uri}/domains\"\n data = {\n \"hostname\": domain\n }\n response = self.request(uri=uri, method=\"POST\", data=data)\n\n return response", "def create_...
[ "0.85276437", "0.7999616", "0.7635129", "0.7572509", "0.745609", "0.7389668", "0.73356265", "0.7306637", "0.72217894", "0.72094274", "0.71779686", "0.71715486", "0.7072683", "0.70593244", "0.7049416", "0.7021447", "0.70118153", "0.6965163", "0.6953879", "0.68060184", "0.67575...
0.743728
5
Update an existing domain
def update(domain_id, name, sensitive): domain = get(domain_id) domain.name = name domain.sensitive = sensitive database.update(domain)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit_domain(domain_name):\n\n if request.method == \"POST\":\n domain = session.query(Domain).filter(\n Domain.domain_name == domain_name).first()\n\n # Check if domain.provider object exists to make sure\n # duplicate Provider.provider_url is not created\n provider = ...
[ "0.7216133", "0.6992674", "0.69599164", "0.6770144", "0.6691671", "0.6631902", "0.66250306", "0.66209924", "0.6600841", "0.65592813", "0.6530738", "0.65130335", "0.6412239", "0.63919693", "0.63866", "0.6360199", "0.6326602", "0.6326602", "0.63022065", "0.6297731", "0.62386405...
0.77741724
0
Helper to parse REST Api requests
def render(args): query = database.session_query(Domain) filt = args.pop("filter") certificate_id = args.pop("certificate_id", None) if filt: terms = filt.split(";") query = database.filter(query, Domain, terms) if certificate_id: query = query.join(Certificate, Domain.certificates) query = query.filter(Certificate.id == certificate_id) return database.sort_and_page(query, Domain, args)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse (self, request):\n\n data = {}\n body_start = request.find('\\r\\n\\r\\n')\n if body_start == -1:\n data['body'] = None\n else:\n data['body'] = request[body_start+4:]\n parts = request.split(' ', 2)\n data['method'] = parts[0]\n data...
[ "0.6661732", "0.6361013", "0.6351881", "0.63334227", "0.6265376", "0.62463653", "0.61369115", "0.61359483", "0.6097976", "0.6052464", "0.6047303", "0.60433483", "0.59891516", "0.58926797", "0.5875995", "0.5862154", "0.5862154", "0.5862154", "0.5862154", "0.581381", "0.579058"...
0.0
-1
Create a logfile that the rest of the script can write to.
def log_start(): scriptDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) scriptName = os.path.splitext(os.path.basename(__file__))[0] log = logging.getLogger('cam_server') hdlr = logging.FileHandler(scriptDir+'/logs/'+scriptName+'.log') formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') hdlr.setFormatter(formatter) log.addHandler(hdlr) log.setLevel(logging.INFO) return log
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_logfile(self):\r\n if not self.console_redirect:\r\n return None\r\n\r\n # PCU_logs.robot need a timestamp for console logs as can be run several times\r\n if self.name == self.log_test.replace('.robot', ''):\r\n return open('{0}\\{1}_console_log_{2}'.format(\...
[ "0.77096045", "0.7498354", "0.70676357", "0.7021383", "0.69633764", "0.69220144", "0.6866777", "0.68400943", "0.6803973", "0.6764277", "0.6756204", "0.6751284", "0.6738857", "0.67022955", "0.66923296", "0.66836953", "0.6674281", "0.66629696", "0.6649449", "0.664781", "0.65934...
0.6588597
24
Establish a TCP connection to the indiserver via port 7624
def connect_to_indi(): indiclient=IndiClient() indiclient.setServer("localhost",7624) # Ensure the indiserver is running if (not(indiclient.connectServer())): print("No indiserver running on "+indiclient.getHost()+":"+str(indiclient.getPort())+" - Try to run") print(" indiserver indi_sx_ccd") sys.exit(1) return indiclient
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SCPI_sock_connect(ipaddress,port=5025):\n\n try:\n session=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n #session.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)\n #session.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, 0)\n session.connect((ipaddress,port))\n exce...
[ "0.62255913", "0.62050253", "0.6195879", "0.6171926", "0.6135462", "0.6120585", "0.606939", "0.60673463", "0.602371", "0.6017615", "0.60021055", "0.60011834", "0.5976623", "0.5967753", "0.5963107", "0.59526026", "0.59498817", "0.5947324", "0.5940068", "0.5910819", "0.59102845...
0.68877107
0
Connection routine for the CCD (given below in ccd variable). The following CCD properties are accessed. More can be found by going to indilib.org. CONNECTION Switch CCD_EXPOSURE Number CCD1 BLOB CCD_BINNING Number CCD_ABORT_EXPOSURE Number CCD_TEMPERATURE Number CCD_COOLER Switch CCD_FRAME_TYPE Switch
def connect_to_ccd(): ccd="SX CCD SXVR-H694" device_ccd=indiclient.getDevice(ccd) while not(device_ccd): time.sleep(0.5) device_ccd=indiclient.getDevice(ccd) print("Searching for device...") print("Found device") ccd_connect=device_ccd.getSwitch("CONNECTION") while not(ccd_connect): time.sleep(0.5) ccd_connect=device_ccd.getSwitch("CONNECTION") if not(device_ccd.isConnected()): ccd_connect[0].s=PyIndi.ISS_ON # the "CONNECT" switch ccd_connect[1].s=PyIndi.ISS_OFF # the "DISCONNECT" switch indiclient.sendNewSwitch(ccd_connect) ccd_exposure=device_ccd.getNumber("CCD_EXPOSURE") while not(ccd_exposure): time.sleep(0.5) ccd_exposure=device_ccd.getNumber("CCD_EXPOSURE") # inform the indi server that we want to receive the # "CCD1" blob from this device indiclient.setBLOBMode(PyIndi.B_ALSO, ccd, "CCD1") ccd_ccd1=device_ccd.getBLOB("CCD1") while not(ccd_ccd1): time.sleep(0.5) ccd_ccd1=device_ccd.getBLOB("CCD1") # get access to setting the CCD's binning value ccd_bin=device_ccd.getNumber("CCD_BINNING") while not(ccd_bin): time.sleep(0.5) ccd_bin=device_ccd.getNumber("CCD_BINNING") # get access to aborting the CCD's exposure ccd_abort=device_ccd.getSwitch("CCD_ABORT_EXPOSURE") while not(ccd_abort): time.sleep(0.5) ccd_abort=device_ccd.getSwitch("CCD_ABORT_EXPOSURE") # get access to the CCD's temperature value ccd_temp=device_ccd.getNumber("CCD_TEMPERATURE") while not(ccd_temp): time.sleep(0.5) ccd_temp=device_ccd.getNumber("CCD_TEMPERATURE") # get access to switching the CCD's cooler on/off ccd_cooler=device_ccd.getSwitch("CCD_COOLER") while not(ccd_cooler): time.sleep(0.5) ccd_cooler=device_ccd.getSwitch("CCD_COOLER") # get access to switching the CCD's image frame type ccd_frame=device_ccd.getSwitch("CCD_FRAME_TYPE") while not(ccd_frame): time.sleep(0.5) ccd_frame=device_ccd.getSwitch("CCD_FRAME_TYPE") return ccd_exposure, ccd_ccd1, ccd_bin, ccd_abort, ccd_temp, ccd_cooler, ccd_frame
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_circ():\n\n set_mode(mode_cc) # set operation mode to CC\n time.sleep(.250)\n set_CC_current(cc_current=0) # set CC mode current to 0 amps\n time.sleep(.1)\n \n oc_vals = get_input_values() # read open circuits levels\n oc_data_point = data_point(oc_vals) # create data point for ope...
[ "0.5991389", "0.5908873", "0.563912", "0.5434294", "0.54057074", "0.53454", "0.53262013", "0.5306355", "0.52807844", "0.52574426", "0.5246854", "0.5221757", "0.5210567", "0.51662695", "0.51509064", "0.5141476", "0.51294994", "0.5121288", "0.5110107", "0.5095082", "0.50932145"...
0.7094732
0
Find the last numbered image in the current directory.
def last_image(fileDir): lastNum = 0 lastImg = '' # find the name and number of the last image in the current directory for f in os.listdir(fileDir): if os.path.isfile(os.path.join(fileDir, f)): file_name = os.path.splitext(f)[0] file_name2 = file_name[4:] try: file_num = int(file_name2) if file_num > lastNum: lastNum = file_num lastImg = os.path.join(fileDir, f) except ValueError: 'The file name "%s" is not an integer. Skipping' % file_name return lastNum, lastImg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_last_counter():\n counter = imageNumStart\n if imageNumOn:\n image_ext = \".jpg\"\n search_str = imagePath + \"/*\" + image_ext\n file_prefix_len = len(imagePath + imageNamePrefix)+1\n try:\n # Scan image folder for most recent jpg file\n # and try to e...
[ "0.7381124", "0.71163946", "0.6752403", "0.65669954", "0.651449", "0.6455671", "0.6257701", "0.62144595", "0.61207634", "0.6083774", "0.60167074", "0.6005787", "0.5981818", "0.59797287", "0.5971447", "0.597075", "0.59043884", "0.5898508", "0.58921754", "0.5875937", "0.581396"...
0.7685043
0
Sends an exposure command to the CCD given the type of frame and exposure time. The received BLOB is of FITS type and is
def exposure(frameType, expTime): blobEvent.clear() # set the specified frame type if frameType.lower() == 'light': ccd_frame[0].s = PyIndi.ISS_ON ccd_frame[1].s = PyIndi.ISS_OFF ccd_frame[2].s = PyIndi.ISS_OFF ccd_frame[3].s = PyIndi.ISS_OFF indiclient.sendNewSwitch(ccd_frame) elif frameType.lower() == 'bias': ccd_frame[0].s = PyIndi.ISS_OFF ccd_frame[1].s = PyIndi.ISS_ON ccd_frame[2].s = PyIndi.ISS_OFF ccd_frame[3].s = PyIndi.ISS_OFF indiclient.sendNewSwitch(ccd_frame) elif frameType.lower() == 'dark': ccd_frame[0].s = PyIndi.ISS_OFF ccd_frame[1].s = PyIndi.ISS_OFF ccd_frame[2].s = PyIndi.ISS_ON ccd_frame[3].s = PyIndi.ISS_OFF indiclient.sendNewSwitch(ccd_frame) elif frameType.lower() == 'flat': ccd_frame[0].s = PyIndi.ISS_OFF ccd_frame[1].s = PyIndi.ISS_OFF ccd_frame[2].s = PyIndi.ISS_OFF ccd_frame[3].s = PyIndi.ISS_ON indiclient.sendNewSwitch(ccd_frame) # set the value for the next exposure ccd_exposure[0].value=expTime indiclient.sendNewNumber(ccd_exposure) # wait for the exposure blobEvent.wait() for blob in ccd_ccd1: # pyindi-client adds a getblobdata() method to IBLOB item # for accessing the contents of the blob, which is a bytearray in Python image_data=blob.getblobdata() # write the byte array out to a FITS file global imgNum global imgName imgNum += 1 fileName = fileDir+'raw-'+str(imgNum).zfill(8)+'.fits' f = open(fileName, 'wb') f.write(image_data) f.close() imgName = fileName return fileName
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def expose(self, cmd, expTime, expType):\n\n if not expType:\n expType = 'test'\n if cmd:\n cmd.inform('exposureState=\"exposing\"')\n if expType not in ('bias', 'test') and expTime > 0:\n time.sleep(expTime + self._exposureOverheadTime())\n\n if cmd:\n ...
[ "0.66172343", "0.66024214", "0.61997694", "0.6147396", "0.6001152", "0.5911744", "0.5895627", "0.5890128", "0.5692038", "0.5651894", "0.5623061", "0.56096864", "0.54805845", "0.54146963", "0.5291824", "0.5205684", "0.5177268", "0.51427567", "0.51357037", "0.5105987", "0.50998...
0.7639681
0
Changes CCD parameters/settings based on the given arguments
def setParams(commandList): for i in commandList: # set the bin mode (1x1 or 2x2) if 'bin=' in i: try: bin = int(i.replace('bin=','')) if bin >= 1 and bin <= 2: ccd_bin[0].value = bin ccd_bin[1].value = bin indiclient.sendNewNumber(ccd_bin) response = 'OK: Bin mode set to '+str(bin)+'x'+str(bin) else: response = 'BAD: Invalid Bin Mode' except ValueError: response = 'BAD: Invalid Bin Mode' # turn the cooler on/off elif 'cooler=' in i: cooler = i.replace('cooler=','') if cooler.lower() == 'on': ccd_cooler[0].s=PyIndi.ISS_ON # the "COOLER_ON" switch ccd_cooler[1].s=PyIndi.ISS_OFF # the "COOLER_OFF" switch indiclient.sendNewSwitch(ccd_cooler) response = 'OK: Cooler turned '+cooler elif cooler.lower() == 'off': ccd_cooler[0].s=PyIndi.ISS_OFF # the "COOLER_ON" switch ccd_cooler[1].s=PyIndi.ISS_ON # the "COOLER_OFF" switch indiclient.sendNewSwitch(ccd_cooler) response = 'OK: Cooler turned '+cooler else: response = 'BAD: Invalid cooler set' # set the temperature setpoint (-40C - 0C) elif 'temp=' in i: try: temp = float(i.replace('temp=','')) if temp >= -40 and temp <= 0: response = 'OK: Setting temperature setpoint to '+str(temp) ccd_temp[0].value = temp indiclient.sendNewNumber(ccd_temp) else: response = 'BAD: Invalid temperature setpoint' except ValueError: response = 'BAD: Invalid temperature setpoint' # set the image output directory elif 'fileDir=' in i: try: global imgNum global imgName global fileDir tempFileDir = i.replace('fileDir=','') if tempFileDir[0] == '~': tempFileDir = os.path.expanduser('~')+tempFileDir[1:] if tempFileDir[len(tempFileDir)-1] != '/': tempFileDir = tempFileDir+'/' if not os.path.exists(tempFileDir): os.makedirs(tempFileDir) imgNum, imgName = last_image(tempFileDir) fileDir = tempFileDir response = 'OK: File directory set to '+fileDir #run_image_display(fileDir) except FileNotFoundError: response = 'BAD: Directory does not exist' # set the temperature setpoint (-40C - 0C) elif 'frameType=' in i: try: frameType = i.replace('frameType=','') if 
frameType.lower() == 'light': ccd_frame[0].s = PyIndi.ISS_ON ccd_frame[1].s = PyIndi.ISS_OFF ccd_frame[2].s = PyIndi.ISS_OFF ccd_frame[3].s = PyIndi.ISS_OFF indiclient.sendNewSwitch(ccd_frame) response = 'OK: CCD frame type set to '+frameType elif frameType.lower() == 'bias': ccd_frame[0].s = PyIndi.ISS_OFF ccd_frame[1].s = PyIndi.ISS_ON ccd_frame[2].s = PyIndi.ISS_OFF ccd_frame[3].s = PyIndi.ISS_OFF indiclient.sendNewSwitch(ccd_frame) response = 'OK: CCD frame type set to '+frameType elif frameType.lower() == 'dark': ccd_frame[0].s = PyIndi.ISS_OFF ccd_frame[1].s = PyIndi.ISS_OFF ccd_frame[2].s = PyIndi.ISS_ON ccd_frame[3].s = PyIndi.ISS_OFF indiclient.sendNewSwitch(ccd_frame) response = 'OK: CCD frame type set to '+frameType elif frameType.lower() == 'flat': ccd_frame[0].s = PyIndi.ISS_OFF ccd_frame[1].s = PyIndi.ISS_OFF ccd_frame[2].s = PyIndi.ISS_OFF ccd_frame[3].s = PyIndi.ISS_ON indiclient.sendNewSwitch(ccd_frame) response = 'OK: CCD frame type set to '+frameType else: response = 'BAD: Invalid frame type' except ValueError: response = 'BAD: Invalid frame type' else: response = 'BAD: Invalid Set'+'\n'+response return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setCcdMode(*argv):", "def set_parameter_values(self, c1, c2):\n self.c1 = c1\n self.c2 = c2", "def set_parameters(self, args):\n self.args = args\n\n if args.testing:\n self.delay_close()\n\n if args.source == \"simulation\":\n log.info(\"Create simu...
[ "0.7167858", "0.60452235", "0.60324895", "0.5878222", "0.57493514", "0.5714063", "0.56342685", "0.56257355", "0.5574821", "0.5573054", "0.55587506", "0.55421853", "0.55330527", "0.55330527", "0.55330527", "0.55330527", "0.55330527", "0.55182", "0.5511414", "0.55045736", "0.54...
0.528423
39
Determines what to do with the incoming data, whether it is sending an exposure command or setting a parameter. This is a separate method from handle_client() because it is called as a new thread, so ensure the exposure is nonblocking.
def handle_command(log, writer, data): response = 'BAD: Invalid Command' commandList = data.split() try: if commandList[0] == 'expose': if len(commandList) == 3: if commandList[1] == 'light' or commandList[1] == 'dark' or commandList[1] == 'flat': expType = commandList[1] expTime = commandList[2] try: float(expTime) if float(expTime) > 0: expTime = float(expTime) fileName = exposure(expType, expTime) response = 'OK\n'+'FILENAME = '+fileName else: response = 'BAD: Invalid Exposure Time' except ValueError: response = 'BAD: Invalid Exposure Time' elif len(commandList) == 2: if commandList[1] == 'bias': expType = commandList[1] try: fileName = exposure(expType, 0.0) response = 'OK\n'+'FILENAME: '+fileName except ValueError: response = 'BAD: Invalid Exposure Time' elif commandList[0] == 'set': if len(commandList) >= 1: response = setParams(commandList[1:]) except IndexError: response = 'BAD: Invalid Command' # tell the client the result of their command & log it #log.info('RESPONSE = '+response) #writer.write((response+'\n---------------------------------------------------\n').encode('utf-8')) writer.write((response+'\nDONE\n').encode('utf-8'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def command(self):\n saw_error = False\n try:\n analog_gain = float(self.value_analog.get())\n except:\n print(\"analog must be floating point value\")\n self.value_analog.set(str(self.tcp_comms.tcp_params.analog_gain_target))\n saw_error = True\n ...
[ "0.61772895", "0.6057597", "0.59728074", "0.5919234", "0.57503784", "0.5750175", "0.5683027", "0.5636862", "0.5570763", "0.5540126", "0.5496406", "0.5487305", "0.54842824", "0.5481451", "0.54694", "0.54545176", "0.5411232", "0.54028386", "0.5396897", "0.53862023", "0.5382583"...
0.57042426
6
This is the method that receives the client's data and decides what to do with it. It runs in a loop to always be accepting new connections. If the data is 'status', the CCD status is returned. If the data is 'stop', the current exposure is stopped. If the data is anything else, a new thread is created and the data is sent to handle_command().
async def handle_client(reader, writer): request = None # loop to continually handle incoming data while request != 'quit': request = (await reader.read(255)).decode('utf8') print(request.encode('utf8')) #log.info('COMMAND = '+request) writer.write(('COMMAND = '+request.upper()+'\n').encode('utf8')) response = 'BAD' # check if data is empty, a status query, or potential command dataDec = request if dataDec == '': break elif 'status' in dataDec.lower(): response = 'OK' # check if the command thread is running try: if exposureState() > 0: response = response + '\nBUSY' else: response = response + '\nIDLE' except: response = response + '\nIDLE' if ccd_frame[0].s == PyIndi.ISS_ON: frameType = 'LIGHT' elif ccd_frame[1].s == PyIndi.ISS_ON: frameType = 'BIAS' elif ccd_frame[2].s == PyIndi.ISS_ON: frameType = 'DARK' elif ccd_frame[3].s == PyIndi.ISS_ON: frameType = 'FLAT' response = response+\ '\nBIN MODE = '+str(ccd_bin[0].value)+'x'+str(ccd_bin[1].value)+\ '\nCCD TEMP = '+str(ccd_temp[0].value)+\ 'C\nLAST FRAME TYPE = '+str(frameType)+\ '\nFILE DIR = '+str(fileDir)+\ '\nLAST IMAGE = '+str(imgName) # send current status to open connection & log it #log.info('RESPONSE: '+response) writer.write((response+'\nDONE\n').encode('utf-8')) elif 'stop' in dataDec.lower(): # check if the command thread is running try: if comThread.is_alive(): response = 'OK: aborting exposure' ccd_abort[0].s=PyIndi.ISS_ON indiclient.sendNewSwitch(ccd_abort) blobEvent.set() #Ends the currently running thread. 
response = response+'\nExposure Aborted' else: response = 'OK: idle' except: response = 'OK: idle' # send current status to open connection & log it #log.info('RESPONSE = '+response) writer.write((response+'\nDONE\n').encode('utf-8')) else: # check if the command thread is running, may fail if not created yet, hence try/except try: if comThread.is_alive(): response = 'BAD: busy' # send current status to open connection & log it #log.info('RESPONSE = '+response) writer.write((response+'\nDONE\n').encode('utf-8')) else: # create a new thread for the command comThread = threading.Thread(target=handle_command, args=(log, writer, dataDec,)) comThread.start() except: # create a new thread for the command comThread = threading.Thread(target=handle_command, args=(log, writer, dataDec,)) comThread.start() await writer.drain() writer.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handleRecvData(self, data):\n\n\t\t#Look for commands\n\t\tif data == 'Hello':\n\t\t\t#Inform client it is 'connected'\n\t\t\tself.transmit(\"Welcome\")\n\n\t\telif data == 'kill':\t\n\t\t\t#Stop the server running\n\t\t\tself.running = False\n\n\t\telif data == 'control':\n\t\t\t#Print out if in control of ca...
[ "0.7136164", "0.66953665", "0.6398843", "0.6384446", "0.63746595", "0.6355251", "0.6276334", "0.62578547", "0.6232429", "0.6153676", "0.61311704", "0.60923487", "0.60865486", "0.6026433", "0.59538", "0.5909406", "0.5902352", "0.58999", "0.58918047", "0.58892024", "0.58742887"...
0.6803808
1
Generates a sample configuration.
def create_default(self):
    """Populate this configuration object with sample default values."""
    # --- database ---
    self.database.lifetime = 604800
    self.database.path_media = '../data/media.db'
    self.database.path_playlist = '../data/playlist.db'

    # --- audio indexing ---
    self.indexing.audio.rules = [IndexerRuleConfig()]
    self.indexing.audio.rules[0].directory = '/mnt/hdd/Audio'
    self.indexing.audio.rules[0].extensions = ['.flac', '.mp3', '.ogg', '.wav']
    self.indexing.audio.rules[0].pattern = '{}/{}/{} {}'.format(
        get_complete_tag(TAG_ARTIST),
        get_complete_tag(TAG_ALBUM),
        get_complete_tag(TAG_NUMBER),
        get_complete_tag(TAG_TITLE))

    # --- image indexing ---
    self.indexing.image.rules = [IndexerRuleConfig()]
    self.indexing.image.rules[0].directory = '/mnt/hdd/Image'
    self.indexing.image.rules[0].extensions = ['.gif', '.jpg', '.jpeg', '.png']
    self.indexing.image.rules[0].pattern = '{}/{}'.format(
        get_complete_tag(TAG_ALBUM),
        get_complete_tag(TAG_TITLE))

    # --- video indexing (subtitles + content) ---
    self.indexing.video.ignore_revisions = False
    self.indexing.video.subtitle_rules = [IndexerRuleConfig()]
    self.indexing.video.subtitle_rules[0].directory = '/mnt/hdd/Video'
    self.indexing.video.subtitle_rules[0].extensions = ['.srt']
    self.indexing.video.subtitle_rules[0].pattern = '{}/Subtitle/{}/{}/{}/{}/{}'.format(
        get_complete_tag(TAG_TITLE),
        get_complete_tag(TAG_QUALITY),
        get_complete_tag(TAG_LANGUAGES),
        get_complete_tag(TAG_LANGUAGE),
        get_complete_tag(TAG_ANY),
        get_complete_tag(TAG_EPISODE_TITLE))
    self.indexing.video.video_rules = [IndexerRuleConfig()]
    self.indexing.video.video_rules[0].directory = '/mnt/hdd/Video'
    self.indexing.video.video_rules[0].extensions = ['.avi', '.flv', '.mkv', '.mp4']
    self.indexing.video.video_rules[0].pattern = '{}/Content/{}/{}/{}/{}'.format(
        get_complete_tag(TAG_TITLE),
        get_complete_tag(TAG_QUALITY),
        get_complete_tag(TAG_LANGUAGES),
        get_complete_tag(TAG_ANY),
        get_complete_tag(TAG_EPISODE_TITLE))

    # --- logging ---
    self.logging.enabled = True
    self.logging.level = 'error'
    self.logging.max_size_bytes = 524288
    self.logging.path = '../data/log.txt'

    # --- external multimedia players ---
    self.multimedia.av_player = 'vlc'
    self.multimedia.av_player_path = '/usr/bin/vlc-wrapper'
    self.multimedia.image_viewer = 'feh'
    self.multimedia.image_viewer_path = '/usr/bin/feh'

    # --- web frontend ---
    self.web.port = 8095
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sample_hyperparameters(self):\n\t\tconfig = {}\n\t\tfor attr, option in self._config_options.items():\n\t\t\tprint('Sampling', attr)\n\t\t\tconfig[attr] = option.sample()\n\t\treturn config", "def random_configuration(self):\n raise NotImplementedError", "def generate_configuration(directory):\n ...
[ "0.65736026", "0.6416811", "0.6385334", "0.6240616", "0.621911", "0.6212363", "0.60647005", "0.60343623", "0.6023183", "0.6003032", "0.5997811", "0.5997811", "0.59653294", "0.59653294", "0.59533024", "0.5914431", "0.5902483", "0.5882451", "0.5871193", "0.5864147", "0.5847218"...
0.0
-1
Find all prime numbers between 0 and n
def get_primes(n): primes = [True] * (n / 2) for i in range(int((n / 2 - 1) / 2) >> 1): for j in range((i * (i + 3) << 1) + 3, n / 2, (i << 1) + 3): primes[j] = False return [2] + [((i << 1) + 3) for i in range(n / 2) if (primes[i])]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def primes(n):\n return [i for i in xrange(1, n + 1) if mr_prime(i)]", "def find_n_primes(n):\n primes = [ ]\n\n if n < 2:\n return None;\n\n primes.append(2)\n\n for i in range(3, n + 1, 2):\n is_prime = True\n for p in primes:\n if i % p is 0:\n is_prime = False\n continue\n ...
[ "0.8414769", "0.8174812", "0.8154782", "0.8066897", "0.79940116", "0.7972602", "0.7941718", "0.79004717", "0.78921247", "0.7884613", "0.786188", "0.786017", "0.7860028", "0.78575575", "0.78575575", "0.7856777", "0.78546125", "0.78280634", "0.78155655", "0.7812109", "0.7803009...
0.77787405
21
Get all data spread across multiple pages
def _get_all_data(self, resource): response = self._get_raising('{}{}?per_page=100&page=1'.format( self.GH_API_ENDPOINT, resource )) yield from response.json() while 'next' in response.links: response = self._get_raising(response.links['next']['url']) yield from response.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _paginatedRequest(allPages, *args):\n data = []\n currentPage = 0\n while True:\n newData = Gw2Spidy._request(*(args + (str(currentPage),)))\n if not allPages:\n return newData['results']\n data.extend(newData['results'])\n current...
[ "0.75263435", "0.739832", "0.69225633", "0.6902741", "0.6879651", "0.68711656", "0.6810762", "0.67072725", "0.67072725", "0.6681827", "0.66562927", "0.66062623", "0.65945363", "0.6549788", "0.6540712", "0.6535249", "0.6503686", "0.64717466", "0.6464984", "0.6408598", "0.63991...
0.638738
23
Get list of names of accessible repositories (including owner)
def list_repositories(self): data = self._get_all_data('/user/repos') return [repo['full_name'] for repo in data]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def repositories(self, user_name=None):\n user_name = user_name if user_name else self._auth[0]\n data = self._request('GET', 'users', user_name)\n return data.repositories\n #ret_val = []\n #for repository in data.repositories:\n # ret_val.append(repository.name)\n ...
[ "0.7608418", "0.7267118", "0.7101556", "0.70416945", "0.6976196", "0.69618136", "0.6874849", "0.6862077", "0.68582284", "0.68060064", "0.6804673", "0.6765574", "0.67367995", "0.67278445", "0.67108905", "0.66979", "0.6602462", "0.6571694", "0.65691054", "0.6552383", "0.6520411...
0.78467643
0
Get dict of labels with colors for given repository slug
def list_labels(self, repository): data = self._get_all_data('/repos/{}/labels'.format(repository)) return {l['name']: str(l['color']) for l in data}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def colors_for_labels():\n colors = [(i * np.array([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1]) % 255).astype(np.uint8) for i in range(len(CATEGORY))]\n #colors = np.array(range(len(COCO_INSTANCE_CATEGORY_NAMES))) * np.array([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\n #colors = (colors % 255).numpy().astype(\"...
[ "0.65194696", "0.5914808", "0.5822218", "0.5784747", "0.57322896", "0.5691123", "0.5686196", "0.5633396", "0.56304544", "0.56205434", "0.55804044", "0.5575821", "0.5575821", "0.5575821", "0.5575821", "0.5560394", "0.55267113", "0.55267113", "0.5519801", "0.5494467", "0.549199...
0.7589527
0
Create new label in given repository
def create_label(self, repository, name, color, **kwargs): data = {'name': name, 'color': color} response = self.session.post( '{}/repos/{}/labels'.format(self.GH_API_ENDPOINT, repository), json=data ) if response.status_code != 201: raise GitHubError(response)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_label(self, org, name):\n pass", "def test_issue_create_label(self):\n pass", "async def new_label(event, gh, *args, **kwargs):\n if event.data[\"label\"][\"name\"] == TRIVIAL_LABEL:\n issue_number_found = ISSUE_RE.search(\n event.data[\"pull_request\"][\"title\"])...
[ "0.7274326", "0.7140176", "0.67611367", "0.6694404", "0.6539713", "0.6458034", "0.64482284", "0.6406368", "0.63728607", "0.6343112", "0.6218493", "0.6180373", "0.6086001", "0.6058881", "0.60423875", "0.6024324", "0.6002186", "0.5988165", "0.5969681", "0.59262604", "0.5906312"...
0.77359784
0
Update existing label in given repository
def update_label(self, repository, name, color, old_name=None, **kwargs): data = {'name': name, 'color': color} response = self.session.patch( '{}/repos/{}/labels/{}'.format( self.GH_API_ENDPOINT, repository, old_name or name ), json=data ) if response.status_code != 200: raise GitHubError(response)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_labels(repo: Repository, labels: list[Label]):\n\n log.info(f\"Fetching existing labels from {repo.full_name}\")\n existing_labels = {label.name.casefold(): label for label in repo.get_labels()}\n log.info(f\"Found {len(existing_labels)} existing labels\")\n\n for label in labels:\n qual...
[ "0.6681116", "0.6653021", "0.64337254", "0.6265893", "0.62407804", "0.6226869", "0.61520106", "0.6127353", "0.61121166", "0.61037016", "0.6094828", "0.603661", "0.60320926", "0.59240484", "0.59008676", "0.58834106", "0.58732027", "0.5860932", "0.5839947", "0.58363944", "0.580...
0.72466385
0
Delete existing label in given repository
def delete_label(self, repository, name, **kwargs): response = self.session.delete( '{}/repos/{}/labels/{}'.format( self.GH_API_ENDPOINT, repository, name ) ) if response.status_code != 204: raise GitHubError(response)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_issue_delete_label(self):\n pass", "def delete(ctx: click.Context, repository_path):\n root_commands.cmd_delete(ctx.obj, repository_path)", "def repository_delete(ctx: click.Context, repository_name):\n subcommand_repository.cmd_delete(ctx.obj, repository_name)", "def delete_label(id):\...
[ "0.7348744", "0.716896", "0.7127642", "0.6931051", "0.6877669", "0.67660433", "0.67263836", "0.67033213", "0.6610836", "0.6601114", "0.6599512", "0.64673704", "0.64669335", "0.64562386", "0.6455143", "0.63759565", "0.6242319", "0.6198236", "0.61135364", "0.60569805", "0.60212...
0.8369633
0
Ask for a number between low and high until actually given one. Ask for a number, and if the response is outside the bounds keep asking until you get a number that you think is OK
def stubborn_asker(low, high): import random a=random.randint(1,100) for i in range(1,10): n=input('enter the number: ') if n.isdigit(): n=int(n) if n==a: return('Correct') break elif n>a: return('The number is bigger.') elif n<a: return('The number is smaller.') else: return('please enter an integer.') i+=1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ask_number(question, low, high):\n response = None\n while response not in range(low, high, 1):\n response = input(question)\n return response", "def ask_number(question, low, high):\n response = None\n while response not in range (low, high):\n response = int(input(question)...
[ "0.8282392", "0.8165286", "0.8158761", "0.8158761", "0.8125007", "0.7887655", "0.74961793", "0.7476491", "0.7303107", "0.70028126", "0.69437313", "0.69389933", "0.69310325", "0.689875", "0.68148214", "0.67976105", "0.67912346", "0.6782751", "0.66932416", "0.6617285", "0.65194...
0.67488945
18
Extracts feature vectors from a given model and dataset and writes them, along with labels, to a file. This function works for any model whose forward() method returns, on any given input x, the pair (prediction on x, feature vector for x) and more generally, any model whose second return value is a feature vector.
def extract_feature_vectors(model, data_loader, parameters, features_file_path): feature_vectors, label_vectors = [], [] # Set model to evaluation mode model.eval() # Show progress bar while iterating over mini-batches with tqdm(total=len(data_loader)) as progress_bar: for i, (X_batch, Y_batch) in enumerate(data_loader): # Dimensions of the input Tensor batch_size, channels, height, width = X_batch.size() # If GPU available, enable CUDA on data if parameters.cuda: X_batch = X_batch.cuda() Y_batch = Y_batch.cuda() # Wrap the input tensor in a Torch Variable X_batch_variable = Variable(X_batch, volatile=True) # Run the model on this batch of inputs, obtaining a Variable of predicted labels and a Variable of features Y_predicted, features = model(X_batch_variable) # Convert the features Variable (of size [batch_size, 1024]) to a Tensor, move it to # CPU, and convert it to a NumPy array features_numpy = features.data.cpu().numpy() # Move the labels Tensor (of size [batch_size, 14]) to CPU and convert it to a NumPy array Y_numpy = Y_batch.cpu().numpy() # For each example in the batch, record its features and labels for j in range(batch_size): feature_vectors.append(features_numpy[j,:]) label_vectors.append(Y_numpy[j,:]) progress_bar.update() utils.write_feature_and_label_vectors(features_file_path, feature_vectors, label_vectors)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_vectors (feat_vec = None, labels = None, file_extension = None):\n\n feat_file_name = 'output/' + file_extension + '.feature'\n label_file_name = 'output/' + file_extension + '.label'\n\n prettyPrint('Saving feature vector file: {0} ... \\n'\n 'Saving Labels file: {1} ... '.format(...
[ "0.6803554", "0.6240717", "0.6221068", "0.6063516", "0.6009722", "0.5950848", "0.5824239", "0.58020353", "0.57663727", "0.57563233", "0.5755955", "0.57133436", "0.5701105", "0.5675284", "0.56728137", "0.56672686", "0.56518734", "0.56423616", "0.56261265", "0.56195384", "0.560...
0.6868015
0
Returns the average distance between pairs of vectors in a given list of vectors.
def average_distance_between_vectors(vectors, distance): vectors = numpy.array(vectors) vectors = vectors - numpy.mean(vectors, axis=0) vectors = normalize(vectors) vectors = list(vectors) average_distance = utils.RunningAverage() for vector_1, vector_2 in itertools.combinations(vectors, r=2): # All pairs of vectors average_distance.update(distance(vector_1, vector_2)) return average_distance()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def average_vectors(vectors_list):\n return np.mean(vectors_list, axis=0)", "def compute_average(vec_list):\r\n return np.sum(vec_list, axis = 0)/len(vec_list)", "def average(cls, vectors):\n return cls.sum(vectors) / len(vectors)", "def vector_mean(vectors: List[Vector]) -> Vector:\n n = len...
[ "0.7456902", "0.7266837", "0.7161427", "0.66369", "0.66369", "0.6613315", "0.6526983", "0.6526983", "0.652161", "0.652161", "0.652161", "0.6493121", "0.6426592", "0.641932", "0.61453825", "0.6135802", "0.6098236", "0.60969055", "0.60628074", "0.6040219", "0.6035585", "0.601...
0.8062336
0
Reads feature vectors and labels from a file and prints information about their clustering properties. Here, we think of the space of feature vectors, and consider a vector v_i to be in cluster j if j is one of the labels for example i.
def analyze_feature_vector_clusters(features_file_path, distance=utils.L2_distance): feature_vectors, label_vectors = utils.read_feature_and_label_vectors(features_file_path) logging.info('Building clusters...') # Map from (integer j) --> (list of indices i such that feature_vectors[i] is in cluster j) # Cluster 0 indicates no disease indices_for_label = map_labels_to_example_indices(label_vectors) logging.info('...done.') logging.info('Computing global and within-cluster average distances') # Compute average distance between vectors overall global_average_distance = average_distance_between_vectors(feature_vectors, distance) logging.info('Global average ' + distance.__name__ + ' between vectors: ' + str(global_average_distance)) # Compute average distance within each cluster for j, vector_indices in indices_for_label.items(): vectors_in_cluster = [feature_vectors[index] for index in vector_indices] average_cluster_distance = average_distance_between_vectors(vectors_in_cluster, distance) logging.info('Average ' + distance.__name__ + ' between vectors in cluster ' + str(j) + ': ' + str(average_cluster_distance))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_vectors (file_extension = None):\n \n feat_file_name = 'output/' + file_extension + '.feature'\n label_file_name = 'output/' + file_extension + '.label'\n \n prettyPrint( \"Loading feature vectors and labels from disk ... \", color.CYAN)\n if not os.path.isfile(feat_file_name) or not os....
[ "0.5931401", "0.58759993", "0.5818201", "0.5670178", "0.55925786", "0.5564548", "0.5553166", "0.5509292", "0.5509292", "0.5505033", "0.55044186", "0.5498727", "0.5490172", "0.54632276", "0.54203254", "0.54109955", "0.5390733", "0.5373782", "0.5334791", "0.5330502", "0.5268046...
0.65529263
0
Make custom attributes and methods to native
def native_methods(self): base_attributes = { *dir(TapiAdapter), *dir(TapiClientExecutor), *dir(JSONAdapterMixin), "serializer", } a = [ attr for attr in dir(self) if not attr.startswith("_") and attr not in base_attributes ] return a
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def attributes(self):\n raise NotImplementedError", "def __attrs_post_init__(self):", "def _set_attributes(self):", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self,...
[ "0.6156814", "0.60756165", "0.5929101", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", ...
0.0
-1
Create of url request
def fill_resource_template_url(self, template, params, resource): try: return template.format(**params) except KeyError: all_keys = re.findall(r"{(.[^\}]*)", template) range_not_set_keys = set(all_keys) - set(params.keys()) not_set_keys = "', '".join(range_not_set_keys) raise TypeError( "{}() missing {} required url params: '{}'".format( resource, len(range_not_set_keys), not_set_keys ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_request_url():\n url = 'http'\n if _config['save']:\n url += 's'\n url += '://{}:{}/move'.format(_config['ip'], _config['port'])\n return url", "def _CreateRequest(self, url, data=None):\r\n logging.debug(\"Creating request for: '%s' with payload:\\n%s\", url, data)\r\n req =...
[ "0.7095145", "0.70785993", "0.7020341", "0.69661885", "0.6836974", "0.67563176", "0.6740314", "0.6718406", "0.6607577", "0.6582395", "0.65728194", "0.65544945", "0.6512967", "0.6509939", "0.6411456", "0.6373932", "0.63712084", "0.63712084", "0.6340278", "0.62835985", "0.62759...
0.0
-1
Adding parameters to a request
def get_request_kwargs(self, api_params, *args, **kwargs): serialized = self.serialize_data(kwargs.get("data")) kwargs["data"] = self.format_data_to_request(serialized) return kwargs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_params(self, params: t.Mapping[str, str]) -> 'Request':\n return replace(self, params={**self.params, **params})", "def additional_access_token_request_parameters(self, parameters, request):", "def get_request_extra_params(self, **kwargs):\n params = self.request_extra_params.copy()\n ...
[ "0.74066496", "0.72835517", "0.6995281", "0.6825499", "0.68013364", "0.65432173", "0.6506432", "0.64661425", "0.6460794", "0.63875496", "0.6370902", "0.6263036", "0.62520313", "0.6237418", "0.6223249", "0.61894953", "0.6144124", "0.61332905", "0.6104013", "0.60877264", "0.607...
0.58247703
53
Get error from response.
def get_error_message(self, data, response=None): return str(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getError(self):\n \n return self.resp[\"error\"]", "def _get_error_message(response):\n try:\n return response.json()[\"detail\"]\n except (KeyError, _JSONDecodeError):\n return response.text", "def error(self):\n error = self._wrapped.error\n ...
[ "0.83358467", "0.78637624", "0.7812136", "0.7469619", "0.74155784", "0.7169605", "0.7166862", "0.7096074", "0.7082572", "0.704426", "0.7022822", "0.69620997", "0.6958218", "0.6933554", "0.6891388", "0.68128866", "0.67896175", "0.67648655", "0.6737759", "0.66620314", "0.664474...
0.6469981
34
Wrapper for throwing custom exceptions. When, for example, the server responds with 200, and errors are passed inside json.
def error_handling( self, tapi_exception, error_message, repeat_number, response, request_kwargs, api_params, **kwargs ): raise tapi_exception
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_json_error(ex):\n if isinstance(ex, HTTPException):\n return ex;\n elif isinstance(ex, ResourceException):\n info = ex.to_dict()\n status_code = ex.http_status\n info[\"type\"] = \"exception\"\n else:\n message = \"There was an in...
[ "0.73746616", "0.73421353", "0.73373085", "0.7094187", "0.7060387", "0.70266795", "0.7011387", "0.69837064", "0.69805527", "0.6943718", "0.6921114", "0.6909217", "0.69065756", "0.6879482", "0.67980826", "0.67973834", "0.67954606", "0.678849", "0.6778472", "0.67188394", "0.671...
0.0
-1
Conditions for repeating a request. If it returns True, the request will be repeated.
def retry_request( self, tapi_exception, error_message, repeat_number, response, request_kwargs, api_params, **kwargs ): return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _do_request(self):\n\n if time.time() < self._next_request:\n return False\n else:\n return True", "def valid_in_request(self):\n return self._repeatable[0] is not None", "def valid_multiple_in_request(self):\n return self._repeatable[0] is True", "def va...
[ "0.7024639", "0.6854883", "0.6750136", "0.6601198", "0.6564489", "0.64355195", "0.61150056", "0.60088533", "0.5957636", "0.58485484", "0.57753676", "0.574291", "0.57246184", "0.5706755", "0.55322963", "0.549886", "0.5494331", "0.5449525", "0.5437684", "0.5399534", "0.5398021"...
0.5810275
10
Stuff to do before every test.
def setUp(self): # Get the Flask test client self.client = app.test_client() #Shows Flask errors that happen during tests app.config['TESTING'] = True #To test sessions we need to set Secret key app.config['SECRET_KEY'] = 'key' # Connect to test database connect_to_db(app, "postgresql:///testdb") # Create tables and add sample data db.create_all() users() reviews()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def before_run_tests(cls):\n pass", "def do_before(self):\r\n pass", "def before_test(self, func, *args, **kwargs):\n pass", "def setUp(self):\r\n # nothing to do, all tests use different things\r\n pass", "def setUp(self):\n print(\"New test by Nikolay Melnik\")",...
[ "0.8211315", "0.802689", "0.80261695", "0.7873411", "0.76571673", "0.76434696", "0.76261467", "0.75419456", "0.75174975", "0.7496759", "0.7496759", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0...
0.0
-1
Stuff to do before every test.
def setUp(self): # Get the Flask test client self.client = app.test_client() # Show Flask errors that happen during tests app.config['TESTING'] = True #To test sessions we need to set Secret key app.config['SECRET_KEY'] = 'key' # Connect to test database connect_to_db(app, "postgresql:///testdb") # Create tables and add sample data db.create_all() users() reviews() with self.client as c: with c.session_transaction() as sess: sess['user_id'] = 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def before_run_tests(cls):\n pass", "def do_before(self):\r\n pass", "def before_test(self, func, *args, **kwargs):\n pass", "def setUp(self):\r\n # nothing to do, all tests use different things\r\n pass", "def setUp(self):\n print(\"New test by Nikolay Melnik\")",...
[ "0.8211315", "0.802689", "0.80261695", "0.7873411", "0.76571673", "0.76434696", "0.76261467", "0.75419456", "0.75174975", "0.7496759", "0.7496759", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0...
0.0
-1
Do at end of every test.
def tearDown(self): db.session.close() db.drop_all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_finished(self):\n\n # We'll start the next test in an idle, so that the current one is\n # properly terminated, and we do not execute in its context\n\n GLib.idle_add(self._do_test)", "def test_run_ended(self):", "def finished_tests(self):\n self.testing = 0", ...
[ "0.7842646", "0.7775997", "0.77741504", "0.77669877", "0.76620615", "0.7591832", "0.74713165", "0.7361014", "0.73159444", "0.7298099", "0.72939897", "0.72939897", "0.7259042", "0.723601", "0.72323287", "0.7225132", "0.7225132", "0.72186595", "0.720453", "0.720453", "0.720453"...
0.0
-1
Test this page can only be reached if user is in session
def test_user_profile_page(self): result = self.client.get("/profile", follow_redirects=True) self.assertIn(b"User ID", result.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_session_not_accessed(self):\n response = self.client.get(\"/auth_processor_no_attr_access/\")\n self.assertContains(response, \"Session not accessed\")", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_u...
[ "0.7483035", "0.743996", "0.743996", "0.743996", "0.743996", "0.73823667", "0.7071074", "0.7004314", "0.7000037", "0.69217306", "0.6881494", "0.68084276", "0.68084276", "0.6783943", "0.67708766", "0.67426723", "0.6722217", "0.6721236", "0.6721236", "0.6721236", "0.6698859", ...
0.0
-1
Test this page can only be reached if user is in session
def test_search_page(self): result = self.client.get("/search") self.assertIn(b"Search", result.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_session_not_accessed(self):\n response = self.client.get(\"/auth_processor_no_attr_access/\")\n self.assertContains(response, \"Session not accessed\")", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_u...
[ "0.7482324", "0.7440397", "0.7440397", "0.7440397", "0.7440397", "0.7383309", "0.707093", "0.7003852", "0.6998608", "0.6921593", "0.68799853", "0.6807848", "0.6807848", "0.6783388", "0.67709535", "0.6742237", "0.67224073", "0.6720221", "0.6720221", "0.6720221", "0.669822", ...
0.0
-1
Test user's if favourite restaurant is added to DB
def test_add_to_fav_(self): result = self.client.post("/add_to_fav", data={"yelp_biz_id":"JA_V9TqDCrkgknqrcUndIQ", "yelp_rest_name":"Siam", "yelp_rating":"4", "yelp_category":"Thai", "yelp_price":"$$", "yelp_image_url":"https://s3-media2.fl.yelpcdn.com/bphoto/1SkZwZrRZkQSzRMn_Trs3w/o.jpg" }) DB_result = Restaurant_details.query.filter_by(biz_id = "JA_V9TqDCrkgknqrcUndIQ").first() self.assertIsNotNone(DB_result) #testing that the returned result is not NONE self.assertEqual(DB_result.restaurant_name, 'Siam') #testing restaurant name is what it should be self.assertIn(b"Your Favourite has been saved", result.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_Favourite(self):\n self.assertEquals(self.fav_1.pk, 1)\n self.assertEquals(self.fav_1.date_added, '2019-12-20 09:00:00')\n self.assertEquals(self.fav_1.user.pk, 1)\n self.assertEquals(self.fav_1.product.pk, 1)", "async def create(self, favorite: Favorite) -> Favorite:", "de...
[ "0.72114295", "0.6636154", "0.6632463", "0.65658706", "0.64831656", "0.6482696", "0.6476188", "0.64745235", "0.6445702", "0.6381491", "0.6374218", "0.6319528", "0.6261317", "0.6236881", "0.6179164", "0.6172634", "0.6163744", "0.6161769", "0.6146276", "0.6124457", "0.60840327"...
0.78729564
0
Stuff to do before every test.
def setUp(self): # Get the Flask test client self.client = app.test_client() # Show Flask errors that happen during tests app.config['TESTING'] = True # Connect to test database connect_to_db(app, "postgresql:///testdb") # Create tables and add sample data db.create_all() users() reviews() with self.client as c: with c.session_transaction() as sess: sess['user_id'] = 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def before_run_tests(cls):\n pass", "def do_before(self):\r\n pass", "def before_test(self, func, *args, **kwargs):\n pass", "def setUp(self):\r\n # nothing to do, all tests use different things\r\n pass", "def setUp(self):\n print(\"New test by Nikolay Melnik\")",...
[ "0.8209909", "0.8028296", "0.80253834", "0.7872079", "0.76552856", "0.76427346", "0.7627397", "0.7540311", "0.7515845", "0.74945575", "0.74945575", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0...
0.0
-1
Do at end of every test.
def tearDown(self): db.session.close() db.drop_all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_finished(self):\n\n # We'll start the next test in an idle, so that the current one is\n # properly terminated, and we do not execute in its context\n\n GLib.idle_add(self._do_test)", "def test_run_ended(self):", "def finished_tests(self):\n self.testing = 0", ...
[ "0.78425974", "0.77754915", "0.77742684", "0.7766872", "0.76616216", "0.75920033", "0.7470884", "0.73606", "0.73159856", "0.7297722", "0.7293865", "0.7293865", "0.72588974", "0.7235985", "0.72320426", "0.7224789", "0.7224789", "0.72181326", "0.72043455", "0.72043455", "0.7204...
0.0
-1
Find restaurant name by zipcode.
def test_process_searchbox_with_mock(self): result = self.client.get('/process_searchbox', data={'zipcode': '94043', 'cuisine': 'indian'}) self.assertIn(b"Dosa Paratha", result.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def county_name(zipcode): \n search = SearchEngine(simple_zipcode=True) # set simple_zipcode=False to use rich info database\n zipcode_query = search.by_zipcode(str(zipcode))\n zipcode_query_dict = zipcode_query.to_dict()\n county = zipcode_query_dict['county']\n if county is None:\n print('I...
[ "0.72173095", "0.669443", "0.66253436", "0.66198653", "0.64643675", "0.6446394", "0.6327299", "0.62487876", "0.6163434", "0.6108556", "0.6016015", "0.5976856", "0.59515876", "0.5928086", "0.57584274", "0.57179135", "0.5710414", "0.5634522", "0.56298053", "0.5622634", "0.55962...
0.0
-1
Return the nth Fibonacci number
def fibonacci(n): if n in (0,1): return n return (fibonacci(n-2) + fibonacci(n-1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fibonacci(n):", "def nthFibonacci(n):\n\n # Run some basic error checking\n try:\n n = int(n)\n except: # if this fails not a number inputed\n sys.stderr.write('Incorrect data input\\n')\n return None\n if n < 0:\n sys.stderr.write('Only positive integers allowed\\n')\n return None\n \n ...
[ "0.8549428", "0.83808696", "0.8357446", "0.8267962", "0.82505155", "0.8213346", "0.8213339", "0.8212905", "0.81149447", "0.8098057", "0.80716306", "0.8040074", "0.8032861", "0.8029916", "0.8029916", "0.8019259", "0.8019077", "0.8012156", "0.8007875", "0.8001539", "0.79852134"...
0.78968835
39
Get the eol mode map
def EOLModeMap(): # Maintenance Note: ints must be kept in sync with EDSTC_EOL_* in edstc return { EOL_MODE_CR : _("Old Machintosh (\\r)"), EOL_MODE_LF : _("Unix (\\n)"), EOL_MODE_CRLF : _("Windows (\\r\\n)")}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_eol_for_open(self) -> str:\n map = {\n EOLTypes.CRLF: WINDOWS_EOL,\n EOLTypes.LF: UNIX_EOL,\n EOLTypes.NATIVE: linesep,\n }\n\n return map[self]", "def get_modes(self):\n return [i for i, j in enumerate(self._modemap._map) if j is not None]", ...
[ "0.63044417", "0.6147848", "0.5959693", "0.5748074", "0.57146144", "0.5593236", "0.54169565", "0.54102844", "0.53679585", "0.5359293", "0.5346539", "0.5331634", "0.53012353", "0.5280056", "0.52593654", "0.52485204", "0.51640564", "0.5128611", "0.51207393", "0.51181734", "0.50...
0.808408
0
Base Class for all controls. On its own, this is just a Label in a BoxLayout.
def __init__(self, label:str=None, variable_name:str=None, value:typing.Any=None, parent:QtWidgets.QWidget=None, on_change:typing.Callable=None): QtWidgets.QWidget.__init__(self, parent=parent) if label is None: if variable_name is None: label = "" else: label = app.translator(variable_name) self._make_label_widget(label) self.layout = self._formset() self.setLayout(self.layout) self.label = label ValueMixin.__init__(self, variable_name=variable_name, on_change=on_change, value=value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_label(self):\n\n self.pc_label = Label(self.form_box, text=\"Primary Current [A]:\", anchor='nw', width=32,\n bg=self.design.color.secondary, font=('Arial', 15))\n self.sc_label = Label(self.form_box, text=\"Secondary Current [A]:\", anchor='nw', width=32,\n ...
[ "0.68835676", "0.66542274", "0.63613844", "0.6344261", "0.63232493", "0.630243", "0.6290771", "0.61542106", "0.6145763", "0.6128528", "0.60839635", "0.60793793", "0.6052032", "0.60399705", "0.60251766", "0.6019413", "0.5956044", "0.59486943", "0.5947741", "0.5929713", "0.5921...
0.60125613
16
fit le modele puis retourne ses performances en test
def score(self, archi:ArchitectureNN): archi.fit_model(self.train_data, **self.train_params) return archi.compute_test_score(self.test_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit_test(self):", "def experiment_models(train, test, train_target, test_target):\n # Linear models\n linear_models = [(LinearRegression, {\"n_jobs\": -1}),\n (Lasso, {\"alpha\": 3}),\n (Ridge, {\"alpha\": 3}),\n (LinearSVR, {\"random_state\":...
[ "0.70706695", "0.64831716", "0.64393735", "0.6433011", "0.64119494", "0.6411802", "0.6275987", "0.624839", "0.62458396", "0.62132657", "0.62124383", "0.6203546", "0.62026036", "0.6191927", "0.6183838", "0.615125", "0.61467177", "0.61371404", "0.6132172", "0.6127471", "0.61209...
0.0
-1