query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (list, 30 items) | negative_scores (list, 30 items) | document_score (string, 4–10 chars) | document_rank (2 classes)
|---|---|---|---|---|---|---|
Test API can get a list of all employees (GET request) | def test_api_can_get_all_employees(self):
res = self.client().get(service_url_emp)
self.assertEqual(res.status_code, 200)
self.assertIn('name1', str(res.data))
self.assertIn('name2', str(res.data)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(self):\n employees = self.service.get_employees(strategy=selectinload)\n return self.schema.dump(employees, many=True), 200",
"def list(self, request):\n employee = self.controller.retrieve_all_employees()\n serializer = data_serializers.PresentEmployeeDataSerializer(employee,... | [
"0.7478678",
"0.7415778",
"0.7400966",
"0.73436856",
"0.711193",
"0.6989079",
"0.68708664",
"0.68625116",
"0.68500465",
"0.68442833",
"0.68407613",
"0.67822254",
"0.66658956",
"0.6658519",
"0.66278076",
"0.6532204",
"0.65165836",
"0.64601713",
"0.6449148",
"0.6427563",
"0.641... | 0.8612314 | 0 |
Test API can get a single employee by its id | def test_api_can_get_employee_by_id(self):
res = self.client().get(service_url_emp+'/1')
self.assertEqual(res.status_code, 200)
self.assertIn('name1', str(res.data)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(self, id):\n resultado = EmployeeModel.query.filter_by(employee_id=id).first()\n if resultado:\n return resultado\n api.abort(404)",
"def get(cls, employee_id):\n employee = EmployeeModel.find_by_id(employee_id)\n if not employee:\n return {'messag... | [
"0.77058524",
"0.76768017",
"0.7541592",
"0.74530566",
"0.73932606",
"0.7201871",
"0.71742916",
"0.71203345",
"0.7098658",
"0.70788777",
"0.6995682",
"0.6907228",
"0.6837458",
"0.67132115",
"0.6658866",
"0.66381055",
"0.6613429",
"0.659436",
"0.65936464",
"0.657754",
"0.65598... | 0.8674464 | 0 |
Test API can delete an existing employee (DELETE request). | def test_employee_deletion(self):
res = self.client().delete(service_url_emp, json={"id_emp": 1})
self.assertEqual(res.status_code, 204)
# Test to see if it exists, should return a 400
result = self.client().get(service_url_emp+'/1')
self.assertEqual(result.status_code, 400) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_no_employee(self):\n self.test_employee.delete()\n url = reverse_lazy('api:me-employees')\n response = self.client.get(url)\n self.assertEquals(response.status_code, 403)",
"def test_delete():\n sample_uuid = get_sample_id()\n response = requests.delete(f'http://loc... | [
"0.7686066",
"0.7555719",
"0.7523974",
"0.72330946",
"0.72253567",
"0.72195405",
"0.7156919",
"0.70961356",
"0.70768404",
"0.70662475",
"0.70438784",
"0.7029802",
"0.70208484",
"0.6992566",
"0.69601595",
"0.6914826",
"0.6910496",
"0.6900446",
"0.6896528",
"0.6888153",
"0.6870... | 0.8828227 | 0 |
Test API can search employee by birth date | def test_api_can_search_employee_by_birth_date(self):
res = self.client().get(service_url_emp+'/search/2014-10-24')
self.assertEqual(res.status_code, 200)
self.assertIn('name2', str(res.data)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_api_can_search_employee_by_between_dates(self):\n res = self.client().get(service_url_emp+'/search_between/2013-10-24,2014-10-24')\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))\n self.assertIn('name2', str(res.data))",
"def test_search_two_d... | [
"0.70000744",
"0.63187903",
"0.62191707",
"0.6206472",
"0.6201012",
"0.6194432",
"0.61828655",
"0.6132267",
"0.61312056",
"0.61046326",
"0.6066911",
"0.6014641",
"0.5994299",
"0.59937394",
"0.5989396",
"0.59791684",
"0.59529334",
"0.59364206",
"0.5929552",
"0.58888996",
"0.58... | 0.8959767 | 0 |
Test API can search employee between two dates | def test_api_can_search_employee_by_between_dates(self):
res = self.client().get(service_url_emp+'/search_between/2013-10-24,2014-10-24')
self.assertEqual(res.status_code, 200)
self.assertIn('name1', str(res.data))
self.assertIn('name2', str(res.data)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_api_can_search_employee_by_birth_date(self):\n res = self.client().get(service_url_emp+'/search/2014-10-24')\n self.assertEqual(res.status_code, 200)\n self.assertIn('name2', str(res.data))",
"def test_date_rage(self):\n\n query_params = {\n 'until_date': self.toda... | [
"0.7454268",
"0.68482393",
"0.6834848",
"0.6611345",
"0.6533663",
"0.64855504",
"0.64845145",
"0.6470128",
"0.646424",
"0.6409036",
"0.6393102",
"0.63234764",
"0.62835866",
"0.6271889",
"0.6265861",
"0.6254339",
"0.6247781",
"0.62460834",
"0.6193098",
"0.6177852",
"0.61352086... | 0.88856506 | 0 |
This function can be used to selectively filter out specific permutation combinations. It is called by RunPermutations for every possible permutation of the variables in the permutations dict. It should return True for a valid combination of permutation values and False for an invalid one. | def permutationFilter(perm):
# An example of how to use this
#if perm['__consumption_encoder']['maxval'] > 300:
# return False;
#
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def permutation_is_valid(permutation):\n pass",
"def check_permutation(u, v):\n for permutation in itertools.permutations(u):\n if v == permutation:\n return True\n return False",
"def valid_parameter_combinations(parameterSpace):\n all_combinations = product(*parameterSpace.v... | [
"0.6440923",
"0.62097514",
"0.6092914",
"0.56775457",
"0.5497833",
"0.5466948",
"0.5403812",
"0.5400389",
"0.53718925",
"0.53197914",
"0.5313259",
"0.52997786",
"0.52935493",
"0.5287549",
"0.5284966",
"0.52562",
"0.5228215",
"0.5162008",
"0.51274145",
"0.51215994",
"0.5102217... | 0.7232595 | 0 |
Get a list of available variants. The list may be empty, and must be None in case of error. | def fetchVariantList(self, url):
html = self.fetchHtml(url)
if html is None:
return None
# Get variants
variants_data = []
variants = []
for button in html.xpath("//h3[@class='downloads']//a"):
name = button.xpath("text()")[0].strip()
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def variants ( self ) :\n vars = []\n items = [ 'distrib' , 'default' ]\n items += [ 'stat_%s' % d for d in range ( 10 ) ]\n items += [ 'syst_%s' % d for d in range ( 10 ) ]\n \n from ostap.core.core import rootError \n from ostap.logger.logger import log... | [
"0.64393675",
"0.6411364",
"0.626434",
"0.6205572",
"0.61462814",
"0.6107378",
"0.6058094",
"0.5991796",
"0.59116775",
"0.5792999",
"0.57455677",
"0.56936234",
"0.56718695",
"0.5625359",
"0.5617946",
"0.5613669",
"0.56135285",
"0.5612513",
"0.5567904",
"0.5525392",
"0.5525276... | 0.66466016 | 0 |
Fill material_data with data from the selected variant. Must fill material_data.name and material_data.maps. Return a boolean status, and fill self.error to add error messages. | def fetchVariant(self, variant_index, material_data):
# Get data saved in fetchVariantList
html = self._html
variants_data = self._variants_data
if variant_index < 0 or variant_index >= len(variants_data):
self.error = "Invalid variant index: {}".format(variant_index... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_material_data(self, material):\n material_yaml_file = glob.glob(os.path.join(material_dir, material + '.yaml'))\n\n inputs = utilities.yaml_reader(material_yaml_file, material_dir, material)\n self.name = inputs['Name']\n self.materialName = material\n self.elements = in... | [
"0.5540224",
"0.5305743",
"0.5173134",
"0.5126462",
"0.5113751",
"0.5105645",
"0.50716984",
"0.5012885",
"0.5002888",
"0.49937847",
"0.491464",
"0.48269656",
"0.47691706",
"0.47614205",
"0.4725722",
"0.4714299",
"0.47136438",
"0.47127554",
"0.47089148",
"0.4701902",
"0.465203... | 0.60064197 | 0 |
Raises an exception if the tensor rank is not of the expected rank. | def assert_rank(tensor, expected_rank, name=None):
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndi... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def assert_rank(tensor, expected_rank, name=None):\n if name is None:\n name = tensor.name\n\n expected_rank_dict = {}\n if isinstance(expected_rank, six.integer_types):\n expected_rank_dict[expected_rank] = True\n else:\n for x in expected_rank:\n expected_rank_dict[x] ... | [
"0.7573716",
"0.69100684",
"0.6770358",
"0.6650631",
"0.6231268",
"0.6066077",
"0.6011113",
"0.59122485",
"0.58956087",
"0.5846304",
"0.58355474",
"0.5783303",
"0.57702374",
"0.5764062",
"0.57635987",
"0.5751452",
"0.5733349",
"0.56984454",
"0.5661566",
"0.565813",
"0.5647805... | 0.76834285 | 1 |
Creates a `truncated_normal_initializer` with the given range. | def create_initializer(initializer_range=0.02):
return tf.compat.v1.truncated_normal_initializer(stddev=initializer_range) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_initializer(initializer_range=0.02):\n return tf.truncated_normal_initializer(stddev=initializer_range)",
"def create_initializer(initializer_range=0.02):\n return tf.truncated_normal_initializer(stddev=initializer_range)",
"def create_initializer(initializer_range=0.02):\n return tf.truncate... | [
"0.81752145",
"0.81752145",
"0.8153977",
"0.7396352",
"0.6658898",
"0.65713483",
"0.6411789",
"0.6391786",
"0.63814235",
"0.62771004",
"0.62771004",
"0.6252857",
"0.621467",
"0.62111574",
"0.6149097",
"0.6082527",
"0.5928889",
"0.58472204",
"0.5730975",
"0.5706199",
"0.570041... | 0.824168 | 0 |
Maps labels in metadata to assignment types | def label_to_atype(labels):
atypes = []
for label in labels:
if isinstance(label, AssignmentType):
atypes.append(label)
if label.lower() == "lxc":
atypes.append(AssignmentType.LXC)
elif label.lower() == "baremetal":
atypes.append(AssignmentType.BareMet... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_labels_and_mapping(self, labels, mapping):\n numbered_classes = list(enumerate(list(labels), start=0))\n if mapping:\n new_mapping = {number: str(mapping[label]) for number, label in numbered_classes}\n else:\n new_mapping = {number: str(label) for number, lab... | [
"0.6138519",
"0.5933744",
"0.5866758",
"0.5866758",
"0.5786512",
"0.5786512",
"0.57188714",
"0.57151353",
"0.55421776",
"0.5512994",
"0.54907334",
"0.54420584",
"0.54413205",
"0.5440561",
"0.5437112",
"0.5422614",
"0.54225236",
"0.54204667",
"0.54132175",
"0.54111546",
"0.540... | 0.66578543 | 0 |
The style of the input text field. | def style(self) -> InputTextStyle:
return self._underlying.style | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_color(self, txt):\n color = self.valid_color\n if not self.hasAcceptableInput():\n color = self.invalid_color\n self.setStyleSheet(\"background-color: %s\" % color)",
"def style(self):\n return self['style']",
"def match_style(self, input_style: str) -> str:\r... | [
"0.6297507",
"0.6139241",
"0.60388416",
"0.592602",
"0.592602",
"0.58029115",
"0.5770399",
"0.5738665",
"0.5722834",
"0.5672692",
"0.5660816",
"0.5595364",
"0.5564458",
"0.549983",
"0.5498407",
"0.5497021",
"0.5464898",
"0.54125243",
"0.5386978",
"0.5375206",
"0.5355189",
"... | 0.78181124 | 0 |
Function to determine whether we are allowed to call `get_mpi_pool`. | def can_use_mpi_pool():
return ALLOW_SPAWN or ALREADY_RUNNING_AS_MPI | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_pool(self):\n if self.conn.queue_len() < MAX_PROXIES:\n return True\n return False",
"def has_answerpool(self):\r\n return hasattr(self, '_has_answerpool')",
"def has_mpi(self):\n return bool(self.mpi_runner)",
"def has_mpi_peer_processes():\n return mpi4py... | [
"0.666223",
"0.65785354",
"0.64785296",
"0.64087653",
"0.63978124",
"0.6303463",
"0.6174961",
"0.6139839",
"0.61166644",
"0.60749656",
"0.58839196",
"0.584911",
"0.5793402",
"0.5737615",
"0.57042027",
"0.5693275",
"0.5663002",
"0.5648575",
"0.5628458",
"0.5582699",
"0.5582061... | 0.8271317 | 0 |
Broadcast a result to all workers, dispatching to proper MPI (rather than pickled) communication if the result is a numpy array. | def bcast(result, comm, result_rank):
rank = comm.Get_rank()
# make sure all workers know if result is an array or not
if rank == result_rank:
is_ndarray = isinstance(result, np.ndarray)
else:
is_ndarray = None
is_ndarray = comm.bcast(is_ndarray, root=result_rank)
# standard (p... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Allreduce(\n self,\n sendbuf: Union[DNDarray, torch.Tensor, Any],\n recvbuf: Union[DNDarray, torch.Tensor, Any],\n op: MPI.Op = MPI.SUM,\n ):\n ret, sbuf, rbuf, buf = self.__reduce_like(self.handle.Allreduce, sendbuf, recvbuf, op)\n if buf is not None and isinstance... | [
"0.58564526",
"0.5782116",
"0.5733982",
"0.5675335",
"0.55820996",
"0.55295515",
"0.5480815",
"0.5480762",
"0.543126",
"0.542469",
"0.54245704",
"0.5397666",
"0.5393652",
"0.53869706",
"0.5362338",
"0.52945507",
"0.52782714",
"0.5240479",
"0.52378017",
"0.52356863",
"0.522221... | 0.7116765 | 0 |
Get the MPI executor pool, with specified number of processes and threads per process. | def get_mpi_pool(num_workers=None, num_threads=1):
if (num_workers == 1) and (num_threads == _NUM_THREAD_WORKERS):
from concurrent.futures import ProcessPoolExecutor
return ProcessPoolExecutor(1)
if not QUIMB_MPI_LAUNCHED:
raise RuntimeError(
"For the moment, quimb programs ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_pool(b_dummy=True, num=4):\n if b_dummy:\n pool = ThreadPool(num)\n else:\n pool = ProcessPool(num)\n\n return pool",
"def get_executor(max_workers: int) -> Executor:\n return (\n DummyExecutor()\n if max_workers == 1\n else ProcessPoolExecutor(max_workers o... | [
"0.71476984",
"0.6638191",
"0.6473073",
"0.6456225",
"0.6444362",
"0.61933714",
"0.6124338",
"0.61074543",
"0.6093102",
"0.5918116",
"0.5846336",
"0.5798279",
"0.5750463",
"0.5717205",
"0.56760335",
"0.5673281",
"0.5671824",
"0.5667239",
"0.5662297",
"0.56489134",
"0.5647965"... | 0.7311589 | 0 |
Add a new path to look for primitives. The new path will be inserted at the first position of the list, so any primitive found in this new folder will take precedence over any other primitive with the same name that existed in the system before. | def add_primitives_path(path):
if path not in _PRIMITIVES_PATHS:
if not os.path.isdir(path):
raise ValueError('Invalid path: {}'.format(path))
LOGGER.debug('Adding new primitives path %s', path)
_PRIMITIVES_PATHS.insert(0, os.path.abspath(path)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_primitives_path(path):\n added = _add_lookup_path(path, _PRIMITIVES_PATHS)\n if added:\n LOGGER.debug('New primitives path added: %s', path)",
"def AddPath(self, path):\n self.paths.append(str(path))\n self.paths.sort()",
"def _add_one(self, path):\n\n if not type(path... | [
"0.7619783",
"0.70127213",
"0.69399655",
"0.6817209",
"0.6505161",
"0.63751376",
"0.6185189",
"0.6095576",
"0.60904014",
"0.60371226",
"0.59809875",
"0.5979583",
"0.5973543",
"0.5948789",
"0.5947534",
"0.5808803",
"0.5796138",
"0.57915574",
"0.5779705",
"0.5742069",
"0.564184... | 0.79477745 | 0 |
Get the list of folders where the primitives will be looked for. This list will include the value of any `entry_point` named `jsons_path` published under the name `mlprimitives`. | def get_primitives_paths():
primitives_paths = list()
entry_points = pkg_resources.iter_entry_points('mlprimitives')
for entry_point in entry_points:
if entry_point.name == 'jsons_path':
path = entry_point.load()
primitives_paths.append(path)
return _PRIMITIVES_PATHS + p... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_primitives_paths():\n paths = _load_entry_points('primitives') + _load_entry_points('jsons_path', 'mlprimitives')\n return _PRIMITIVES_PATHS + paths",
"def shapes():\n # -- Define a list of locations to search for, starting by\n # -- adding in our builtin shape locations\n paths = [\n ... | [
"0.7141229",
"0.6743173",
"0.6618696",
"0.615481",
"0.6012607",
"0.59987366",
"0.5982601",
"0.5922148",
"0.5847463",
"0.582099",
"0.5768875",
"0.57664824",
"0.5757282",
"0.56904095",
"0.5656275",
"0.56554615",
"0.56318593",
"0.5618304",
"0.5606496",
"0.55484414",
"0.554087",
... | 0.76567644 | 0 |
Locate and load the JSON annotation of the given primitive. All the paths found in PRIMITIVES_PATHS will be scanned to find a JSON file with the given name, and the first one found is returned. | def load_primitive(name):
for base_path in get_primitives_paths():
parts = name.split('.')
number_of_parts = len(parts)
for folder_parts in range(number_of_parts):
folder = os.path.join(base_path, *parts[:folder_parts])
filename = '.'.join(parts[folder_parts:]) + '.j... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _load_annotation(json_path):\n # Open the file containing the annotation\n with open(json_path) as annotation_file:\n\n # Parse the AI2D annotation from the JSON file into a dictionary\n annotation = json.load(annotation_file)\n\n # Return the annotation\n retu... | [
"0.5557436",
"0.5461464",
"0.53854406",
"0.531877",
"0.51922894",
"0.5177836",
"0.5138669",
"0.51056015",
"0.51037264",
"0.504644",
"0.50388736",
"0.50041217",
"0.49636927",
"0.49334192",
"0.49191824",
"0.4907879",
"0.48960394",
"0.48271298",
"0.48257434",
"0.47950384",
"0.47... | 0.70710224 | 0 |
Progressive widening beam search to find a node. The progressive widening beam search involves a repeated beam search, starting with a small beam width then extending to progressively larger beam widths if the target node is not found. This implementation simply returns the first node found that matches the termination... | def progressive_widening_search(G, source, value, condition, initial_width=1):
# Check for the special case in which the source node satisfies the
# termination condition.
if condition(source):
return source
# The largest possible value of `i` in this range yields a width at
# least the numb... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def BreadthFirstSearch(graph, source):\r\n \r\n # Dictionary dataInfo will be used to store the information about each vertex. (Ancestors, descendants, distance from source, and color)\r\n dataInfo = {} \r\n \r\n # List queue will be used to store the vertices currently in the queue, these vertices ... | [
"0.589111",
"0.55604035",
"0.5548636",
"0.5295215",
"0.5288581",
"0.52520144",
"0.52117807",
"0.5199523",
"0.51886535",
"0.514051",
"0.51387787",
"0.5101167",
"0.50885487",
"0.50710326",
"0.5070418",
"0.5047442",
"0.50232387",
"0.50172395",
"0.50024635",
"0.49968472",
"0.4994... | 0.83419585 | 0 |
Search for a node with high centrality. In this example, we generate a random graph, compute the centrality of each node, then perform the progressive widening search in order to find a node of high centrality. | def main():
G = nx.gnp_random_graph(100, 0.5)
centrality = nx.eigenvector_centrality(G)
avg_centrality = sum(centrality.values()) / len(G)
def has_high_centrality(v):
return centrality[v] >= avg_centrality
source = 0
value = centrality.get
condition = has_high_centrality
found... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def node_edge_centrality(\n H,\n f=lambda x: np.power(x, 2),\n g=lambda x: np.power(x, 0.5),\n phi=lambda x: np.power(x, 2),\n psi=lambda x: np.power(x, 0.5),\n max_iter=100,\n tol=1e-6,\n):\n from ..algorithms import is_connected\n\n # if there aren't any nodes or edges, return an empty... | [
"0.6254127",
"0.59585714",
"0.5949099",
"0.58462274",
"0.583967",
"0.58372754",
"0.5818489",
"0.5792716",
"0.5791913",
"0.5777286",
"0.5737924",
"0.5728161",
"0.57143867",
"0.5713368",
"0.5711858",
"0.5711858",
"0.56890774",
"0.5639402",
"0.56360054",
"0.56196046",
"0.5611685... | 0.8351035 | 0 |
Don't emit for subclasses of dict with __reversed__ implemented. | def test_dict_ancestor_and_reversed():
from collections import OrderedDict
class Child(dict):
def __reversed__(self):
return reversed(range(10))
seq = reversed(OrderedDict())
return reversed(Child()), seq | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __reversed__(self):\n\t\treturn reversed(self.__dict__.values())",
"def reverse_dicts(self):\n\t\tself.rev_worddict = {self.worddict[word]: word for word in self.worddict}\n\t\tself.rev_classdict = {self.classdict[cl]: cl for cl in self.classdict}",
"def clear(self):\n super(ReadOnlyDict, self).clea... | [
"0.64410263",
"0.6156265",
"0.5848026",
"0.5798487",
"0.56366265",
"0.55165565",
"0.54997116",
"0.54988617",
"0.5477811",
"0.54397434",
"0.5428426",
"0.5412256",
"0.53976214",
"0.5380523",
"0.5374235",
"0.53430724",
"0.52987653",
"0.5297441",
"0.5287265",
"0.5274791",
"0.5269... | 0.70229346 | 0 |
Don't emit when reversing enum classes | def test_dont_emit_for_reversing_enums():
from enum import IntEnum
class Color(IntEnum):
RED = 1
GREEN = 2
BLUE = 3
for color in reversed(Color):
yield color | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def enum(**enums):\n reverse = dict((value, key) for key, value in iteritems(enums))\n enums['reverse_mapping'] = reverse\n return type('Enum', (), enums)",
"def reverse_enum(\n enum_to_reverse: Union[\n Type[SMOOTHIE_G_CODE],\n Type[MAGDECK_G_CODE],\n Type[TEMPDECK_G_CODE],\n ... | [
"0.6425592",
"0.6408745",
"0.585184",
"0.58211386",
"0.5819746",
"0.5771788",
"0.5760759",
"0.5676922",
"0.56572753",
"0.56236005",
"0.5623336",
"0.56230915",
"0.5613023",
"0.55931896",
"0.5510386",
"0.5510211",
"0.55055887",
"0.54664016",
"0.54496217",
"0.54444015",
"0.53903... | 0.79641455 | 0 |
Count the number of occurrences of "A", "C", "G" and "T" in dna_string. | def n_count(dna_string):
a_count = 0
c_count = 0
g_count = 0
t_count= 0
for nuc in dna_string:
if nuc.upper() == 'A':
a_count += 1
elif nuc.upper() == 'C':
c_count += 1
elif nuc.upper() == 'G':
g_count += 1
elif nuc.upper() == 'T':
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def count_nucleotides(dna, nucleotide):\n count = 0\n for char in dna:\n if char == nucleotide:\n count += 1\n return count",
"def count_nucleotides(dna, nucleotide):\n return dna.count(nucleotide)",
"def test_counts(self):\n # test DNA seq\n orig = \"AACCGGTTAN-T\"\... | [
"0.7195303",
"0.68514836",
"0.6521141",
"0.6516966",
"0.64682543",
"0.6450685",
"0.63887256",
"0.62657034",
"0.62168676",
"0.6204442",
"0.6142755",
"0.6090269",
"0.60803914",
"0.60583884",
"0.6036575",
"0.5997457",
"0.5997136",
"0.5991749",
"0.5989443",
"0.59876466",
"0.59838... | 0.84504104 | 0 |
Loads all the spine meshes from the spines directory | def load_spine_meshes(self):
# Load all the template spines and ignore the verbose messages of loading
nmv.utilities.disable_std_output()
self.spine_meshes = nmv.file.load_spines(nmv.consts.Paths.SPINES_MESHES_HQ_DIRECTORY)
nmv.utilities.enable_std_output()
# Create the material... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_meshes(self):\n for meta_mesh in self.gltf.meshes:\n # Returns a list of meshes\n meshes = meta_mesh.load(self.materials)\n self.meshes.append(meshes)\n\n for mesh in meshes:\n self.scene.meshes.append(mesh)",
"def add_spines_to_morpholog... | [
"0.6232682",
"0.61770344",
"0.60120726",
"0.5890612",
"0.58509433",
"0.5817586",
"0.56453604",
"0.5583689",
"0.5582022",
"0.5480866",
"0.5473984",
"0.54697216",
"0.54214287",
"0.54079974",
"0.5405352",
"0.5390358",
"0.5379387",
"0.53608745",
"0.5338698",
"0.5331834",
"0.53287... | 0.8019577 | 0 |
Emanates a spine at a random position on the dendritic tree. | def emanate_spine(self,
spine,
id):
# Select a random spine from the spines list
spine_template = random.choice(self.spine_meshes)
# Get a copy of the template and update it
spine_object = nmv.scene.ops.duplicate_object(spine_template, id)
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mutate_point_trig(mutated_genome):\n seed = random.randint(0,1)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if seed == 0:\n move_point_trig(mutated_genome,index)\n else: #seed == 1:\n shift_point_trig(mutated_genome,index)",
"def position_from_seed(seed):\n random.seed(see... | [
"0.56462944",
"0.54003584",
"0.5304589",
"0.5286129",
"0.5285182",
"0.5238742",
"0.521904",
"0.5194716",
"0.51497424",
"0.51151454",
"0.50812453",
"0.50709474",
"0.5054132",
"0.50421154",
"0.49938712",
"0.49918446",
"0.49793696",
"0.4965727",
"0.49629447",
"0.49620885",
"0.49... | 0.60079974 | 0 |
Add the spines randomly to the morphology. | def add_spines_to_morphology(self):
# A list of the data of all the spines that will be added to the neuron morphology
spines_list = list()
# Remove the internal samples, or the samples that intersect the soma at the first
# section and each arbor
nmv.skeleton.ops.apply_operati... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def insert_bonuses(self):\n segs = random.sample(self.segments, 2)\n\n for s in segs:\n offset = random.randint(-10, 10) / 10.0\n self.add_sprite(s, \"bonus\", offset)",
"def random_terrain(self):\n terrain_segments = [] # To hold the list of segments that will be adde... | [
"0.60628176",
"0.5960485",
"0.59101003",
"0.5616719",
"0.56113416",
"0.5536109",
"0.5498553",
"0.5497647",
"0.5496644",
"0.54036343",
"0.52970546",
"0.52900267",
"0.5285795",
"0.5261114",
"0.5230162",
"0.5227572",
"0.5211758",
"0.5208825",
"0.5196995",
"0.5181738",
"0.5178236... | 0.7675542 | 0 |
x is the time-domain signal; fs is the sampling frequency; framesz is the frame size, in seconds; hop is the time between the start of consecutive frames, in seconds | def stft(x, fs, framesz, hop):
framesamp = int(framesz*fs)
hopsamp = int(hop*fs)
w = scipy.hamming(framesamp)
X = scipy.array([scipy.fft(w*x[i:i+framesamp],256)
for i in range(0, len(x)-framesamp, hopsamp)])
X=X[:,0:128]
return X | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stft(x, fs, framesz, hop):\n framesamp = int(framesz*fs)\n hopsamp = int(hop*fs)\n w = scipy.hamming(framesamp)\n X = scipy.array([scipy.fft(w*x[i:i+framesamp]) \n for i in range(0, len(x)-framesamp, hopsamp)])\n return X",
"def time_to_frames(times, sr=22050, hop_length=51... | [
"0.6516196",
"0.6015201",
"0.5844554",
"0.5838434",
"0.57497185",
"0.5695513",
"0.5695148",
"0.5584178",
"0.5576615",
"0.5564907",
"0.5522588",
"0.55157",
"0.5506528",
"0.55026734",
"0.5488658",
"0.54744744",
"0.5466684",
"0.54563797",
"0.5440899",
"0.5399331",
"0.5397807",
... | 0.64851505 | 1 |
Test simple JDBC query consumer origin for network fault tolerance. We delay the pipeline using a Delay stage so that we have time to shut down the SDC container network and test the retry and resume logic of the origin stage. | def test_query_consumer_network(sdc_builder, sdc_executor, database):
number_of_rows = 10_000
table_name = get_random_string(string.ascii_lowercase, 20)
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_query_consumer = pipeline_builder.add_stage('JDBC Query Consumer')
jdbc_query_consumer... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_jdbc_query_executor(sdc_builder, sdc_executor, database):\n table_name = get_random_string(string.ascii_lowercase, 20)\n table = _create_table(table_name, database)\n\n DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]\n pipeline_builder = sdc_buil... | [
"0.70550436",
"0.7047451",
"0.6958239",
"0.69195366",
"0.6885504",
"0.68017673",
"0.6705965",
"0.653406",
"0.64730465",
"0.6431238",
"0.63461107",
"0.628354",
"0.6265989",
"0.6264696",
"0.6191097",
"0.6113087",
"0.59760135",
"0.5914398",
"0.58425444",
"0.57655364",
"0.5700489... | 0.79750395 | 0 |
Remove the image data at filename within the store. | def remove_data(writer: UFOWriter, filename: str) -> None:
writer.removeImage(filename) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_image_file(sender, instance, **kwargs):\n # Pass false so ImageField doesn't save the model.\n instance.image.delete(False)",
"def delImg(img_name):\n img = Image.objects.raw({\"_id\": img_name}).first()\n img.delete()\n return",
"def del_image(self, name):\r\n if self.images i... | [
"0.7071417",
"0.7065978",
"0.70587105",
"0.6948764",
"0.6843448",
"0.6837711",
"0.6807847",
"0.67592394",
"0.6742228",
"0.67016274",
"0.66833335",
"0.6654753",
"0.6651216",
"0.66283286",
"0.65885246",
"0.6580812",
"0.65307504",
"0.65212464",
"0.64562976",
"0.64470166",
"0.644... | 0.8101599 | 0 |
This function filters the data by the configured time range. | def _filterTimes(self):
print(self.tRange)
idT = np.where((self.tRange[0] > np.array(self.rawD['Epoch'][:])) &
(self.tRange[1] < np.array(self.rawD['Epoch'][:])))[0]
#print(self.rawD['Epoch'][:100])
print(idT)
# Filter data
for key in filter(lambda x:... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_by_date(items, start_time, end_time=None):\n start_time = parser.parse(start_time + \"UTC\").timestamp()\n if end_time:\n end_time = parser.parse(end_time + \"UTC\").timestamp()\n else:\n end_time = time.time()\n\n filtered_items = []\n for item in items:\n if 'time' ... | [
"0.7031687",
"0.67676854",
"0.6751928",
"0.6737246",
"0.66668457",
"0.6608179",
"0.6398275",
"0.6397288",
"0.63106745",
"0.6258909",
"0.61309755",
"0.61288965",
"0.60868865",
"0.60805154",
"0.6061895",
"0.5981721",
"0.59279084",
"0.58912027",
"0.57801604",
"0.57451755",
"0.57... | 0.700179 | 1 |
Permutes the weight to use the sliced rotary implementation. | def permute_for_sliced_rotary(weight, num_heads, rotary_dim=None):
if rotary_dim is not None:
weight = weight.reshape(num_heads, weight.shape[0] // num_heads, -1)
rotary_weight = weight[:, :rotary_dim]
rotary_weight = permute_for_sliced_rotary(
rotary_weight.reshape(num_heads * ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def weight_rotate(weight):\n weight = weight.permute(1, 2, 3, 0)\n return weight",
"def permute(self):\n raise NotImplementedError()",
"def apply_permutation(hyper, pol, perm):\n pass",
"def rotate_weights(w, s):\n\n n = len(w)\n\n w = [\n np.sum([w[j] * s[(n + i - j)... | [
"0.6902893",
"0.6378392",
"0.60350555",
"0.57593405",
"0.5576342",
"0.55144465",
"0.5473023",
"0.5443731",
"0.52880347",
"0.52178",
"0.52050495",
"0.51479506",
"0.514577",
"0.5136151",
"0.51311886",
"0.5107614",
"0.5107106",
"0.5107106",
"0.5107106",
"0.5107106",
"0.5107106",... | 0.7253853 | 0 |
Retrieve box office dataset using Pandas. | def read_box_office_data():
df = pd.read_csv(dataset)
# Return a DICT of the Rank column in our CSV:
# {'Rank': 7095, 'Release_Group': 'Rififi 2000 Re-release', 'Worldwide': 463593, 'Domesti
# c': 460226, 'Domestic_%': 0.992737163848462, 'Foreign': 3367, 'Foreign_%': 0.007262836151538094, 'Year': 2000
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_box_office_data(environ):\n df = pd.read_csv(dataset)\n data = df.to_dict(\"Rank\")\n\n # NOTE: Add QUERY_STRING for number of movies to display\n query_str: str = environ.get(\"QUERY_STRING\")\n print(query_str)\n\n # Add a query string to specify number of records to return\n if que... | [
"0.6330489",
"0.62791806",
"0.6192725",
"0.6002709",
"0.5984528",
"0.5924598",
"0.5915877",
"0.59097326",
"0.58125913",
"0.5751532",
"0.5750167",
"0.56569916",
"0.56387097",
"0.55906785",
"0.55713004",
"0.5560078",
"0.5538441",
"0.55188626",
"0.5498248",
"0.5486015",
"0.54845... | 0.6367827 | 0 |
Runge-Kutta integrator (4th order). Input arguments: x = current value of dependent variable; t = independent variable (usually time); tau = step size (usually timestep); derivsRK = right hand side of the ODE (derivsRK is the name of the function which returns dx/dt). Calling format: derivsRK(x, t, param). param = extra paramete... | def rk4(x,t,tau,derivsRK,param): #couldn't get it to import right so I just copy pasted.
half_tau = 0.5*tau
F1 = derivsRK(x,t,param)
t_half = t + half_tau
xtemp = x + half_tau*F1
F2 = derivsRK(xtemp,t_half,param)
xtemp = x + half_tau*F2
F3 = derivsRK(xtemp,t_half,param)
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rk4(x,t,tau,derivsRK,param):\n half_tau = 0.5*tau\n F1 = derivsRK(x,t,param)\n t_half = t + half_tau\n xtemp = x + half_tau*F1\n F2 = derivsRK(xtemp,t_half,param)\n xtemp = x + half_tau*F2\n F3 = derivsRK(xtemp,t_half,param)\n t_full = t + tau\n xtemp = x + tau*F3\n F4 = derivsRK(... | [
"0.7320876",
"0.70679027",
"0.6769503",
"0.6722183",
"0.67205286",
"0.6682625",
"0.6498219",
"0.64427054",
"0.63156617",
"0.6256246",
"0.6184781",
"0.6130225",
"0.61295813",
"0.6067873",
"0.60653573",
"0.60502803",
"0.59177953",
"0.59087306",
"0.59054327",
"0.588712",
"0.5872... | 0.7259235 | 1 |
Pixel L1 loss within the hole / mask | def loss_hole(self, mask, y_true, y_pred):
return self.l1((1-mask) * y_true, (1-mask) * y_pred) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lapsharp(image, maskret = False):\n #padded_image = np.pad(img, (1, 1), mode = 'symmetric')\n # lap is linear therefore;\n # lap f(x,y) = f(x + 1, y) + f(x - 1, y) + f(x, y + 1) + f(x, y - 1) - 4f(x,y)...\n #--------------------\n c = -1 # Depends on kernel\n # make ze... | [
"0.65443623",
"0.6356551",
"0.6331319",
"0.60069346",
"0.59986883",
"0.5970008",
"0.5963153",
"0.59551394",
"0.58635867",
"0.5861597",
"0.58614784",
"0.58338666",
"0.5818547",
"0.5816668",
"0.58154553",
"0.5807206",
"0.57979506",
"0.5765049",
"0.57532686",
"0.57518965",
"0.57... | 0.68490756 | 0 |
Perceptual loss based on VGG16; see eq. 3 in the paper | def loss_perceptual(self, vgg_out, vgg_gt, vgg_comp):
loss = 0
for o, c, g in zip(vgg_out, vgg_comp, vgg_gt):
loss += self.l1(o, g) + self.l1(c, g)
return loss | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vae_loss(x, t_decoded):\r\n return K.mean(reconstruction_loss(x, t_decoded))",
"def transformed_outcome_loss(tau_pred, y_true, g, prob_treatment):\n # Transformed outcome\n y_trans = (g - prob_treatment) * y_true / (prob_treatment * (1-prob_treatment))\n loss = np.mean(((y_trans - tau_pred)**2))... | [
"0.66081715",
"0.65532786",
"0.6524559",
"0.6420034",
"0.63748574",
"0.6323795",
"0.6241175",
"0.62063944",
"0.6180593",
"0.61634547",
"0.61377925",
"0.6124796",
"0.6107711",
"0.6096048",
"0.6095119",
"0.60922927",
"0.60557693",
"0.60514146",
"0.6050098",
"0.60289675",
"0.602... | 0.7599071 | 0 |
Sets pandas to display really big data frames. | def big_dataframe_setup(): # pragma: no cover
pd.set_option("display.max_colwidth", sys.maxsize)
pd.set_option("max_colwidth", sys.maxsize)
# height has been deprecated.
# pd.set_option('display.height', sys.maxsize)
pd.set_option("display.max_rows", sys.maxsize)
pd.set_option("display.max_colu... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def display_df(df):\n with pd.option_context(\"display.max_rows\", 1000, \"display.max_columns\", 100):\n display(df.head(10))",
"def display_all(df):\n with pd.option_context(\"display.max_rows\", 1000):\n with pd.option_context(\"display.max_columns\", 1000):\n display(df)",
"d... | [
"0.7420032",
"0.73733944",
"0.73476887",
"0.71943045",
"0.7025156",
"0.6742269",
"0.6694681",
"0.66209984",
"0.65980023",
"0.6392972",
"0.6373981",
"0.6253341",
"0.6170743",
"0.616121",
"0.6146324",
"0.6051942",
"0.6036005",
"0.5962156",
"0.59404975",
"0.5803421",
"0.5718344"... | 0.8036398 | 0 |
Return a nicely formatted HTML code string for the given dataframe. Arguments | def df_to_html(df, percentage_columns=None): # pragma: no cover
big_dataframe_setup()
try:
res = "<br><h2> {} </h2>".format(df.name)
except AttributeError:
res = ""
df.style.set_properties(**{"text-align": "center"})
res += df.to_html(
formatters=_formatters_dict(
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def disp(df):\n display(HTML(df.to_html(index=False)))",
"def df_to_html(df, img_formatter=images_formatter):\n pd.set_option(\"display.max_colwidth\", -1)\n pd.set_option(\"display.max_columns\", -1)\n cond_formatter = lambda imgs: images_formatter(imgs, col=1)\n html_table = df.to_html(\n ... | [
"0.7709254",
"0.73659784",
"0.71026874",
"0.69458956",
"0.67958844",
"0.676329",
"0.67408717",
"0.6718637",
"0.66966647",
"0.6656633",
"0.6579785",
"0.651067",
"0.64621204",
"0.64261204",
"0.6367933",
"0.6338818",
"0.63245225",
"0.62568015",
"0.6219889",
"0.6177059",
"0.61611... | 0.76079047 | 1 |
Produce the CSV of a solution | def produce_solution(y):
with open('out.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',', lineterminator="\n")
writer.writerow(['id', 'y'])
for i in range(y.shape[0]):
writer.writerow([i, y[i]]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generateSolution(self, cont):\n solnf = self.outdir + \"/tracks_soln.csv\"\n old = os.dup(1)\n sys.stdout.flush()\n os.close(1)\n os.open(solnf, os.O_WRONLY | os.O_CREAT)\n cont.printallSolutions(yetkin=self.yetkin)\n sys.stdout.flush()\n os.close(1)\n ... | [
"0.6973687",
"0.68380153",
"0.6710647",
"0.65229183",
"0.6497935",
"0.6473903",
"0.6452836",
"0.64365023",
"0.6412565",
"0.6315289",
"0.6276529",
"0.6232242",
"0.6230959",
"0.62285423",
"0.61607337",
"0.61379147",
"0.6136664",
"0.6123499",
"0.6109184",
"0.609388",
"0.6078443"... | 0.6963844 | 1 |
Build a cached dict with settings.INSTALLED_APPS as keys and the 'templates' directory of each application as values. | def app_templates_dirs(self):
app_templates_dirs = {}
for app in settings.INSTALLED_APPS:
if not six.PY3:
fs_encoding = (sys.getfilesystemencoding() or
sys.getdefaultencoding())
try:
mod = import_module(app)
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_templates_dirs(self): \n from pkg_resources import resource_filename\n return [ resource_filename(__name__, 'templates') ]\n # return []",
"def get_local_app_list():\n\t\tapp_list = [\n\t\t\t{\n\t\t\t\t'name': app,\n\t\t\t\t'dir': os.path.dirname(os.path.abspath(import_module(app).__file__)),\n... | [
"0.62339866",
"0.6075647",
"0.60284287",
"0.60173535",
"0.6008311",
"0.59243536",
"0.5887615",
"0.58762103",
"0.58131",
"0.57841635",
"0.57560146",
"0.5703265",
"0.5703265",
"0.56275016",
"0.5590184",
"0.5588373",
"0.55697465",
"0.55501664",
"0.55084664",
"0.5405607",
"0.5368... | 0.7934003 | 0 |
Validates that the given path looks like a valid chip repository checkout. | def ValidateRepoPath(context, parameter, value):
if value.startswith('/TEST/'):
# Hackish command to allow for unit testing
return value
for name in ['BUILD.gn', '.gn', os.path.join('scripts', 'bootstrap.sh')]:
expected_file = os.path.join(value, name)
if not os.path.exists(expe... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def path_validate(path):\n # functionality to be added later\n return path",
"def validpath(self, path):\n root = self.realpath(self.root)\n path = self.realpath(path)\n if not self.root.endswith(os.sep):\n root = self.root + os.sep\n if not path.endswith(os.sep):\n ... | [
"0.57880497",
"0.5727862",
"0.57209194",
"0.5713962",
"0.563717",
"0.56051886",
"0.5598306",
"0.55910486",
"0.5558308",
"0.5513613",
"0.5482505",
"0.54587203",
"0.54361695",
"0.5406059",
"0.5382245",
"0.531769",
"0.53093684",
"0.5304615",
"0.5287788",
"0.5281228",
"0.52625215... | 0.6077597 | 0 |
Turn on/off the joint lock so you can manipulate the WAM by hand. | def set_wam_joint_hold(hold):
msg = HoldRequest()
msg.hold = hold
wam_hold_service = rospy.ServiceProxy('/wam/hold_joint_pos', Hold)
try:
resp1 = wam_hold_service(msg)
except rospy.ServiceException as exc:
print("Service did not process request: " + str(exc)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def turn_on(self, **kwargs):\n self.set_graceful_lock(True)\n self.robot.start_cleaning()",
"def ToggleLock(self, event):\n pass",
"def turn_on(self):\n self._data.homestatus.setroomThermpoint(\n self._data.home_id, self._room_id, STATE_NETATMO_HOME\n )\n self... | [
"0.6700339",
"0.6349209",
"0.6309134",
"0.6272996",
"0.61883575",
"0.6179182",
"0.61776733",
"0.61142284",
"0.6089046",
"0.6087645",
"0.60502875",
"0.603042",
"0.60217047",
"0.59520024",
"0.59362584",
"0.59181535",
"0.58999264",
"0.5894002",
"0.5885579",
"0.58486074",
"0.5844... | 0.63843507 | 1 |
Create a trajectory from start_position to end_position. The trajectory is the linear interpolation from start to end. It will last duration_of_trajectory seconds. Be careful that you pick your start/end points such that the hand doesn't turn into the arm. | def create_joint_trajectory(start_position, end_position,
duration_of_trajectory, frequency_of_trajectory):
frequency_of_ros_messages = frequency_of_trajectory # in Hz.
number_of_way_points = duration_of_trajectory * frequency_of_ros_messages
number_of_joints = start_position._... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_trajectory(self, NextwpPosition, NextwpOrientation):\n d = np.linalg.norm(self.CurrentPosition - NextwpPosition)\n inter_segment_distance = 1\n self.no_of_segments = 1+int(d//inter_segment_distance)\n \n\n # enter sequence of waypoints: no of points should be self.no_of_... | [
"0.630769",
"0.62118566",
"0.6039857",
"0.59671867",
"0.59664977",
"0.58373964",
"0.578141",
"0.57333755",
"0.57225364",
"0.5712431",
"0.5712421",
"0.57060003",
"0.57044417",
"0.569924",
"0.5694327",
"0.5621275",
"0.561819",
"0.56162274",
"0.56119204",
"0.56117755",
"0.558021... | 0.7083624 | 0 |
This is used to send a trajectory to the WAM arm at a given frequency. | def send_joint_trajectory(trajectory, velocities, frequency=250):
pub = rospy.Publisher("/wam/jnt_pos_cmd", RTJointPos, queue_size=10)
#If wam_node is running, it will be connected to this publisher.
#Mostly this loop is here because you want to make sure the publisher
#gets set up before it starts send... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def send_speed(self, linear_speed, angular_speed):\n ### Makes a new Twist message\n msg_cmd_vel = Twist()\n \t# Linear velocity\n \tmsg_cmd_vel.linear.x = linear_speed\n \tmsg_cmd_vel.linear.y = 0.0\n \tmsg_cmd_vel.linear.z = 0.0\n \t# Angular velocity\n \tmsg_cmd_vel.angular.x = 0... | [
"0.6385109",
"0.62068605",
"0.6045478",
"0.59710425",
"0.58719957",
"0.5833281",
"0.5810495",
"0.5604722",
"0.55937123",
"0.5562863",
"0.55398273",
"0.55012274",
"0.5495552",
"0.54938346",
"0.5476565",
"0.54658544",
"0.54509515",
"0.54400605",
"0.54393345",
"0.5429929",
"0.54... | 0.643952 | 0 |
Create and send a trajectory that's a linear interpolation between wam_start and wam_end, lasting duration seconds and sent at frequency. | def create_and_send_wam_trajectory(wam_start, wam_end, duration, frequency=250):
joint_traj, joint_vels = create_joint_trajectory(wam_start, wam_end,
duration, frequency)
send_joint_trajectory(joint_traj, joint_vels, frequency) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def move_wam_from_current_location(wam_end, duration, frequency=250):\n wam_start = get_wam_joint_coordinates()\n joint_traj, joint_vels = create_joint_trajectory(wam_start, wam_end,\n duration, frequency)\n send_joint_trajectory(joint_traj, joint_vels, f... | [
"0.60942864",
"0.5600358",
"0.559887",
"0.54943013",
"0.5456494",
"0.52778065",
"0.52490836",
"0.5219559",
"0.5138963",
"0.51206017",
"0.5116879",
"0.51017463",
"0.5097054",
"0.5094589",
"0.5087105",
"0.50795066",
"0.5061036",
"0.50578904",
"0.5024878",
"0.5002162",
"0.497781... | 0.7602451 | 0 |
Create and send a trajectory that's a linear interpolation between where the wam currently is and wam_end that lasts duration seconds. Publishes the trajectory at frequency Hz. | def move_wam_from_current_location(wam_end, duration, frequency=250):
wam_start = get_wam_joint_coordinates()
joint_traj, joint_vels = create_joint_trajectory(wam_start, wam_end,
duration, frequency)
send_joint_trajectory(joint_traj, joint_vels, frequency) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_and_send_wam_trajectory(wam_start, wam_end, duration, frequency=250):\n\n joint_traj, joint_vels = create_joint_trajectory(wam_start, wam_end,\n duration, frequency)\n send_joint_trajectory(joint_traj, joint_vels, frequency)",
"def send_joint_t... | [
"0.7318101",
"0.58122194",
"0.5594247",
"0.5316941",
"0.52576196",
"0.52559245",
"0.524749",
"0.5236174",
"0.5196419",
"0.5157173",
"0.51346946",
"0.5111613",
"0.5108861",
"0.51046497",
"0.50656474",
"0.4978461",
"0.4945043",
"0.49422932",
"0.49275485",
"0.49133897",
"0.49077... | 0.5945982 | 1 |
Uses a service call to have the WAM move to the end point. Goes at its own pace. | def request_wam_move(end_point, velocity_limits):
move_wam_srv = rospy.ServiceProxy('/wam/joint_move', JointMove)
try:
resp1 = move_wam_srv(end_point)
except rospy.ServiceException as exc:
print("Service did not process request: " + str(exc)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def service(ants=0, tmo=200, waiton=-2) :\n return stow( ants, tmo, waiton, SERVICE );",
"def GET_forward(self):\n self.roomba.DriveStraight(pyrobot.VELOCITY_FAST)\n time.sleep(1)\n self.roomba.SlowStop(pyrobot.VELOCITY_FAST)",
"def flash_move(self,params):\n direction = params['direction']\... | [
"0.6199987",
"0.61403936",
"0.5519624",
"0.5468195",
"0.53626955",
"0.52841926",
"0.5257299",
"0.52350175",
"0.51708645",
"0.5167345",
"0.51569194",
"0.5134525",
"0.5132805",
"0.5116559",
"0.51158285",
"0.51156914",
"0.5107367",
"0.5070184",
"0.5053658",
"0.5045911",
"0.50426... | 0.68024755 | 0 |
Return the most common element in a list of votes. If multiple elements tie for the most votes, one of them is returned at random. | def majority_vote(votes):
vote_counts = {}
for vote in votes:
if vote in vote_counts:
vote_counts[vote] += 1
else:
vote_counts[vote] = 1
winners = []
max_choice = max(vote_counts.values())
for vote, count in vote_counts.items():
if count == max_choice... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def majority_vote(votes):\n\n vote_counts = {} # dictionary\n for vote in votes:\n # known word\n if vote in vote_counts:\n vote_counts[vote] += 1\n # unknown word\n else:\n vote_counts[vote] = 1\n\n print(vote_counts)\n # but who is the winner?\... | [
"0.7092113",
"0.642857",
"0.64019984",
"0.6061767",
"0.60235876",
"0.59869194",
"0.597398",
"0.59564465",
"0.5941662",
"0.5912173",
"0.58963436",
"0.58896893",
"0.58867997",
"0.5852934",
"0.58495456",
"0.5814076",
"0.579337",
"0.57645696",
"0.5739213",
"0.5736768",
"0.5644522... | 0.6985902 | 1 |
Find the k nearest neighbors of point p in array points. If multiple points are equally far, the ones with lower values in lower dimensions are preferred. For example, if (2,2) and (2,4) are equally distant, (2,2) would be chosen. | def find_nearest_neighbors(p, points, k):
import numpy as np
distances = np.zeros(points.shape[0])
for i in range(len(distances)):
distances[i] = distance(p,points[i])
ind = np.argsort(distances)
return ind[0:k] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_nearest_neighbors(p, points, k=5):\n dist = np.zeros(points.shape[0])\n for i in range(len(dist)):\n dist[i] = distance(p, points[i])\n ind = np.argsort(dist)\n return ind[0:k]",
"def knn(p, pnts, k=1, return_dist=True):\r\n def _remove_self_(p, pnts):\r\n \"\"\"Remove a poi... | [
"0.8639097",
"0.7705145",
"0.7612184",
"0.7396099",
"0.72835445",
"0.7229254",
"0.7169603",
"0.7161284",
"0.7108857",
"0.7079597",
"0.7041696",
"0.69775057",
"0.6954059",
"0.69533056",
"0.6897031",
"0.6890956",
"0.68394387",
"0.67642987",
"0.67409587",
"0.66584134",
"0.662418... | 0.8469665 | 1 |
Test that the graph of a RegisteredPipeline can be copied. Each step in the copied graph should be a new object, but have the same name, predecessors, and model version as the original. | def test_copy_graph(
make_mock_pipeline_graph,
make_mock_registered_model_version,
make_mock_registered_model,
) -> None:
mocked_rm = make_mock_registered_model(id=123, name="test_rm")
with patch.object(
verta.pipeline.PipelineStep, "_get_registered_model", return_value=mocked_rm
):
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_clone_scenario(self):\n pass",
"def test_clone(self):\n f0 = 5 * (np.random.rand(10, 5) - 0.5)\n ga = population.Evolver(f0, eval_one_max)\n ga.clone()\n\n # should have created a new generation\n self.assertEqual(len(ga.generations), 2)\n\n # should have... | [
"0.66606313",
"0.66042787",
"0.6354918",
"0.63137066",
"0.62398744",
"0.6187213",
"0.61756086",
"0.610647",
"0.60987633",
"0.60784256",
"0.60639167",
"0.59603596",
"0.59550446",
"0.5896772",
"0.58627504",
"0.58563215",
"0.5846192",
"0.5841501",
"0.5821267",
"0.5804612",
"0.57... | 0.85880643 | 0 |
Verify the expected sequence of calls when a pipeline definition is logged as an artifact to the pipeline's model version. Fetching the registered model version is patched instead of mocking a response to avoid having to pass the RM's id down through multiple pytest fixtures. | def test_log_pipeline_definition_artifact(
model_version_name,
mocked_responses,
make_mock_pipeline_graph,
make_mock_registered_model,
make_mock_registered_model_version,
) -> None:
rm = make_mock_registered_model(id=123, name="test_rm")
rmv = make_mock_registered_model_version()
# Fetch... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_pipeline_definition_artifact(\n make_mock_registered_model_version,\n make_mock_simple_pipeline_definition,\n) -> None:\n rmv = make_mock_registered_model_version()\n pipeline_definition = RegisteredPipeline._get_pipeline_definition_artifact(\n registered_model_version=rmv,\n )\n... | [
"0.732434",
"0.7297416",
"0.69765425",
"0.63282025",
"0.62099767",
"0.6095259",
"0.6094727",
"0.60649484",
"0.58922017",
"0.587223",
"0.584923",
"0.5840194",
"0.5815585",
"0.578171",
"0.5772538",
"0.57495606",
"0.57491666",
"0.5727522",
"0.5712284",
"0.5710002",
"0.56978595",... | 0.8264145 | 0 |
Test that a pipeline definition artifact can be fetched from the registered model version associated with a RegisteredPipeline object. | def test_get_pipeline_definition_artifact(
make_mock_registered_model_version,
make_mock_simple_pipeline_definition,
) -> None:
rmv = make_mock_registered_model_version()
pipeline_definition = RegisteredPipeline._get_pipeline_definition_artifact(
registered_model_version=rmv,
)
assert pi... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_from_pipeline_definition(\n make_mock_registered_model_version,\n mocked_responses,\n) -> None:\n rmv = make_mock_registered_model_version()\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/1\",\n json={},\n status=200,\n ... | [
"0.7688491",
"0.72997695",
"0.72265136",
"0.69778377",
"0.6683026",
"0.6655028",
"0.646409",
"0.6193778",
"0.61219335",
"0.60657907",
"0.60468465",
"0.5986037",
"0.59753895",
"0.5972712",
"0.59150094",
"0.58124006",
"0.5805494",
"0.57608837",
"0.56313217",
"0.5624219",
"0.560... | 0.8222838 | 0 |
Test that a pipeline definition can be constructed from a RegisteredPipeline object. In-depth testing of the `_to_graph_definition` and `_to_steps_definition` functions is handled in unit tests for PipelineGraph. | def test_to_pipeline_definition(
make_mock_pipeline_graph,
make_mock_registered_model_version,
make_mock_registered_model,
) -> None:
mocked_rm = make_mock_registered_model(id=123, name="test_rm")
with patch.object(
verta.pipeline.PipelineStep, "_get_registered_model", return_value=mocked_rm... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_from_pipeline_definition(\n make_mock_registered_model_version,\n mocked_responses,\n) -> None:\n rmv = make_mock_registered_model_version()\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/1\",\n json={},\n status=200,\n ... | [
"0.6992653",
"0.6769633",
"0.6746867",
"0.66939527",
"0.6548289",
"0.6532664",
"0.6424661",
"0.63188297",
"0.60581475",
"0.6034345",
"0.59983337",
"0.59542525",
"0.5939424",
"0.59258586",
"0.5908199",
"0.5816665",
"0.5774693",
"0.5701459",
"0.5650962",
"0.56478715",
"0.564152... | 0.77992266 | 0 |
Test that a pipeline configuration can be constructed from a RegisteredPipeline object and a valid list of pipeline resources, where resources are provided for every step. | def test_to_pipeline_configuration_valid_complete(
resources,
make_mock_pipeline_graph,
make_mock_registered_model_version,
make_mock_registered_model,
) -> None:
mocked_rm = make_mock_registered_model(id=123, name="test_rm")
with patch.object(
verta.pipeline.PipelineStep, "_get_register... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_to_pipeline_configuration_valid_incomplete(\n resources,\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineSte... | [
"0.7530245",
"0.74972314",
"0.7201657",
"0.6531304",
"0.6396485",
"0.62914926",
"0.6223942",
"0.6215258",
"0.6160244",
"0.6155715",
"0.61122143",
"0.60925037",
"0.60755783",
"0.60367584",
"0.6007232",
"0.5996004",
"0.59956264",
"0.5969494",
"0.59296113",
"0.59294957",
"0.5909... | 0.7840763 | 0 |
Test that a pipeline configuration can be constructed from a RegisteredPipeline object and a valid list of pipeline resources, where resources are not provided for every step. | def test_to_pipeline_configuration_valid_incomplete(
resources,
make_mock_pipeline_graph,
make_mock_registered_model_version,
make_mock_registered_model,
) -> None:
mocked_rm = make_mock_registered_model(id=123, name="test_rm")
with patch.object(
verta.pipeline.PipelineStep, "_get_regist... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_to_pipeline_configuration_valid_complete(\n resources,\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep,... | [
"0.77696097",
"0.7578576",
"0.7403642",
"0.6423569",
"0.640242",
"0.63075686",
"0.61619073",
"0.6150967",
"0.60948867",
"0.6091435",
"0.60331774",
"0.6016926",
"0.60129344",
"0.5994113",
"0.59888446",
"0.59842837",
"0.5973404",
"0.5892259",
"0.5888149",
"0.5880129",
"0.588012... | 0.7685353 | 1 |
Test that the expected errors are raised when invalid pipeline resources are provided. | def test_to_pipeline_configuration_invalid_resources(
resources,
make_mock_pipeline_graph,
make_mock_registered_model_version,
make_mock_registered_model,
) -> None:
mocked_rm = make_mock_registered_model(id=123, name="test_rmv")
with patch.object(
verta.pipeline.PipelineStep, "_get_regi... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_creation_fail(self):\n\n # Assert that a RelaxError occurs when the pipe type is invalid.\n self.assertRaises(RelaxError, pipes.create, 'new', 'x')",
"def test_execute_pipeline_two(self):\n task_list = [Test()]\n with self.assertRaises(AttributeError):\n execute_pi... | [
"0.69556063",
"0.6845119",
"0.6751194",
"0.67011756",
"0.66657954",
"0.6635373",
"0.6581409",
"0.6536569",
"0.6497356",
"0.6491855",
"0.6488647",
"0.64853007",
"0.64841676",
"0.64522225",
"0.6404109",
"0.6401285",
"0.6376981",
"0.6353701",
"0.6349466",
"0.6321207",
"0.6282897... | 0.7442444 | 0 |
Test that a pipeline configuration can be constructed from a RegisteredPipeline object without providing any pipeline resources. | def test_to_pipeline_configuration_no_resources(
make_mock_pipeline_graph,
make_mock_registered_model_version,
make_mock_registered_model,
) -> None:
mocked_rm = make_mock_registered_model(id=123, name="test_rm")
with patch.object(
verta.pipeline.PipelineStep, "_get_registered_model", return... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_to_pipeline_configuration_valid_incomplete(\n resources,\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineSte... | [
"0.702158",
"0.6975604",
"0.6957339",
"0.69055235",
"0.6709925",
"0.6463869",
"0.6353609",
"0.63210964",
"0.62990576",
"0.6240508",
"0.6217712",
"0.621394",
"0.6183469",
"0.6107562",
"0.60923505",
"0.6073859",
"0.6047508",
"0.60444486",
"0.59288347",
"0.5884892",
"0.5882104",... | 0.76080483 | 0 |
Test that a RegisteredPipeline object can be constructed from a pipeline definition. The model version's `_get_artifact` function is overridden in the mocked RMV fixture to return a simple, consistent pipeline definition. Calls related to the fetching of the RMV and RM are mocked. | def test_from_pipeline_definition(
make_mock_registered_model_version,
mocked_responses,
) -> None:
rmv = make_mock_registered_model_version()
mocked_responses.get(
f"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/1",
json={},
status=200,
)
mocked_... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_pipeline_definition_artifact(\n make_mock_registered_model_version,\n make_mock_simple_pipeline_definition,\n) -> None:\n rmv = make_mock_registered_model_version()\n pipeline_definition = RegisteredPipeline._get_pipeline_definition_artifact(\n registered_model_version=rmv,\n )\n... | [
"0.77940893",
"0.77576274",
"0.7122229",
"0.7110022",
"0.67007685",
"0.6504595",
"0.63064855",
"0.62975585",
"0.6293141",
"0.6096061",
"0.58818185",
"0.5858421",
"0.58582985",
"0.58366627",
"0.58244514",
"0.58215785",
"0.5737838",
"0.5727093",
"0.57111335",
"0.5692733",
"0.56... | 0.7792976 | 1 |
Test that we throw the correct exception when a user tries to mutate the steps of a graph in an inappropriate way. | def test_bad_mutation_of_graph_steps_exception(
make_mock_registered_model,
make_mock_registered_model_version,
make_mock_pipeline_graph,
):
mocked_rm = make_mock_registered_model(id=123, name="test_rm")
mocked_rmv = make_mock_registered_model_version()
with patch.object(
verta.pipeline.... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_case31(self):\n\n self.assertRaises(ValueError, lambda: self.graph1.swapStudents(\"student1\",\"supervisor2\",\"student3\",\"supervisor1\"))",
"def test_runtime_errors(self, graph_entry_class):\n graph_entry_class.return_value.state = \"Pending\"\n graph_entry_class.return_value.pat... | [
"0.66778755",
"0.6674113",
"0.6551536",
"0.6528747",
"0.6398362",
"0.6366871",
"0.634431",
"0.6334882",
"0.6333377",
"0.6324761",
"0.6293048",
"0.6279606",
"0.6269768",
"0.6257504",
"0.6240395",
"0.6227926",
"0.6223702",
"0.6170484",
"0.6166687",
"0.6150387",
"0.6145652",
"... | 0.7053854 | 0 |
Invert using 2D convolution, using the specified convolution function. Use the image im as a template. Do PSF in a separate call. This is at the bottom of the layering, i.e. all transforms are eventually expressed in terms of this function. Any shifting needed is performed here. | def invert_2d(vis: Visibility, im: Image, dopsf: bool = False, normalize: bool = True,
gcfcf=None, **kwargs) -> (Image, numpy.ndarray):
assert isinstance(vis, Visibility), vis
svis = copy_visibility(vis)
if dopsf:
svis.data['vis'][...] = 1.0+0.0j
svis = shift_vis_to_... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_conv2d():\n img = np.array([\n [0.3, 0.5, 0.7, 0.9],\n [0.1, 0.3, 0.5, 0.7],\n [0.9, 0.7, 0.5, 0.3],\n ])\n template = np.array([\n [1, 0],\n [1, 0],\n ])\n template = np.flipud(np.fliplr(template))\n return fftconvolve(img, template, mode='valid')",
... | [
"0.62379956",
"0.6189527",
"0.60548985",
"0.5914404",
"0.58883965",
"0.58311486",
"0.58114636",
"0.5808238",
"0.5806143",
"0.57928836",
"0.5729075",
"0.57058555",
"0.5699199",
"0.56814635",
"0.56235063",
"0.56076056",
"0.55912095",
"0.5589819",
"0.55747116",
"0.55583715",
"0.... | 0.62493515 | 0 |
Predict the visibility from a Skycomponent and add it to the existing visibility, for Visibility or BlockVisibility | def predict_skycomponent_visibility(vis: Union[Visibility, BlockVisibility],
sc: Union[Skycomponent, List[Skycomponent]]) -> Union[Visibility, BlockVisibility]:
if sc is None:
return vis
if not isinstance(sc, collections.Iterable):
sc = [sc]
if ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def UpdateVisibility(self):\r\n # Clear the map\r\n self.ClearVisibilityMap()\r\n \r\n # Only update it if we have a player\r\n if not self.game.player:\r\n return\r\n \r\n max_vis_day = self.data.get('max_visibility', self.game.data['map']['max_visibility'])\r\n max_vis_night = self.d... | [
"0.63595897",
"0.6236446",
"0.5877378",
"0.5763672",
"0.56774557",
"0.5585451",
"0.55744827",
"0.5517068",
"0.55012023",
"0.545785",
"0.54522437",
"0.54522437",
"0.54522437",
"0.54522437",
"0.54522437",
"0.54522437",
"0.54522437",
"0.54522437",
"0.54522437",
"0.54522437",
"0.... | 0.68695766 | 0 |
Make an empty image from params and Visibility. This makes an empty template image consistent with the visibility, allowing optional overriding of select parameters. This is a convenience function and does not transform the visibilities. | def create_image_from_visibility(vis, **kwargs) -> Image:
assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility), \
"vis is not a Visibility or a BlockVisibility: %r" % (vis)
log.debug("create_image_from_visibility: Parsing parameters to get definition of WCS")
imagecentre ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _blankimage():\n img = TK.PhotoImage(width=1, height=1)\n img.blank()\n return img",
"def blank(width, height, channels=3, value=0):\n blank_image = np.full((height, width, channels), value, np.uint8)\n return Image(img=blank_image)",
"def _read_empty(self):\n self... | [
"0.6082279",
"0.58083063",
"0.5590598",
"0.5545989",
"0.5522535",
"0.549299",
"0.54731494",
"0.54632276",
"0.538827",
"0.5353823",
"0.5334222",
"0.52348214",
"0.50866646",
"0.5055959",
"0.49559405",
"0.49474832",
"0.4938404",
"0.4938404",
"0.49174386",
"0.4908951",
"0.4874856... | 0.5933551 | 1 |
Method for saving the Course | def save_course(self):
print("Course sauvegardee")
print(self.Course)
print("self.var_nom : "+self.var_nom.get())
self.Course.name=self.var_nom.get()
print("self.vqr_ete : "+str(self.var_ete.get()))
if(self.var_ete.get()==1):
self.Course.season = "Seulement ete"
elif(self.var_hiver.get()==1):
self.... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_courses(std):\n return std[2]",
"def course_tester(courses):\n\n return False",
"def items(self, course):\r\n pass",
"def course(self):\n return self.section.course",
"def addCourse(self):\n\t\tcourseName = input(\"What is the new course name? \")\n\t\tcourseGrade = eval(input(... | [
"0.6775902",
"0.6629837",
"0.66172206",
"0.6547874",
"0.64357376",
"0.63588977",
"0.63275343",
"0.63266003",
"0.63218915",
"0.6313343",
"0.62600297",
"0.62568486",
"0.6254541",
"0.6222173",
"0.621786",
"0.6193602",
"0.6189944",
"0.61762655",
"0.6173807",
"0.6138709",
"0.61324... | 0.7031105 | 0 |
returns a hash of all the configurations without the objects... just tuples of strings and ints | def getConfigHash(self):
strHash = {} #keyed by appName, value = a list of configNames
for appName, app in self._appConfigs.iteritems():
strHash[appName] = app.getAllConfigNames()
return strHash | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __hash__(self) -> int:\n result = []\n for i in self._all_types:\n try:\n hash(i)\n result.append(i)\n except TypeError:\n pass\n return hash(tuple(result))",
"def get_state_hash(self):\n return tuple([tuple([x for... | [
"0.68357277",
"0.67516655",
"0.6746455",
"0.6700139",
"0.6639515",
"0.6615102",
"0.65691537",
"0.6569077",
"0.65256405",
"0.65256405",
"0.65256405",
"0.65256405",
"0.6488089",
"0.6475467",
"0.6468475",
"0.6413573",
"0.6387785",
"0.636889",
"0.63641346",
"0.6332809",
"0.631410... | 0.70877445 | 0 |
Initializes dictionaries for the general summary of data with all the modalities of the vocabulary (cover of each modality set to 0) | def initDictionnary(self):
partitions = self.vocabulary.getPartitions()
for partition in partitions:
for mod in partition.modalities:
self.summaryDict[partition.getAttName() + " : " + mod] = 0.0
self.summaryFilteredDict[partition.getAttName() + " : " + mod] = ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _init_dict(self):\n dict_ord = self.MIN_VALID\n\n for da in self.train_das:\n for dai in da:\n if dai.name not in self.dict_slot:\n self.dict_slot[dai.name] = dict_ord\n dict_ord += 1\n if dai.value not in self.dict_va... | [
"0.61584175",
"0.6107097",
"0.61064154",
"0.6094799",
"0.59786487",
"0.59393686",
"0.58659035",
"0.58530694",
"0.5824693",
"0.5796534",
"0.5796534",
"0.57718873",
"0.5747239",
"0.5724415",
"0.572306",
"0.57186645",
"0.5710093",
"0.57018965",
"0.5688566",
"0.5671068",
"0.56425... | 0.77364576 | 0 |
Initializes the listOfTerms dictionary, used to filter data | def initListOfTerms(self, listOfTerms):
if listOfTerms is not None:
self.listOfTerms = dict()
self.filter = True
for element in listOfTerms:
partition = element.split(':')[0]
modalities = element.split(':')[1]
self.listOfTerms[p... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def initialize_terms_and_postings():\n global dictionary, postings\n for id in document_filenames:\n document = getDocumentContent(document_filenames[id])\n if(document_filenames[id].rfind(\".pdf\") == len(document_filenames[id]) - 4):\n terms = tokenize(document.encode('utf-8'))\n ... | [
"0.6852482",
"0.647374",
"0.63965356",
"0.638963",
"0.637462",
"0.6351122",
"0.6201758",
"0.6140415",
"0.61140746",
"0.6099874",
"0.6098104",
"0.6070301",
"0.59825236",
"0.5968131",
"0.5958227",
"0.5927311",
"0.5926743",
"0.5925386",
"0.59241664",
"0.5897759",
"0.58848786",
... | 0.7638269 | 0 |
Returns the cover of the specified modality in the dictionary | def getCoverFromModalityInDictionnary(self, dictionnary, key):
return dictionnary[key] / 100 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_image_dict(self):\n sprite_sheet = setup.GFX['treasurechest']\n image_dict = {'closed': self.get_image(0, 0, 32, 32, sprite_sheet),\n 'opened': self.get_image(32, 0, 32, 32, sprite_sheet)}\n\n return image_dict",
"def asDict(self) -> dict:\n return {\n ... | [
"0.5663321",
"0.52026194",
"0.5200007",
"0.51598495",
"0.5099573",
"0.50299585",
"0.5023927",
"0.49796367",
"0.4975332",
"0.49580812",
"0.4891326",
"0.4875619",
"0.48679543",
"0.48481658",
"0.48481658",
"0.48460934",
"0.4842605",
"0.4824412",
"0.47903132",
"0.47611475",
"0.47... | 0.6159811 | 0 |
Creates a new version on a versionable object when the object is saved. A new version is created if the type is automatically versionable and has changed, or if the user has entered a change note. | def create_version_on_save(context, event):
# according to Products.CMFEditions' update_version_on_edit script
# only version the modified object, not its container on modification
if IContainerModifiedEvent.providedBy(event):
return
# XXX dirty hack for stagingbehavior, which triggers a event... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_initial_version_after_adding(context, event):\n\n pr = getToolByName(context, \"portal_repository\", None)\n if pr is None:\n # This can happen, e.g., when adding a Plone Site with versioning\n # and portal_repository is not yet created\n return\n\n if not pr.isVersionable(... | [
"0.69639736",
"0.66863483",
"0.6405179",
"0.6266524",
"0.61676246",
"0.60798085",
"0.60540813",
"0.59558815",
"0.5853697",
"0.56787676",
"0.56625044",
"0.56550807",
"0.5645847",
"0.56357175",
"0.55998117",
"0.5571658",
"0.55309147",
"0.55187213",
"0.5510219",
"0.5465921",
"0.... | 0.7737758 | 0 |
Creates an initial version on an object which is added to a container and may have just been created. The initial version is created if the content type is versionable, automatic versioning is enabled for this type, and there is no initial version yet. If a changeNote was entered, it is used as the comment. | def create_initial_version_after_adding(context, event):
pr = getToolByName(context, "portal_repository", None)
if pr is None:
# This can happen, e.g., when adding a Plone Site with versioning
# and portal_repository is not yet created
return
if not pr.isVersionable(context):
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_version_on_save(context, event):\n # according to Products.CMFEditions' update_version_on_edit script\n\n # only version the modified object, not its container on modification\n if IContainerModifiedEvent.providedBy(event):\n return\n\n # XXX dirty hack for stagingbehavior, which trig... | [
"0.6330616",
"0.58630115",
"0.5736563",
"0.5694096",
"0.55998176",
"0.5510448",
"0.54821205",
"0.527617",
"0.52696854",
"0.5260113",
"0.51173586",
"0.5091388",
"0.50750107",
"0.504838",
"0.50350356",
"0.501439",
"0.49975744",
"0.49866953",
"0.4931183",
"0.4930095",
"0.4930095... | 0.7284928 | 0 |
Undefines a given virtual network | def net_undefine(network, server, virt="Xen"):
cmd = "virsh -c %s net-undefine %s 2>/dev/null" % (virt2uri(virt), network)
ret, out = run_remote(server, cmd)
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_network(self, name_of_vm):\n try:\n # vmachine = self.get_vm_by_name(name_of_vm)\n vmachine = self.get_dc_object([vim.VirtualMachine], name_of_vm)\n network = None\n devices = vmachine.config.hardware.device\n networks = []\n for d... | [
"0.63707083",
"0.63062227",
"0.62453514",
"0.6191281",
"0.61359066",
"0.60712343",
"0.6025632",
"0.6006481",
"0.5992399",
"0.5980078",
"0.59744585",
"0.5953473",
"0.5941246",
"0.5902225",
"0.5895596",
"0.5824276",
"0.5809243",
"0.58091104",
"0.58025676",
"0.5797904",
"0.57742... | 0.7966817 | 0 |
Sets the root directory for the GUI based on the config file | def set_root(self):
config_dir = os.path.expanduser("~/.local/shs")
config_file = os.path.join(config_dir, "shs_gui.cfg")
# check the file and create one if it's not there
if not os.path.isfile(config_file):
os.makedirs(config_dir)
open(config_file, 'w').close()
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_rootdir(configdict, config_file):\n if 'rootdir' not in configdict or not configdict['rootdir']:\n configdict['rootdir'] = os.path.dirname(config_file)",
"def set_root(self, root):\n self.root_path = root",
"def set_cont_dir(self):\n cont_dir = select_dir(os.getcwd())\n i... | [
"0.6675416",
"0.6579016",
"0.6508349",
"0.64902014",
"0.6410637",
"0.6409476",
"0.63489723",
"0.6332154",
"0.6329878",
"0.6146685",
"0.6117435",
"0.6109983",
"0.6109983",
"0.60945934",
"0.60858583",
"0.60583323",
"0.6055506",
"0.60272396",
"0.599262",
"0.5988058",
"0.5968688"... | 0.81983185 | 0 |
Enqueue a task on a remote filesystem | def enqueue_remote(calc_dir, host, user):
from sshutils import getSSHClient, getQueue, copyFile, removeFile, runCommand
ssh = getSSHClient(host, user)
# find which queue system is implemented on cluster (qstat - PBS, sinfo - SLURM)
q = getQueue(ssh)
if q is None:
mbox... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def enqueue(self, fn):\n self.queue.put(fn)",
"def enqueue(self, url, path, check_val):\n logger.debug(\"Enqueuing new task (total: {0})\".format(\n self._dwq.qsize() + 1))\n self._dwq.put((url, path, check_val, 1))",
"def enqueue(self, xyz):\n command = 'enqueue ' + str(xyz)... | [
"0.62454194",
"0.6152354",
"0.6060588",
"0.6032706",
"0.591759",
"0.5790725",
"0.57750595",
"0.574475",
"0.5740375",
"0.5739555",
"0.57374674",
"0.5684687",
"0.55867654",
"0.5584851",
"0.5565949",
"0.5563195",
"0.5535499",
"0.5512124",
"0.54877776",
"0.5481692",
"0.5475199",
... | 0.68370694 | 0 |
Get an instance of Select. | def get_select_instance(self) -> Select:
element = self.wait_until_loaded()
return Select(element) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_selenium_select(self):\n\n if self.exists():\n\n element = self.element()\n\n if element.tag_name == u'select':\n return SeleniumSelect(element)",
"def select(cls, *flst):\n cls.runtime.set_select(flst)\n return SelectQuery(cls.runtime)",
"def ... | [
"0.70057505",
"0.6398658",
"0.6194344",
"0.6185794",
"0.6062771",
"0.6035689",
"0.6024519",
"0.6024519",
"0.59566975",
"0.5862003",
"0.5850936",
"0.58454293",
"0.5813995",
"0.57389367",
"0.56362617",
"0.55677134",
"0.5539791",
"0.5501766",
"0.5501766",
"0.5473336",
"0.545063"... | 0.8125689 | 0 |
Convert obj into Status. | def as_status(cls, obj):
if obj is None: return None
return obj if isinstance(obj, cls) else cls.from_string(obj) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_status_obj(self):\n\n status = Status(self._config.dirout, name=self._config.name,\n hardware=self._config.hardware)\n return status",
"def get_object_status(obj):\n return get_object_parameter(obj, 'status')",
"def save_object(self, data):\n return Statu... | [
"0.69012076",
"0.6826973",
"0.68099135",
"0.5904176",
"0.5852035",
"0.56639117",
"0.56094617",
"0.55938005",
"0.5563878",
"0.548514",
"0.5456211",
"0.54496115",
"0.54476863",
"0.5410725",
"0.5410167",
"0.5387186",
"0.53415185",
"0.5339267",
"0.53245735",
"0.531709",
"0.531315... | 0.8367584 | 0 |
Return a `Status` instance from its string representation. | def from_string(cls, s):
for num, text in cls._STATUS2STR.items():
if text == s:
return cls(num)
else:
raise ValueError("Wrong string %s" % s) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def from_string(cls, name: str) -> Enum:",
"def from_str(cls, string):",
"def from_dict(cls, dikt) -> 'Status':\n return deserialize_model(dikt, cls)",
"def as_status(cls, obj):\n if obj is None: return None\n return obj if isinstance(obj, cls) else cls.from_string(obj)",
"def from_dic... | [
"0.6532725",
"0.6098119",
"0.6089204",
"0.6071723",
"0.6062613",
"0.6039817",
"0.60321283",
"0.5979478",
"0.58530563",
"0.5814466",
"0.57564574",
"0.5747435",
"0.57120544",
"0.563867",
"0.56150186",
"0.55646116",
"0.55443376",
"0.5541977",
"0.5534263",
"0.5529887",
"0.5467418... | 0.7688771 | 0 |
List of strings with all possible status values. | def all_status_strings(cls):
return [info[1] for info in cls._STATUS_INFO] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getStatusValues(self):\n return []",
"def get_all_status():\n return \"\"",
"def valid_statuses(self):\n return [\n \"dish_maintenance\",\n \"dish_ok\",\n \"RF_maintenance\",\n \"RF_ok\",\n \"digital_maintenance\",\n \"digital_o... | [
"0.80372065",
"0.74304366",
"0.7289085",
"0.697477",
"0.6933288",
"0.6933288",
"0.6818656",
"0.6688988",
"0.6659315",
"0.6659315",
"0.6566149",
"0.65324384",
"0.6446381",
"0.6437433",
"0.6356403",
"0.63014066",
"0.6281931",
"0.62613416",
"0.62507194",
"0.6203024",
"0.61833817... | 0.8151018 | 0 |
True if status is critical. | def is_critical(self):
return str(self) in ("AbiCritical", "QCritical", "Unconverged", "Error") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def life_critical():\n return True",
"async def is_water_level_critical(self):\n entity_id = self._hass.data[DOMAIN][ATTR_WATER_LEVEL_CRITICAL_ENTITY_ID]\n return entity_id and self._hass.states.get(entity_id).state == STATE_ON",
"def critical(self) -> Optional[pulumi.Input['InfraAlertConditio... | [
"0.6534846",
"0.64763504",
"0.6444272",
"0.6444272",
"0.64000493",
"0.6359411",
"0.6297349",
"0.62813634",
"0.61272347",
"0.60135937",
"0.5963335",
"0.595614",
"0.59326184",
"0.5897819",
"0.58703893",
"0.58697253",
"0.5852167",
"0.575893",
"0.5719633",
"0.5684549",
"0.5680519... | 0.74858505 | 0 |
Initialize an instance of `NodeResults` from a `Node` subclass. | def from_node(cls, node):
kwargs = dict(
node_id=node.node_id,
node_finalized=node.finalized,
node_history=list(node.history),
node_name=node.name,
node_class=node.__class__.__name__,
node_status=str(node.status),
)
return ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, tree, result, url):\n self.tree = tree\n self.result = result\n self.url = url",
"def __init__(self) -> None:\n\t\t# Call super\n\t\tsuper(RootNode, self).__init__()\n\t\tself.nodes: List[Node] = []\n\t\tself.subfiles: Set[str] = set()",
"def __init__(self, start_index=N... | [
"0.5823575",
"0.5784935",
"0.57507306",
"0.5743493",
"0.56789154",
"0.55787575",
"0.5556027",
"0.55545753",
"0.5540183",
"0.5491495",
"0.5489627",
"0.548306",
"0.5457399",
"0.5450769",
"0.5444015",
"0.5427575",
"0.5427209",
"0.5423883",
"0.54184836",
"0.54181236",
"0.5371449"... | 0.6979888 | 0 |
List with the absolute paths of the files to be put in GridFs. | def gridfs_files(self):
return self["files"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filepaths(self):\n pass",
"def list_files(self):\n ret = []\n for fname in self.files:\n ret.append('filename: %s\\t replica locations: %s' %\n (fname, ','.join(self.files[fname])))\n return ret",
"def listFiles(self):\n pass",
"def getFiles(se... | [
"0.70887214",
"0.68611246",
"0.6793429",
"0.666335",
"0.6587119",
"0.65812594",
"0.6568302",
"0.65522903",
"0.65522903",
"0.65522903",
"0.6527277",
"0.6510298",
"0.6502833",
"0.64978254",
"0.6486751",
"0.64842963",
"0.64746535",
"0.64709234",
"0.646373",
"0.6423753",
"0.63345... | 0.73669624 | 0 |
This function registers the files that will be saved in GridFS. kwargs is a dictionary mapping the key associated with the file (usually the extension) to the absolute path. By default, files are assumed to be in binary form; for formatted files, one should pass a tuple ("filepath", "t"). | def register_gridfs_files(self, **kwargs):
d = {}
for k, v in kwargs.items():
mode = "b"
if isinstance(v, (list, tuple)): v, mode = v
d[k] = GridFsFile(path=v, mode=mode)
self["files"].update(d)
return self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ugs_save_file_on_filesystem_hook(*args, **kwargs):\n\n if len(args) == 1 and isinstance(args[0], FrappeFileDoc):\n # We are being called from a file-type doc\n ret = args[0].save_file_on_filesystem()\n else:\n ret = frappe.utils.file_manager.save_file_on_filesystem(*args, **kwargs)\n... | [
"0.61494565",
"0.57136476",
"0.55193377",
"0.5482238",
"0.5418756",
"0.5412003",
"0.5402928",
"0.53940064",
"0.53582376",
"0.53195375",
"0.52941066",
"0.5292922",
"0.5292611",
"0.5275253",
"0.5264585",
"0.5251641",
"0.52443796",
"0.5214584",
"0.52058154",
"0.5199464",
"0.5187... | 0.7732723 | 0 |
Check whether the node is an instance of `class_or_string`. Unlike the standard isinstance builtin, the method accepts either a class or a string. In the latter case, the string is compared with self.__class__.__name__ (case-insensitive). | def isinstance(self, class_or_string):
if class_or_string is None:
return False
import inspect
if inspect.isclass(class_or_string):
return isinstance(self, class_or_string)
else:
return self.__class__.__name__.lower() == class_or_string.lower() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def type_or_class_match(node_a, node_b):\n if isinstance(node_b['node'], type):\n return issubclass(type(node_a['node']), node_b['node'])\n elif isinstance(node_a['node'], type):\n return issubclass(type(node_b['node']), node_a['node'])\n elif isinstance(node_b['node'], xf.PatternNode):\n ... | [
"0.6436535",
"0.5984461",
"0.58243",
"0.58243",
"0.58243",
"0.58243",
"0.58243",
"0.58243",
"0.5785469",
"0.5769264",
"0.57563615",
"0.57536185",
"0.57536185",
"0.57398254",
"0.57315344",
"0.5721359",
"0.57201725",
"0.5707082",
"0.5687554",
"0.5687518",
"0.56612915",
"0.564... | 0.8296064 | 0 |
Convert obj into a Node instance. | def as_node(cls, obj):
if isinstance(obj, cls):
return obj
elif is_string(obj):
# Assume filepath.
return FileNode(obj)
elif obj is None:
return obj
else:
raise TypeError("Don't know how to convert %s to Node instance." % obj) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fromObj(self, obj):\n for k in BaseNode.SERIALIZABLE_PROPERTIES:\n if k in obj:\n # work around for migrate nodeInfo class\n if k == \"nodeInfo\":\n if isinstance(obj[k], dict):\n obj[k] = NodeInfo(obj[k][\"showInputs\"],... | [
"0.6805639",
"0.61491376",
"0.5969413",
"0.58747584",
"0.5704297",
"0.5641489",
"0.56377715",
"0.5610259",
"0.55141354",
"0.54903036",
"0.547982",
"0.54648393",
"0.5442755",
"0.5429253",
"0.5429165",
"0.53956425",
"0.5392377",
"0.5380667",
"0.5366155",
"0.53627306",
"0.535577... | 0.8197142 | 0 |
Return a relative version of the workdir | def relworkdir(self):
if getattr(self, "workdir", None) is None:
return None
try:
return os.path.relpath(self.workdir)
except OSError:
# current working directory may not be defined!
return self.workdir | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rel_cwd():\n return os.path.relpath(os.getcwd(), git_toplevel())",
"def build_relpath(self):\n return join_path(\"..\", self.build_dirname)",
"def pathtofolder():\n return os.getcwd()",
"def relDir(self, cwd=None, root=None):\n return os.path.dirname(self.relName(cwd, root)) or \".\"",
... | [
"0.7572184",
"0.74179256",
"0.71759975",
"0.7152729",
"0.7115556",
"0.71097624",
"0.7082853",
"0.7065006",
"0.70645964",
"0.7047222",
"0.70466393",
"0.7030355",
"0.70125103",
"0.70114654",
"0.70030177",
"0.70021874",
"0.6992148",
"0.69542587",
"0.69249356",
"0.6917377",
"0.68... | 0.751337 | 1 |
Set the node identifier. Use it carefully! | def set_node_id(self, node_id):
self._node_id = node_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def node_id(self, node_id: int):\r\n self._node_id = node_id",
"def set_id(self, refobj, identifier):\n cmds.setAttr(\"%s.identifier\" %refobj, identifier)",
"def node_id(self, node_id):\n\n self._node_id = node_id",
"def setNodeId(self, recId):\n if self.cursor:\n self... | [
"0.8026486",
"0.77016544",
"0.7650171",
"0.7358727",
"0.70628834",
"0.7047012",
"0.6896155",
"0.6876746",
"0.6874612",
"0.6835747",
"0.67891157",
"0.67891157",
"0.67755765",
"0.67752916",
"0.67473626",
"0.67473626",
"0.6714017",
"0.6624899",
"0.6569984",
"0.6569984",
"0.65699... | 0.778731 | 1 |
True if this node is a file | def is_file(self):
return isinstance(self, FileNode) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_file(self):\n return self.type == \"file\"",
"def is_file(self):\n return self.tipo == 'file' or self.tipo is None",
"def is_eficas_file(node):\n return (node.get_attr(Type).read() == EficasFile.ftype)",
"def is_file(self):\n return not self.is_directory",
"def isfile(self):\... | [
"0.823697",
"0.8187442",
"0.79591274",
"0.7515635",
"0.7338623",
"0.72390836",
"0.7206723",
"0.7115239",
"0.70083743",
"0.69566363",
"0.68873036",
"0.6885846",
"0.68853",
"0.68246317",
"0.6806279",
"0.67822725",
"0.6668802",
"0.66520804",
"0.6606101",
"0.660243",
"0.65713197"... | 0.90234685 | 0 |
True if this node is a Task | def is_task(self):
from .tasks import Task
return isinstance(self, Task) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_task_stagnant(task):",
"def is_task(self, task_id, tasks):\r\n for t in tasks:\r\n if t.id == task_id:\r\n return True\r\n return False",
"def _is_python_task(task, pidstr):\n if str(task.pid) != pidstr:\n return False\n else:\n return True",
... | [
"0.7443283",
"0.6947819",
"0.6818511",
"0.6777766",
"0.6695782",
"0.6610532",
"0.65350896",
"0.6530874",
"0.64364797",
"0.6271983",
"0.622673",
"0.62005734",
"0.6075867",
"0.5960482",
"0.59500843",
"0.5931306",
"0.5922021",
"0.58936006",
"0.58829546",
"0.5872167",
"0.5845682"... | 0.8342237 | 0 |
True if this node is a Work | def is_work(self):
from .works import Work
return isinstance(self, Work) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isNodeType(self, t):\n return isinstance(self, t)",
"def workable(self) -> bool:\n return self._strategy.workable",
"def is_worker_thread():\n try:\n return worker_thread_data.is_worker_thread\n except AttributeError:\n return False",
"def is_task(self):\n from .t... | [
"0.63182133",
"0.6092663",
"0.59042835",
"0.5891577",
"0.57657576",
"0.5746107",
"0.5746107",
"0.5746107",
"0.57207537",
"0.56583154",
"0.5650686",
"0.56354994",
"0.562447",
"0.562447",
"0.56225556",
"0.55459434",
"0.55150414",
"0.5466359",
"0.54564786",
"0.544544",
"0.541686... | 0.80919564 | 0 |
True if this node is a Flow | def is_flow(self):
from .flows import Flow
return isinstance(self, Flow) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isFlow(self) -> bool:\n ...",
"def isNodeType(self, t):\n return isinstance(self, t)",
"def _is_sink() -> bool:\n\n def _is_inplace(n: Node):\n \"\"\"Get the inplace argument from ``torch.fx.Node``\n \"\"\"\n inplace = False\n ... | [
"0.82888186",
"0.6625373",
"0.6044185",
"0.5894704",
"0.58589464",
"0.5831481",
"0.58194137",
"0.58000535",
"0.5776091",
"0.57718563",
"0.5769203",
"0.5670171",
"0.5609499",
"0.56082875",
"0.5608028",
"0.55946344",
"0.5508543",
"0.5454714",
"0.5452838",
"0.5450906",
"0.543212... | 0.82321334 | 1 |
Returns a list with the status of the dependencies. | def deps_status(self):
if not self.deps:
return [self.S_OK]
return [d.status for d in self.deps] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getDependenciesList(self) -> List[Mapping[Any, Any]]:\n if self._dependencyList is not None:\n return self._dependencyList\n\n chartfile = self.getChartFile()\n if chartfile['apiVersion'] == 'v2':\n if 'dependencies' in chartfile:\n self._dependencyList... | [
"0.68876165",
"0.6757046",
"0.66798264",
"0.65741026",
"0.6442077",
"0.64086777",
"0.6375406",
"0.6356302",
"0.6338329",
"0.629271",
"0.6290646",
"0.6248928",
"0.61763686",
"0.6158371",
"0.6153964",
"0.6139203",
"0.613682",
"0.61277705",
"0.61259836",
"0.61249816",
"0.608106"... | 0.8783236 | 0 |
True if this node depends on the other node. | def depends_on(self, other):
return other in [d.node for d in self.deps] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def depends((a, b)):\r\n return (any(bout in a.inputs for bout in b.outputs)\r\n or any(depends((ainp.owner, b)) for ainp in a.inputs\r\n if ainp.owner))",
"def equiv(self, other):\n # FUTURE: once the PiplelineState nodes attribute stores multiple modules,\n ... | [
"0.63062394",
"0.62011176",
"0.61985177",
"0.6183282",
"0.6183282",
"0.6178362",
"0.61306155",
"0.60825515",
"0.6065093",
"0.6065093",
"0.6033321",
"0.6033321",
"0.6020656",
"0.6002114",
"0.5981491",
"0.5935776",
"0.5914485",
"0.5895294",
"0.5885986",
"0.5883382",
"0.58295155... | 0.805279 | 0 |
Return the string representation of the dependencies of the node. | def str_deps(self):
lines = []
app = lines.append
app("Dependencies of node %s:" % str(self))
for i, dep in enumerate(self.deps):
app("%d) %s, status=%s" % (i, dep.info, str(dep.status)))
return "\n".join(lines) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_deps(self):\n\t\tprint self.deps, '\\n'",
"def __str__(self):\n stringRepresentation = []\n for node in self.getNodes():\n stringRepresentation.append(\"->\".join(\n (str(node), str(self.graph[node]))))\n\n return str(stringRepresentation)",
"def compone... | [
"0.668987",
"0.66223913",
"0.6401555",
"0.6358125",
"0.62786525",
"0.62584203",
"0.62463415",
"0.62463415",
"0.6243726",
"0.62194383",
"0.6207523",
"0.6207523",
"0.6144341",
"0.6044976",
"0.60416764",
"0.60378635",
"0.60351294",
"0.6028271",
"0.6021468",
"0.6000513",
"0.59810... | 0.8653706 | 0 |
Return a pandas DataFrame with the values of the variables specified in `varnames`. Can be used for tasks/works/flows. It's recursive! | def get_vars_dataframe(self, *varnames):
import pandas as pd
if self.is_task:
df = pd.DataFrame([{v: self.input.get(v, None) for v in varnames}], index=[self.name], columns=varnames)
df["class"] = self.__class__.__name__
return df
elif self.is_work:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_df(raw_data, variables):\n vars = []\n for v in requirement_order(variables):\n vars.append(v(raw_data))\n return pd.concat(vars, axis=1)",
"def convert_variables_to_dataframe(self, variables: Variables):\n records = []\n for (year, variable_name), variable in variables.ite... | [
"0.634334",
"0.6127457",
"0.54215074",
"0.53402454",
"0.5336286",
"0.53129846",
"0.52937585",
"0.52844363",
"0.5279209",
"0.52531093",
"0.52418345",
"0.51818794",
"0.5146553",
"0.5134265",
"0.50864273",
"0.50787354",
"0.5029223",
"0.50172925",
"0.5016772",
"0.49703383",
"0.49... | 0.74703014 | 0 |
Set the garbage collector. | def set_gc(self, gc):
assert isinstance(gc, GarbageCollector)
self._gc = gc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def garbage_collectors(self, garbage_collectors):\n\n self._garbage_collectors = garbage_collectors",
"def __init__(self, gc):\n self.gc = gc",
"def device_gc():\n safe_call(backend.get().af_device_gc())",
"def collect_garbage(self) -> None:\n pass",
"def _run_garbage_collection():\... | [
"0.72043836",
"0.6208546",
"0.61036986",
"0.6040764",
"0.5938195",
"0.5892014",
"0.56413096",
"0.56100863",
"0.55091774",
"0.5441686",
"0.5405389",
"0.5392927",
"0.5261957",
"0.5261957",
"0.5261957",
"0.5261957",
"0.5261957",
"0.5261957",
"0.52601856",
"0.5254886",
"0.524453"... | 0.7417656 | 0 |
Garbage collector. None if garbage collection is deactivated. Use flow.set_garbage_collector to initialize the object. | def gc(self):
try:
return self._gc
except AttributeError:
#if not self.is_flow and self.flow.gc: return self.flow.gc
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def collect_garbage(self) -> None:\n pass",
"def __init__(self, gc):\n self.gc = gc",
"def set_gc(self, gc):\n assert isinstance(gc, GarbageCollector)\n self._gc = gc",
"def device_gc():\n safe_call(backend.get().af_device_gc())",
"def garbage_collectors(self, garbage_collect... | [
"0.68191624",
"0.669241",
"0.64943933",
"0.63076013",
"0.6256203",
"0.6121257",
"0.61030954",
"0.6062495",
"0.5842175",
"0.57076174",
"0.57047695",
"0.5668936",
"0.5641246",
"0.56166315",
"0.55038536",
"0.54899156",
"0.5474436",
"0.5462704",
"0.5462704",
"0.5421219",
"0.54023... | 0.7802732 | 0 |
Install the `EventHandlers` for this `Node`. If no argument is provided, the default list of handlers is installed. | def install_event_handlers(self, categories=None, handlers=None):
if categories is not None and handlers is not None:
raise ValueError("categories and handlers are mutually exclusive!")
from .events import get_event_handler_classes
if categories:
raise NotImplementedErro... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addHandlers(self, handlers):\n self._eventHandlers.update(handlers)\n keys = self._eventHandlers.keys()\n pygame.event.set_allowed(keys)",
"def setupInputEventHandlers(self):\n\n default.Script.setupInputEventHandlers(self)\n self.inputEventHandlers.update(\n sel... | [
"0.6414835",
"0.6037302",
"0.5909204",
"0.59006244",
"0.57668835",
"0.57453233",
"0.57121414",
"0.5681006",
"0.5673216",
"0.5600916",
"0.5572513",
"0.5570844",
"0.5547228",
"0.5545235",
"0.55310273",
"0.5432351",
"0.5400709",
"0.53988016",
"0.5386339",
"0.53185946",
"0.530674... | 0.7356163 | 0 |
Print to `stream` the event handlers installed for this flow. | def show_event_handlers(self, stream=sys.stdout, verbose=0):
lines = ["List of event handlers installed:"]
for handler in self.event_handlers:
if verbose:
lines.extend(handler.__class__.cls2str().split("\n"))
else:
lines.extend(str(handler).split("... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_stream_handler(self):\n sh = logging.StreamHandler()\n sh.setFormatter(logging.Formatter(self.fmt, datefmt=self.date_fmt))\n self.addHandler(sh)",
"def set_stream(self, stream):\n\n for handler in self.handlers[:]:\n if isinstance(handler, logging.StreamHandler):\n ... | [
"0.61495626",
"0.61448324",
"0.5951412",
"0.5731857",
"0.5662934",
"0.5529747",
"0.5442177",
"0.54411954",
"0.5413522",
"0.5389675",
"0.5352938",
"0.5295455",
"0.52764785",
"0.52754617",
"0.5269171",
"0.5219142",
"0.52166575",
"0.5141776",
"0.5137113",
"0.51314557",
"0.510522... | 0.81538296 | 0 |
Basename of the file. | def basename(self):
return os.path.basename(self.filepath) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def basename(self):\n return get_basename(self.filename)",
"def basename(self):\n return self.name.basename",
"def get_file_basename(self):\n return self._basename[:]",
"def basename(self) -> str:\n return self._basename",
"def basename(self):\n return self._basename",
... | [
"0.82301086",
"0.798624",
"0.79570544",
"0.7948017",
"0.7867619",
"0.7821296",
"0.77957064",
"0.77522737",
"0.7751202",
"0.7596237",
"0.7540688",
"0.75255793",
"0.750992",
"0.74643725",
"0.73821795",
"0.7343527",
"0.7268083",
"0.7236085",
"0.7234416",
"0.7201531",
"0.7180847"... | 0.82075614 | 1 |
Add a node (usually Task) to the children of this FileNode. | def add_filechild(self, node):
self._filechildren.append(node) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_child(self, node):\n self.children.append(node)",
"def add_child(self, node):\n self.children.append(node)",
"def add_child(self, node):\n self.children.append(node)",
"def _add_child(self, node):\n self.children.update({\n node.name: node\n })\n n... | [
"0.7421095",
"0.7421095",
"0.7421095",
"0.7351981",
"0.7321027",
"0.71562696",
"0.70877254",
"0.6899852",
"0.68100643",
"0.67405903",
"0.6732843",
"0.6729451",
"0.66640645",
"0.66608375",
"0.6646832",
"0.6642311",
"0.65983003",
"0.65927315",
"0.6577577",
"0.65509397",
"0.6517... | 0.8034507 | 0 |
List with the children (nodes) of this FileNode. | def filechildren(self):
return self._filechildren | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def children(self):\n return list(self._children)",
"def children(self) -> List[str]:\n return self._children",
"def get_children(self):\n return []",
"def get_children(self):\n\n pass",
"def children(self):\n \n return self._children",
"def children(self):\n ... | [
"0.7844669",
"0.7693783",
"0.7595761",
"0.75668776",
"0.7560855",
"0.7555366",
"0.7555366",
"0.75442153",
"0.75434875",
"0.754045",
"0.754045",
"0.754045",
"0.7537011",
"0.7498799",
"0.74978864",
"0.74409",
"0.741199",
"0.74040014",
"0.73515093",
"0.7317118",
"0.7292258",
"... | 0.799188 | 0 |
Log 'msg % args' with the critical severity level | def critical(self, msg, *args, **kwargs):
self._log("CRITICAL", msg, args, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def critical(self, *args, **kwargs):\n self.msg(logging.CRITICAL, *args, **kwargs)",
"def critical(self, msg, *args, **kwargs):\n self._logger.critical(msg, *args, **kwargs)",
"def critical(self, msg, *args):\n if self.lvl<=logging.CRITICAL: return self._log(msg, *args)",
"def critical(self,... | [
"0.83006346",
"0.7913172",
"0.78587925",
"0.7817943",
"0.763236",
"0.75481474",
"0.7516018",
"0.7500672",
"0.7429482",
"0.73415476",
"0.727257",
"0.71751595",
"0.69875693",
"0.6929796",
"0.6856171",
"0.6827632",
"0.6792363",
"0.6784813",
"0.6758549",
"0.6689472",
"0.6678434",... | 0.812759 | 1 |
Save the id of the last node created. | def save_lastnode_id():
init_counter()
with FileLock(_COUNTER_FILE):
with AtomicFile(_COUNTER_FILE, mode="w") as fh:
fh.write("%d\n" % _COUNTER) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __save_node(self):\n print(self._encoder.encode(self._current_node))\n self._count += 1",
"def save_node(self, node: Node):",
"def save_node(self):\n # save node in path2node\n if self.full_path in self.file.path2node:\n print \"** Error, created node with path twice:... | [
"0.6517922",
"0.65173435",
"0.64769125",
"0.6426654",
"0.614686",
"0.6133911",
"0.608231",
"0.6079285",
"0.60323066",
"0.60263675",
"0.6023729",
"0.6021255",
"0.5956886",
"0.59333175",
"0.5909313",
"0.58732355",
"0.58603567",
"0.5828876",
"0.57975936",
"0.57755256",
"0.577539... | 0.723087 | 0 |