| query (string) | document (string) | metadata (dict) | negatives (list of 30 strings) | negative_scores (list of 30 strings) | document_score (string) | document_rank (string) |
|---|---|---|---|---|---|---|
Check that the line count is correct | def test_line_count(self):
self.assertEqual(analyse_text(self.filename)[0], 4) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 4)",
"def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 4)",
"def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 11)",
"def check_Lines(self):\n\n p... | [
"0.8300202",
"0.8300202",
"0.82924354",
"0.74480903",
"0.71274304",
"0.6892934",
"0.67752284",
"0.67375344",
"0.6687496",
"0.6650943",
"0.6615737",
"0.65877765",
"0.6544095",
"0.64498615",
"0.6419295",
"0.63805336",
"0.6351545",
"0.63380176",
"0.62988913",
"0.62775236",
"0.62... | 0.8300399 | 0 |
Check that the input file doesn't get deleted | def test_no_deletion(self):
analyse_text(self.filename)
self.assertTrue(os.path.exists(self.filename)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_no_delete(self):\n analyze_text(self.filename)\n self.assertTrue(os.path.exists(self.filename))",
"def test_no_deletion(self):\n analyze_text(self.filename)\n self.assertTrue(os.path.exists(self.filename))",
"def test_no_deletion(self):\n analyze_text(self.filename)\... | [
"0.7467014",
"0.74577135",
"0.74577135",
"0.7248804",
"0.69936264",
"0.6976359",
"0.6826063",
"0.6763073",
"0.6734737",
"0.6712128",
"0.6659804",
"0.6656828",
"0.6648759",
"0.6610986",
"0.6586217",
"0.658015",
"0.6540549",
"0.6538399",
"0.6528681",
"0.6508493",
"0.65010065",
... | 0.74659413 | 1 |
snap_verts Snap verts that are within tolerance meters of each other. Description Snapping should be performed with a very small tolerance. The goal is not to change the network, but to ensure rounding errors don't prevent edges from being split at proper intersections. The default of 1mm should be adequate if the inpu... | def snap_verts(shp,tolerance=0.001,arc=True):
kmtol = tolerance/1000.
data = numpy.concatenate([rec.vertices for rec in shp])
if arc:
kd = pysal.cg.KDTree(data,distance_metric="Arc",radius = pysal.cg.sphere.RADIUS_EARTH_KM)
else:
kd = pysal.cg.KDTree(data)
q = kd.query_ball_tre... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def snap_edges(edges, tolerance=DEFAULT_SNAP_TOLERANCE):\n v, h = [list(filter(lambda x: x[\"orientation\"] == o, edges)) for o in (\"v\", \"h\")]\n\n snap = snap_objects\n snapped = snap(v, \"x0\", tolerance) + snap(h, \"top\", tolerance)\n return snapped",
"def snap_to_line_old(points, lines, toler... | [
"0.6208411",
"0.53236127",
"0.5058674",
"0.5051121",
"0.50249195",
"0.50117344",
"0.48925573",
"0.47068614",
"0.45114636",
"0.45089692",
"0.4417379",
"0.44078958",
"0.44034535",
"0.43965262",
"0.43593574",
"0.4238884",
"0.42073944",
"0.4195878",
"0.41946483",
"0.416543",
"0.4... | 0.6914243 | 0 |
find_nodes Finds vertices in a line-type shapefile that appear more than once and/or are end points of a line. Arguments: shp (Shapefile Object), should be of type Line. Returns: set | def find_nodes(shp):
node_count = {}
for road in shp:
vrts = road.vertices
for node in vrts:
if node not in node_count:
node_count[node] = 0
node_count[node] += 1
node_count[vrts[0]] += 1
node_count[vrts[-1]] += 1
return set([node for n... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def split_at_nodes(shp):\n nodes = find_nodes(shp)\n nodeIds = list(nodes)\n nodeIds.sort()\n nodeIds = dict([(node,i) for i,node in enumerate(nodeIds)])\n \n for road in shp:\n vrts = road.vertices\n midVrts = set(road.vertices[1:-1]) #we know end points are nodes\n midNodes... | [
"0.59690547",
"0.55561215",
"0.546086",
"0.5417645",
"0.5314374",
"0.5297645",
"0.52889955",
"0.51830107",
"0.5161922",
"0.5151241",
"0.5110149",
"0.5090524",
"0.5085374",
"0.506049",
"0.5047005",
"0.5022949",
"0.5022422",
"0.5014972",
"0.4973013",
"0.4948021",
"0.4903102",
... | 0.64602196 | 0 |
split_at_nodes Split line features at nodes. Arguments: shp (list or shapefile), Chain features to be split at common nodes. Returns: generator yielding pysal.cg.Chain objects | def split_at_nodes(shp):
nodes = find_nodes(shp)
nodeIds = list(nodes)
nodeIds.sort()
nodeIds = dict([(node,i) for i,node in enumerate(nodeIds)])
for road in shp:
vrts = road.vertices
midVrts = set(road.vertices[1:-1]) #we know end points are nodes
midNodes = midVrts.int... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_split(cls, onnx_node, inputs, opset_version):\n axis = onnx_node.getattr(\"axis\", 0)\n split = onnx_node.getattr(\"split\", None)\n num_output = len(onnx_node.outputs)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n ... | [
"0.5709366",
"0.5100523",
"0.50354594",
"0.5005691",
"0.4956179",
"0.4929372",
"0.49258184",
"0.49091634",
"0.48914868",
"0.48850974",
"0.4871217",
"0.48550192",
"0.48419514",
"0.47978392",
"0.47974023",
"0.47971427",
"0.47869164",
"0.47591326",
"0.47351238",
"0.4734362",
"0.... | 0.71027136 | 0 |
Fitness is the inverse of the number of conflicts. | def get_fitness(self):
hard_conflicts = self.get_conflicts()
soft_conflicts = self.get_soft_conflicts()
hard_fitness = 1 / hard_conflicts if hard_conflicts != 0 else math.inf
soft_fitness = 1 / soft_conflicts if soft_conflicts != 0 else math.inf
return [hard_fitness, soft_fitness... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fitness(self):\n # TO BE DECIDED\n return 1",
"def fitness(self):\n pass",
"def _calculate_fitness(self):\n pass",
"def _recompute_fitness(self):\n for cur in self.population:\n if cur['fitness'] is None:\n cur['fitness'] = self.op.fitness(cur[... | [
"0.7400809",
"0.71090883",
"0.7051153",
"0.6702002",
"0.6494779",
"0.64586675",
"0.6426077",
"0.6370176",
"0.63033736",
"0.6300053",
"0.6292698",
"0.62820524",
"0.628173",
"0.62036395",
"0.6190891",
"0.6169736",
"0.61475295",
"0.6096302",
"0.607338",
"0.6044294",
"0.60197985"... | 0.7290288 | 1 |
Handler for the command. It creates the topic structure for the given topics in the csv file and adds outcomes to the leaf node | def handle(self, *args, **options):
csv_filename = options['file_name']
parent_name = options['parent']
levels = 0
with open(csv_filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
headings = next(csv_reader)
# Determine the topic... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create():\n logging.info('\"Create\" task started using config file %s', args.config)\n input_csv = os.path.join(config['input_dir'], config['input_csv'])\n if os.path.exists(input_csv):\n # Store a dictionary of id_field values: node IDs so we can add child nodes.\n node_ids = dict()\n\... | [
"0.5957159",
"0.56996644",
"0.5595819",
"0.5556172",
"0.53115696",
"0.5231632",
"0.5207633",
"0.5198197",
"0.5174824",
"0.51507366",
"0.51286167",
"0.5118943",
"0.5106111",
"0.50699234",
"0.5052287",
"0.504027",
"0.5022299",
"0.49730396",
"0.49713245",
"0.49529165",
"0.492699... | 0.6889117 | 0 |
Tidy a telephone number. Weed out some obvious bad data. | def tidy_telephone(telephone):
junk = ['none', 'none1', 'na', 'n/a', 'same', 'yes', 'cell', 'offsite']
telephone = telephone.replace('xxx-xxx-xxxx', '')
telephone = telephone.replace('ext', ' x')
telephone = telephone.replace(' cell', '')
telephone = telephone.replace('"', '')
telephone = teleph... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean_phone(number):\n numberlist = re.findall(\"\\d\",number)\n new_number = \"\".join(numberlist)\n if len(new_number) == 8:\n \tnew_number = \"010\" + new_number\n\tnew_number = new_number[-11:]\n\tif new_number.startswith('1'):\n\t\tnew_number = \"+86-\" + new_number\n\telse:\n\t\tnew_number = ... | [
"0.6952731",
"0.6931968",
"0.66633725",
"0.6572535",
"0.6570305",
"0.65670294",
"0.64891815",
"0.6423277",
"0.64005715",
"0.6346448",
"0.6329825",
"0.6327053",
"0.6323683",
"0.62718356",
"0.6238897",
"0.6238636",
"0.6234571",
"0.62083876",
"0.6075161",
"0.60560757",
"0.602125... | 0.7598371 | 0 |
Parse an address. Usually has two or three parts, with the final part being 'City, State Zip'. Some have fewer parts. Some are non-USA (perhaps if there is no 2-character state and zip). | def parse_address_street(address_str, address_zip_us_re, address_zip_us_lax_re):
address = {}
errors = []
parts = address_str.split('$')
if DEBUG:
address['debug_address_str'] = address_str
address['debug_part_1'] = parts[0]
address['debug_part_last'] = parts[-1]
address[... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_address(address: str) -> OrderedDict[str, str]:\n\n address = address.replace(\"\\n\", \", \")\n\n parsed_address, address_type = usaddress.tag(address)\n if address_type != \"Street Address\":\n logger.warning(\n f\"Couldn't parse address '{address}' of type {address_type}; be... | [
"0.7450505",
"0.7034624",
"0.6786682",
"0.676242",
"0.6687193",
"0.667537",
"0.6564549",
"0.6319824",
"0.62784386",
"0.6259219",
"0.62310874",
"0.61880517",
"0.6131252",
"0.61060214",
"0.6036868",
"0.6029198",
"0.6013518",
"0.6011449",
"0.598841",
"0.58983797",
"0.5890547",
... | 0.739219 | 1 |
Parse an address. Type 2 = Campus. First look for "room" on its own, then for delimited "building,room". | def parse_address_campus(address_str, address_campus_re, address_campus_room_re):
address = {}
errors = []
if '$' not in address_str:
match = re.search(address_campus_room_re, address_str)
if match:
address['addressLine1'] = match.group(1)
else:
# This leftove... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_address(address: str) -> OrderedDict[str, str]:\n\n address = address.replace(\"\\n\", \", \")\n\n parsed_address, address_type = usaddress.tag(address)\n if address_type != \"Street Address\":\n logger.warning(\n f\"Couldn't parse address '{address}' of type {address_type}; be... | [
"0.60765177",
"0.60365295",
"0.60231984",
"0.59253734",
"0.5881075",
"0.58416593",
"0.5841651",
"0.5807531",
"0.57222366",
"0.5652957",
"0.56317115",
"0.55997515",
"0.5580387",
"0.5526875",
"0.5515271",
"0.5487256",
"0.5456443",
"0.54219383",
"0.537054",
"0.5349834",
"0.53455... | 0.69196 | 0 |
Determine the preferredContactTypeId. Use "email" if it exists; otherwise use "mail", even if address/phone does not exist. | def determine_preferred_contact(user_data):
try:
user_data['personal']['email']
except KeyError:
preferred_contact = 'mail'
else:
preferred_contact = 'email'
return preferred_contact | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_type(self):\n types = dict(ADDRESS_TYPE_CHOICES)\n return types.get(self.address_type, \"N/A\")",
"def scheme_type(self) -> Union[str, None]:\n if self.scheme is None:\n return None\n\n if \"contact_1\" in self.scheme and \"contact_2\" in self.scheme:\n r... | [
"0.5919622",
"0.5790863",
"0.54554784",
"0.5440097",
"0.5357837",
"0.53405076",
"0.5333537",
"0.5288933",
"0.5283198",
"0.52559656",
"0.52522856",
"0.5250436",
"0.52444154",
"0.51999366",
"0.51787084",
"0.5124692",
"0.5112101",
"0.51028335",
"0.5095454",
"0.5068635",
"0.50686... | 0.7721516 | 0 |
Load the data file mapping barcode to uuid. Enables reloading of user data. | def load_uuid_map(input_fn):
with open(input_fn) as input_fh:
fieldnames = ['barcode', 'uuid']
reader = csv.DictReader(input_fh, fieldnames=fieldnames)
uuids = {}
for row in reader:
uuids[row['barcode']] = row['uuid']
return uuids | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loadIdMap(self, filename:str) -> None :\n if(not isinstance(filename,str)):\n raise TypeError(\"filename must be a string but %s was passed\"%str(type(filename)))\n if(not os.path.exists(filename) or not os.path.isfile(filename)):\n raise ValueError(\"invalid filename\")\n\n... | [
"0.59658784",
"0.59335923",
"0.5854021",
"0.5852161",
"0.5834609",
"0.5776097",
"0.57309085",
"0.5659113",
"0.55741054",
"0.55189246",
"0.54896265",
"0.54887307",
"0.544575",
"0.54397064",
"0.54236597",
"0.5418168",
"0.54089224",
"0.5387084",
"0.5378334",
"0.53766614",
"0.537... | 0.6709148 | 0 |
Returns True if this restaurant is yummy; otherwise, returns False. | def is_yummy(self):
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_found_food(self):\r\n\r\n self.has_been_checked = True\r\n if self.identified_food is not None:\r\n return True\r\n else:\r\n return False",
"def is_food(self) -> bool:\n return self in (self.off, self.off_pro)",
"def _has_turtle(self, name):\n\t\tretur... | [
"0.66611886",
"0.6279923",
"0.61343956",
"0.6131653",
"0.6115546",
"0.60490453",
"0.6042242",
"0.60213983",
"0.6009088",
"0.59308374",
"0.5875482",
"0.5845528",
"0.5844853",
"0.58259666",
"0.58231807",
"0.57674134",
"0.5748331",
"0.571837",
"0.5713479",
"0.571093",
"0.5704434... | 0.7547022 | 0 |
Use Ogata's method for Hankel Transforms in 3D for nu=0 (nu=1/2 for 2D) to convert a given power spectrum to a correlation function. | def power_to_corr_ogata(power, k, r, N=640, h=0.005):
lnk = np.log(k)
spl = spline(lnk, power)
roots = np.arange(1, N + 1)
t = h*roots
s = np.pi*np.sinh(t)
x = np.pi*roots*np.tanh(s/2)
dpsi = 1 + np.cosh(s)
dpsi[dpsi != 0] = (np.pi*t*np.cosh(t) + np.sinh(s))/dpsi[dpsi != 0]
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def power_to_corr_ogata_matrix(power, k, r, N=640, h=0.005):\r\n lnk = np.log(k)\r\n roots = np.arange(1, N + 1)\r\n t = h*roots\r\n s = np.pi*np.sinh(t)\r\n x = np.pi*roots*np.tanh(s/2)\r\n\r\n dpsi = 1 + np.cosh(s)\r\n dpsi[dpsi != 0] = (np.pi*t*np.cosh(t) + np.sinh(s))/dpsi[dpsi != 0]\r\n ... | [
"0.6133484",
"0.6131303",
"0.61156124",
"0.6097184",
"0.5964193",
"0.5935253",
"0.58684826",
"0.58155364",
"0.5729782",
"0.5729042",
"0.56461865",
"0.56124115",
"0.5606021",
"0.5522296",
"0.54273176",
"0.5400859",
"0.54005045",
"0.53976053",
"0.53870773",
"0.5365456",
"0.5354... | 0.64470875 | 0 |
Use Ogata's method for Hankel Transforms in 3D for nu=0 (nu=1/2 for 2D) to convert a given correlation function to a power spectrum. | def corr_to_power_ogata(corr, r, k, N=640, h=0.005):
return 8*np.pi**3 * power_to_corr_ogata(corr,r,k,N,h) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def one_transition_spectrum_fluor(self,tr):\n \n\n ta = tr[\"ta\"] # TimeAxis\n dd = tr[\"dd\"] # transition dipole strength\n om = tr[\"om\"] # frequency - rwa\n gg = tr[\"gg\"] # natural broadening (constant or time dependent)\n fwhm = tr[\"fwhm\"] # Additional gaussian ... | [
"0.6212075",
"0.612403",
"0.6069077",
"0.59723717",
"0.59370744",
"0.59056664",
"0.58523625",
"0.57545465",
"0.5681477",
"0.5679566",
"0.5661711",
"0.56271815",
"0.55051076",
"0.55033296",
"0.54322904",
"0.5421759",
"0.54203266",
"0.5394413",
"0.53819495",
"0.53730845",
"0.53... | 0.6358835 | 0 |
Use Ogata's method for Hankel Transforms in 3D for nu=0 (nu=1/2 for 2D) to convert a given power spectrum to a correlation function. In this case, `power` is a (r,k) matrix, and the computation is slightly faster, with fewer recalculations than looping over the original. | def power_to_corr_ogata_matrix(power, k, r, N=640, h=0.005):
lnk = np.log(k)
roots = np.arange(1, N + 1)
t = h*roots
s = np.pi*np.sinh(t)
x = np.pi*roots*np.tanh(s/2)
dpsi = 1 + np.cosh(s)
dpsi[dpsi != 0] = (np.pi*t*np.cosh(t) + np.sinh(s))/dpsi[dpsi != 0]
sumparts = np.pi*np.s... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def power_to_corr_ogata(power, k, r, N=640, h=0.005):\r\n lnk = np.log(k)\r\n spl = spline(lnk, power)\r\n roots = np.arange(1, N + 1)\r\n t = h*roots\r\n s = np.pi*np.sinh(t)\r\n x = np.pi*roots*np.tanh(s/2)\r\n\r\n dpsi = 1 + np.cosh(s)\r\n dpsi[dpsi != 0] = (np.pi*t*np.cosh(t) + np.sinh(... | [
"0.75858206",
"0.7297814",
"0.6621589",
"0.6523385",
"0.62218213",
"0.55348736",
"0.5503034",
"0.55015033",
"0.5392558",
"0.52191025",
"0.52019596",
"0.5185738",
"0.51804763",
"0.51671803",
"0.5145176",
"0.5141016",
"0.5133362",
"0.5127218",
"0.50921315",
"0.5071037",
"0.5069... | 0.7499251 | 1 |
Calculate the correlation function given a power spectrum | def power_to_corr(power_func, R):
if not np.iterable(R):
R = [R]
corr = np.zeros_like(R)
# the number of steps to fit into a half-period at high-k. 6 is better than 1e-4.
minsteps = 8
# set min_k, 1e-6 should be good enough
mink = 1e-6
temp_min_k = 1.0
for i, r... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_correlation_spectrum():\r\n # Smoke-test for now - unclear what to test here...\r\n f, c = tsa.correlation_spectrum(x, y, norm=True)",
"def _pearson_correlation_coeff(x_data, y_data):\n reg = linregress(x_data, y_data)\n return reg.rvalue",
"def convolve_power(power_spectrum, window_power,... | [
"0.7189788",
"0.673408",
"0.6714616",
"0.6576998",
"0.65565723",
"0.6541323",
"0.6446196",
"0.64347076",
"0.6403022",
"0.62834674",
"0.62355965",
"0.62192845",
"0.6140028",
"0.6118101",
"0.60430646",
"0.6023636",
"0.6011547",
"0.59979784",
"0.5994897",
"0.5988276",
"0.5946064... | 0.73344284 | 0 |
Populate a series of DM halos with galaxies given a HOD model. | def populate(centres, masses, halomodel=None, profile=None, hodmod=None, edges=None):
if halomodel is not None:
profile = halomodel.halo_profile
hodmod = halomodel.hod
masses = np.array(masses)
# Define which halos have central galaxies.
cgal = np.random.binomial(1, hodmod.cent... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_from_halton(self):\n halton_variables = [\n v for v in self.list if v.kind.lower() not in EXCLUDE_FROM_HALTON\n ]\n if halton_variables:\n nd_halton_seq = halton((self.samples, len(halton_variables)))\n for idx, v in enumerate(halton_variables):\n ... | [
"0.568537",
"0.56222034",
"0.5547393",
"0.5238224",
"0.5237878",
"0.5174212",
"0.5171862",
"0.5138563",
"0.51223207",
"0.509351",
"0.5085113",
"0.5034578",
"0.5034384",
"0.5033114",
"0.5007865",
"0.49874705",
"0.49872056",
"0.49819848",
"0.4976796",
"0.49730727",
"0.49553052"... | 0.6808839 | 0 |
Return true if the magnitude is 1 | def is_unit(self):
return math.isclose(self.magnitude(), 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def magnitude(self):\n return sqrt(self & self)",
"def __bool__(self):\n return self[0] != 0.0 or self[1] != 0.0",
"def is_imaginary(self) -> bool:\n return self < 0",
"def isone(a: float) -> bool:\n return np.isclose(a, 1.0, atol=1.0e-8, rtol=0.0)",
"def is_real(self) -> bool:\n ... | [
"0.67833275",
"0.6677849",
"0.6601169",
"0.6581789",
"0.64836466",
"0.6439738",
"0.6422641",
"0.641897",
"0.6388736",
"0.63666415",
"0.62939376",
"0.62938464",
"0.6264597",
"0.6241439",
"0.62216634",
"0.61793745",
"0.616768",
"0.61614376",
"0.61560506",
"0.61425555",
"0.61315... | 0.70091724 | 0 |
Return the angle to the vector other | def angle(self, other):
return acosd(self.normalized().dot(other.normalized())) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def angle(self, other):\n return acosd(np.clip(self.uv().dot(other.uv()), -1, 1))",
"def angle_to(self, other):\n return other.angle - self.angle",
"def angle(v1: Vector, v2: Vector) -> float:\n return math.degrees(math.acos((v1 * v2) / (v1.length() * v2.length())))",
"def get_angle_between(... | [
"0.8653876",
"0.82146144",
"0.81834894",
"0.8172939",
"0.81702507",
"0.8123051",
"0.79451007",
"0.79445916",
"0.7944181",
"0.7936801",
"0.79225296",
"0.7910151",
"0.7838432",
"0.78287446",
"0.77956486",
"0.7786319",
"0.77857083",
"0.7780361",
"0.7773948",
"0.7762068",
"0.7740... | 0.84712195 | 1 |
Return vector rejection on the vector other | def reject(self, other):
return self - self.project(other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __neg__(self):\n return self.from_points(-v for v in self._vectors)",
"def __neg__(self):\n a = -self._ar\n return Vector(a)",
"def __neg__(self):\n return Vector([-c for c in self.components])",
"def __sub__(self,other):\n return np.linalg.norm(self.ngdv-other.ngdv)",
... | [
"0.59731126",
"0.57866377",
"0.5741179",
"0.57054615",
"0.5669241",
"0.5668815",
"0.55966514",
"0.5542648",
"0.5542214",
"0.55375445",
"0.5508057",
"0.5484324",
"0.54819685",
"0.54819685",
"0.54683065",
"0.54621327",
"0.54572034",
"0.54521096",
"0.5442076",
"0.543183",
"0.538... | 0.6852742 | 0 |
Return a spherical linear interpolation between self and the other vector. Note that for non-unit vectors the interpolation is not uniform | def slerp(self, other, t):
a, b = Vector3(self), Vector3(other)
theta = a.angle(b)
return type(self)(a * sind((1 - t) * theta) + b * sind(t * theta)) / sind(theta) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_linear(self):\n return inv(quad_hybrid).dot(self.circular)",
"def interpolate_hypersphere(v1, v2, num_steps):\n v1_norm = tf.norm(v1)\n v2_norm = tf.norm(v2)\n v2_normalized = v2 * (v1_norm / v2_norm)\n vectors = []\n for step in range(num_steps):\n interpolated = v1 + (v2_normali... | [
"0.6115919",
"0.6058028",
"0.60471",
"0.5980602",
"0.59443694",
"0.582192",
"0.5801971",
"0.5778215",
"0.5748797",
"0.5745295",
"0.57203645",
"0.5715169",
"0.5693372",
"0.56862926",
"0.56783795",
"0.5676342",
"0.5653637",
"0.56283605",
"0.56083214",
"0.5581304",
"0.5563196",
... | 0.6161639 | 0 |
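Stated compactly, the slerp above computes slerp(a, b, t) = [sin((1-t) theta) a + sin(t theta) b] / sin(theta), where theta is the angle between the vectors. A self-contained NumPy restatement (radians, unit vectors assumed; the parallel-vector fallback is an addition not present in the snippet):

```python
import numpy as np

def slerp(a, b, t):
    """Spherical linear interpolation between unit vectors a and b."""
    a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
    theta = np.arccos(np.clip(np.dot(a, b), -1.0, 1.0))
    if np.isclose(theta, 0.0):        # nearly parallel: fall back to lerp
        return (1 - t) * a + t * b
    return (np.sin((1 - t) * theta) * a + np.sin(t * theta) * b) / np.sin(theta)
```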
Create a unit-length vector in the z-direction | def unit_z(cls):
return cls(0, 0, 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vec_z(self):\t\t\t\r\n if self.oz != 0:\r\n ov = self.oz\r\n lv = self.self.lz + self.oz\r\n else:\r\n ov = self.dz / 2\r\n lv = self.lz\r\n\r\n zv = \"\"\r\n for num in np.arange(ov, lv, self.dz):\r\n zv += str(num) + \" \"\r\n... | [
"0.72920877",
"0.6870971",
"0.6799923",
"0.66981107",
"0.6633959",
"0.66133875",
"0.6550772",
"0.6353938",
"0.63386774",
"0.6324437",
"0.6277576",
"0.6207572",
"0.6163615",
"0.6163615",
"0.6163615",
"0.6128821",
"0.6116663",
"0.60971904",
"0.60963476",
"0.60963476",
"0.609634... | 0.7064322 | 1 |
Return the vector rotated around axis through angle theta. Right hand rule applies | def rotate(self, axis, theta):
v = Vector3(self) # ensure vector
k = Vector3(axis.uv())
return type(self)(
cosd(theta) * v
+ sind(theta) * k.cross(v)
+ (1 - cosd(theta)) * k * (k.dot(v))
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def xy_rotation(vector,theta):\r\n R = np.array([[np.cos(theta), -np.sin(theta),0],\r\n [np.sin(theta), np.cos(theta),0],\r\n [0,0,1]\r\n ])\r\n return np.dot(R,vector)",
"def rot(theta):\n cos = np.cos(theta)\n sin = np.sin(theta)\n return( np.ar... | [
"0.75467306",
"0.7465286",
"0.74494255",
"0.74160385",
"0.73571455",
"0.7356651",
"0.7304112",
"0.72797716",
"0.7279729",
"0.72671384",
"0.7189685",
"0.7174742",
"0.7168985",
"0.71672356",
"0.71182173",
"0.71027213",
"0.7097267",
"0.709328",
"0.70914054",
"0.7085537",
"0.7083... | 0.8187367 | 0 |
Return the angle to the vector other | def angle(self, other):
return acosd(np.clip(self.uv().dot(other.uv()), -1, 1)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def angle(self, other):\n return acosd(self.normalized().dot(other.normalized()))",
"def angle_to(self, other):\n return other.angle - self.angle",
"def angle(v1: Vector, v2: Vector) -> float:\n return math.degrees(math.acos((v1 * v2) / (v1.length() * v2.length())))",
"def get_angle_between(... | [
"0.84712195",
"0.82146144",
"0.81834894",
"0.8172939",
"0.81702507",
"0.8123051",
"0.79451007",
"0.79445916",
"0.7944181",
"0.7936801",
"0.79225296",
"0.7910151",
"0.7838432",
"0.78287446",
"0.77956486",
"0.7786319",
"0.77857083",
"0.7780361",
"0.7773948",
"0.7762068",
"0.774... | 0.8653876 | 0 |
Returns placeholders for the given feature spec. Returns a dictionary of placeholders with the same type and shape as calling tf.parse_example with the given feature spec. | def feature_spec_as_batched_placeholders(feature_spec):
result = {}
for name, spec in six.iteritems(feature_spec):
if spec.dtype not in (tf.int64, tf.float32, tf.string):
raise ValueError('{} had invalid dtype'.format(spec))
if isinstance(spec, tf.FixedLenFeature):
result[name] = tf.placeholder(... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def batched_placeholders_from_specs(specs):\n if not (all([_is_feature_spec(s) for s in six.itervalues(specs)]) or\n all([isinstance(s, tf.TypeSpec) for s in six.itervalues(specs)])):\n raise TypeError('Specs must be all tf.TypeSpecs or feature specs. '\n 'Mixing is not allowed. Got... | [
"0.68627125",
"0.62956125",
"0.61024445",
"0.5971513",
"0.5948509",
"0.58165884",
"0.558762",
"0.5515218",
"0.54961",
"0.54718864",
"0.54472584",
"0.54222244",
"0.5409661",
"0.54093456",
"0.54090065",
"0.5377132",
"0.53761154",
"0.5354805",
"0.5327928",
"0.53272367",
"0.53218... | 0.8073673 | 0 |
Converts a list of instance indices to the corresponding batch indices. Given a list of iterables representing the indices of N sparse tensors, creates a single list of indices representing the result of concatenating the sparse tensors along the 0'th dimension into a batch of size N. | def make_batch_indices(instance_indices):
batch_indices = list(itertools.chain.from_iterable([
[(row_number, index) for index in indices]
for row_number, indices in enumerate(instance_indices)
]))
# Indices must have shape (?, 2). Therefore if we encounter an empty
# batch, we return an ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tensor2indices(batch_sents):\n size_batch = tf.shape(batch_sents)[0]\n len_batch = tf.shape(batch_sents)[1]\n batch_i = tf.range(size_batch)\n len_i = tf.range(len_batch)\n\n # [0,0,0,1,1,1,2,2,2,...]\n batch_i = tf.tile(batch_i[:, None], [1, len_batch])\n # [0,... | [
"0.6757593",
"0.668924",
"0.6647247",
"0.6525968",
"0.63507",
"0.6347478",
"0.6330641",
"0.6165496",
"0.6137877",
"0.6136195",
"0.61189455",
"0.6038336",
"0.5884571",
"0.58565253",
"0.58565253",
"0.5856108",
"0.5844865",
"0.58287096",
"0.5819618",
"0.5785525",
"0.5750782",
... | 0.73625505 | 0 |
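A tiny worked example of the flattening `make_batch_indices` performs: instance 0 carries sparse indices [0, 2] and instance 1 carries [1], so each index is paired with its row number in the batch.

```python
instance_indices = [[0, 2], [1]]
make_batch_indices(instance_indices)
# [(0, 0), (0, 2), (1, 1)]   shape (?, 2): (row_in_batch, index_within_instance)
```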
Converts a list of sparse instances into a sparse batch. Takes lists representing the indices and values of N sparse instances and concatenates them along the 0'th dimension into a sparse batch of size N. | def make_sparse_batch(instance_indices, instance_values, max_index):
batch_indices = make_batch_indices(instance_indices)
batch_values = list(itertools.chain.from_iterable(instance_values))
batch_shape = (len(instance_indices), max_index)
return tf.SparseTensorValue(batch_indices, batch_values, batch_sh... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_to_sparse(inputs):\n\n data = list(itertools.chain(*inputs))\n row = list(itertools.chain(\n *[itertools.repeat(i, len(x)) for i, x in enumerate(inputs)]))\n col = list(itertools.chain(*[range(len(x)) for x in inputs]))\n\n s = coo_matrix((data, (row, col)), shape=(\n len(inp... | [
"0.66795045",
"0.66483355",
"0.65239143",
"0.6505503",
"0.635059",
"0.6299451",
"0.6201308",
"0.6102207",
"0.60624063",
"0.5984515",
"0.59727436",
"0.5967407",
"0.59385884",
"0.5926944",
"0.5847603",
"0.5769515",
"0.5766314",
"0.57244736",
"0.5662259",
"0.56046027",
"0.557581... | 0.7440366 | 0 |
Decomposes a sparse batch into a list of sparse instances. | def decompose_sparse_batch(sparse_value):
batch_indices, batch_values, batch_shape = sparse_value
# Preallocate lists of length batch_size, initialized to empty ndarrays,
# representing the indices and values of instances. We can reuse the return
# value of _get_empty_array here because it is immutable.... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _decompose_sparse_batch(\n sparse_value: _SparseTensorValueType\n) -> Tuple[List[_SparseComponentType], _SparseComponentType]:\n batch_indices, batch_values, batch_shape = _extract_sparse_components(\n sparse_value)\n batch_size = batch_shape[0]\n instance_rank = len(batch_shape) - 1\n\n # Prealloc... | [
"0.75086296",
"0.7059808",
"0.70122707",
"0.62066376",
"0.6091215",
"0.59720814",
"0.5895833",
"0.57921684",
"0.57378924",
"0.5696345",
"0.56844103",
"0.56454796",
"0.5540356",
"0.5498296",
"0.549795",
"0.5489116",
"0.54844165",
"0.54786456",
"0.5451528",
"0.54457694",
"0.542... | 0.7623021 | 0 |
Returns a list of `Phase`s describing how to execute the pipeline. The default graph is assumed to contain some `Analyzer`s which must be executed by doing a full pass over the dataset, and passing the inputs for that analyzer into some implementation, then taking the results and replacing the `Analyzer`s outputs with ... | def create_phases(inputs):
feed_tensors = inputs.values()
remaining_analyzers = tf.get_collection(analyzers.ANALYZER_COLLECTION)
analyzer_output_ready = {}
for analyzer in remaining_analyzers:
for tensor in analyzer.outputs:
analyzer_output_ready[tensor] = False
# Construct `AnalyzerInfo`s, removi... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def standard_optimize_phases():\n return [\n annotate_downstream_side_inputs,\n annotate_stateful_dofns_as_roots,\n fix_side_input_pcoll_coders,\n pack_combiners,\n lift_combiners,\n expand_sdf,\n fix_flatten_coders,\n # sink_flattens,\n greedily_fuse,\n read_to_i... | [
"0.5932127",
"0.51500165",
"0.49873027",
"0.49778447",
"0.4923041",
"0.4881102",
"0.48662743",
"0.47729826",
"0.47422072",
"0.47227156",
"0.4705899",
"0.4686329",
"0.4661761",
"0.46547806",
"0.46406043",
"0.46231586",
"0.45860365",
"0.45815903",
"0.45766085",
"0.45731992",
"0... | 0.61836445 | 0 |
Makes deep copies of a dict of tensors. Makes deep copies (using tf.identity or its equivalent for `SparseTensor`s) of the values of `tensors`. | def copy_tensors(tensors):
return {name: _copy_tensor_or_sparse_tensor(tensor)
for name, tensor in six.iteritems(tensors)} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ensure_memory_shared(*tensors):\n for tensor_dict in tensors:\n for _, _, t in iterate_recursively(tensor_dict):\n assert t.is_shared()",
"def batch_tensor_dicts(tensor_dicts: List[Dict[str, NDArray]],\n remove_trailing_dimension: bool = False) -> Dict[str, NDArray]... | [
"0.60108316",
"0.584757",
"0.561565",
"0.55964327",
"0.5397415",
"0.5387706",
"0.5362882",
"0.53165406",
"0.5307602",
"0.5084689",
"0.5029376",
"0.49999312",
"0.49846148",
"0.49846148",
"0.49414432",
"0.49315056",
"0.48956892",
"0.4890911",
"0.48315462",
"0.47887108",
"0.4771... | 0.7972645 | 0 |
Compute the stop condition in the main loop: dot(dtheta, rminus) >= 0 & dot(dtheta, rplus) >= 0 | def stop_criterion(thetaminus, thetaplus, rminus, rplus):
dtheta = thetaplus - thetaminus
return (np.dot(dtheta, rminus.T) >= 0) & (np.dot(dtheta, rplus.T) >= 0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def terminate_condition(self, p_a, p_b):\n if np.sum(abs(p_a - p_b), axis = None) <= self.thres:\n return True\n else:\n return False",
"def corr_deriv(inp0, inp1, flag: bool = False):\n\n # 1st derivative\n tx1 = inp0.time.data.astype(int) * 1e-9\n inp0 = inp0.data\n... | [
"0.5723442",
"0.5556065",
"0.552444",
"0.54258823",
"0.52677983",
"0.5231826",
"0.5189423",
"0.51769894",
"0.51696867",
"0.51427674",
"0.51368713",
"0.5124865",
"0.5124134",
"0.5124134",
"0.51046866",
"0.5100459",
"0.50978076",
"0.50764555",
"0.50613034",
"0.50531983",
"0.504... | 0.7494795 | 0 |
Implements the No-U-Turn Sampler (NUTS), algorithm 6 from the NUTS paper (Hoffman & Gelman, 2011). Runs Madapt steps of burn-in, during which it adapts the step size parameter epsilon, then starts generating samples to return. Note the initial step size is tricky and not exactly the one from the initial paper. In fact... | def nuts6(f, M, Madapt, theta0, delta=0.6, epsilon=None):
if len(np.shape(theta0)) > 1:
raise ValueError('theta0 is expected to be a 1-D array')
D = len(theta0)
samples = np.empty((M + Madapt, D), dtype=float)
lnprob = np.empty(M + Madapt, dtype=float)
logp, grad = f(theta0)
samples[0... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sample(\n self,\n repetitions,\n nChains=3,\n burnIn=100,\n thin=1,\n convergenceCriteria=0.8,\n variables_of_interest=None,\n DEpairs=2,\n adaptationRate=\"auto\",\n eps=5e-2,\n mConvergence=True,\n mAccept=True,\n ):\n\n ... | [
"0.6087951",
"0.5735027",
"0.57197136",
"0.56681645",
"0.55922085",
"0.5526659",
"0.5526659",
"0.5452105",
"0.5444635",
"0.542529",
"0.54031235",
"0.5367326",
"0.5355291",
"0.5340751",
"0.5272093",
"0.52532023",
"0.52501166",
"0.5244941",
"0.52425146",
"0.5238437",
"0.5223300... | 0.6030356 | 1 |
Lookup node containing data. Arguments: data (node data object to look up), parent (node's parent). Returns: node and node's parent if found, or None, None | def lookup(self, data, parent=None):
if data < self.data:
if self.left is None:
return None, None
return self.left.lookup(data, self)
elif data > self.data:
if self.right is None:
return None, None
return self.right.lookup(d... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _lookup(self, data):\n parent, current = None, self.root\n while current:\n if current < data: # data should be in right\n parent, current = current, current.right\n elif current > data: # data should be in left\n parent, current = current, cu... | [
"0.7425228",
"0.6985528",
"0.69435424",
"0.68891305",
"0.68891305",
"0.67501795",
"0.6696298",
"0.6693753",
"0.6590292",
"0.6486283",
"0.6464522",
"0.6423334",
"0.6411262",
"0.64088595",
"0.6405606",
"0.638553",
"0.6373586",
"0.63373864",
"0.63106203",
"0.62950546",
"0.627723... | 0.71603155 | 1 |
Runs PCA on the NxD array X in order to reduce its dimensionality to no_dims dimensions. | def pca(X = Math.array([]), no_dims = 50):
print "Preprocessing the data using PCA..."
(n, d) = X.shape;
X = X - Math.tile(Math.mean(X, 0), (n, 1));
(l, M) = Math.linalg.eig(Math.dot(X.T, X));
Y = Math.dot(X, M[:,0:no_dims]);
return Y; | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _apply_pca(self, X):\n newX = np.reshape(X, (-1, X.shape[2]))\n pca = sklearnPCA(n_components=self.num_components, whiten=True)\n newX = pca.fit_transform(newX)\n newX = np.reshape(newX, (X.shape[0], X.shape[1], self.num_components))\n return newX",
"def PCA(X, dims_rescale... | [
"0.7625521",
"0.72268724",
"0.71779853",
"0.7173679",
"0.7090666",
"0.70254475",
"0.6853636",
"0.6750165",
"0.66168493",
"0.66058534",
"0.65728754",
"0.6491194",
"0.645732",
"0.64447856",
"0.6355538",
"0.6327474",
"0.6276618",
"0.6235171",
"0.6203619",
"0.6150215",
"0.6129754... | 0.83336514 | 0 |
Function that receives a file name as input, reads the information about the crates the truck loaded using the csv module, and returns the cost of the fruit load as a floating-point variable. Catches errors in the file if data is missing, prints a warning, and keeps running. | def costo_camion(nombre_archivo):
f = open(nombre_archivo, encoding='utf8')
rows = csv.reader(f)
next(rows)
frutas = []
for row in rows:
try:
row[1] = int(row[1])
except ValueError:
            print(f'Warning: {row[0]} does not have a valid price')
try:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def carga_fosa( archivo_fosa ):\n\n fosa = np.genfromtxt( archivo_fosa, delimiter = \",\" )\n lons = fosa[:,0]\n lats = fosa[:,1]\n\n return lons, lats",
"def lecture_fichier(path_data_frame):\n logger = logging.getLogger('Lecture du fichier')\n log = \"#### DPNMaker 1.0..............\\n### Mir... | [
"0.62622017",
"0.6205029",
"0.60632795",
"0.5973948",
"0.5914015",
"0.5898199",
"0.58734465",
"0.58703566",
"0.5867143",
"0.58504957",
"0.5782583",
"0.5779599",
"0.5751626",
"0.5748259",
"0.57175225",
"0.57104826",
"0.56668574",
"0.56507885",
"0.5645297",
"0.56275505",
"0.562... | 0.73341614 | 1 |
Generate custom rst docs defined by a project. Projects needing custom docs generated should place executable scripts in doc/generate that output rst data which gets written to the same subdirectory paths under doc/generated. For example, during doc build the executable python script doc/generate/custom/doc.py gets run... | def _generate_custom(project, docdir, gendir):
custom_dir = os.path.join(docdir, 'generate')
print(f"Generating custom docs for {project} in {gendir!r}")
for root, _dirs, files in os.walk(custom_dir):
subdir = root.split(custom_dir, 1)[1].strip('/')
if subdir:
try:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_documentation(self):\n self.generate_api_docs()\n build.main([\n self.SOURCE_DIR,\n self.BUILD_DIR,\n ])",
"def docs():\n sh('sphinx-build -W -b html docs docs/_build/html')",
"def generate(self):\n\n # Write Doxyfile\n doxyfile_content =... | [
"0.7181522",
"0.6929639",
"0.67382836",
"0.6735726",
"0.67243713",
"0.66928166",
"0.6628833",
"0.66091406",
"0.6474424",
"0.6460677",
"0.64147335",
"0.6411518",
"0.6365201",
"0.6326307",
"0.6314046",
"0.63031334",
"0.63014746",
"0.62075216",
"0.6163829",
"0.6154218",
"0.61308... | 0.8208421 | 0 |
Generate man page rst docs for a module's installed scripts. This assumes that all the files in the 'bin' directory under the main repo root are targeted scripts. | def generate_man(repo_dir, package_dir, module):
docdir = os.path.join(repo_dir, 'doc')
gendir = os.path.join(docdir, 'generated')
print(f"Generating files for {module} man pages in {gendir!r}")
scripts = os.listdir(os.path.abspath(os.path.join(repo_dir, 'bin')))
# Replace '-' with '_' due to pyth... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def docs():\n sh('sphinx-build -W -b html docs docs/_build/html')",
"def generate_html(repo_dir, package_dir, module):\n apidir = os.path.join(repo_dir, 'doc', 'api')\n print(f\"Generating {module} API docs in {apidir!r}\")\n if subprocess.call(['sphinx-apidoc', '-Tef', '-o', apidir,\n ... | [
"0.6408381",
"0.63208246",
"0.6242216",
"0.6218134",
"0.61578363",
"0.61282766",
"0.60707325",
"0.59522146",
"0.59248513",
"0.59152114",
"0.58843786",
"0.58818114",
"0.58814114",
"0.58243656",
"0.576492",
"0.57128346",
"0.56600124",
"0.5645169",
"0.5632052",
"0.56288034",
"0.... | 0.70473146 | 0 |
Creates and returns the AumPC40 script | def create_instance(c_instance):
return AumPC40(c_instance) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_script(game_title):\n script = '{}{}\\\"{}\\\"'.format(BashHeader, StreamString, game_title)\n print('\\nCreating a script for {}:'.format(game_title))\n print(script)\n return script",
"def generate_stack_script(self):\n\n # Generate the stacking script\n # configuration, then m... | [
"0.59724087",
"0.5707068",
"0.5653505",
"0.559593",
"0.5523555",
"0.551867",
"0.55151594",
"0.53660375",
"0.52607155",
"0.52340364",
"0.523011",
"0.5224939",
"0.52103066",
"0.52010155",
"0.5195948",
"0.51954913",
"0.5187385",
"0.5186254",
"0.5169855",
"0.516934",
"0.5163394",... | 0.6038319 | 0 |
Define Persistent Store Settings. | def persistent_store_settings(self):
ps_settings = (
PersistentStoreDatabaseSetting(
name='tethys_super',
description='primary database',
initializer='heda.model.init_primary_db',
required=True
),
)
return p... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def persistent_stores(self):\n stores = (PersistentStore(name='sites_db',\n initializer='init_stores:init_sites_db',\n spatial=True\n ),\n )\n\n return stores",
"def __getSettingsFromStorage():\n return Accou... | [
"0.6257487",
"0.6162365",
"0.61609375",
"0.6112641",
"0.6072356",
"0.6051853",
"0.59530896",
"0.59401035",
"0.59371054",
"0.5936716",
"0.5869987",
"0.58563447",
"0.5823858",
"0.58226293",
"0.5788194",
"0.57841325",
"0.5772496",
"0.5753192",
"0.5748388",
"0.57304484",
"0.57297... | 0.7771509 | 0 |
Add any keys to metric_cols which don't already exist | def add_metrics(_dict):
for key, itr in _dict.items():
if key not in self.metric_cols:
self.metric_cols.append(key) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, other in metrics:\n key = (name, '')\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n ... | [
"0.60068303",
"0.585265",
"0.5817997",
"0.5661291",
"0.5444574",
"0.5444036",
"0.541413",
"0.53904605",
"0.530381",
"0.5274594",
"0.5213627",
"0.5193701",
"0.5181368",
"0.5149117",
"0.51435626",
"0.51204664",
"0.5098862",
"0.50848967",
"0.5061758",
"0.5061609",
"0.50592345",
... | 0.8019414 | 0 |
Process the regex data for func/file/line info | def process_regex(_data):
_tmp = {}
if _data is not None and len(_data.groups()) > 0:
for _key in ("head", "func", "file", "line", "tail"):
try:
_val = _data.group(_key)
if _val:
_tmp[... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _compile_regex(self):\n self.raw = self._compile('rawLevel')\n self.run = self._compile('runLevel')\n self.sample = self._compile('sampleLevel')\n self.agg = self._compile('aggLevel')",
"def parse_line(line):\n\tpattern = re.compile(\"\"\"\n\t\t[ \\t]*\n\t\t(private|package|remote... | [
"0.6273118",
"0.6101127",
"0.5907121",
"0.5593521",
"0.55559194",
"0.55496764",
"0.55365586",
"0.5492298",
"0.5444506",
"0.54420054",
"0.54311144",
"0.5417456",
"0.5397646",
"0.5341233",
"0.52872264",
"0.52838296",
"0.5275809",
"0.52700585",
"0.5259383",
"0.5213508",
"0.51947... | 0.7346875 | 0 |
Get the standard set of dictionary entries. Also parses the prefix for func/file/line info | def get_name_line_file(_prefix):
_keys = {
"type": "region",
"name": _prefix,
}
_extra = {"file": "<unknown>", "line": "0"}
_pdict = perform_regex(_prefix)
if _pdict is not None:
if "head" in _pdict:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_local_jsonld_context(fn: Union[str, Path]) -> PREFIX_MAP:\n with open(fn) as file:\n return extract_prefixmap(json.load(file))",
"def pre_lookup(self, file):\n return {}",
"def _get_prefix_attributes(self, prefix):\n return filter_dict_by_prefix(self.__dict__, prefix)",
"def ... | [
"0.57699853",
"0.5621191",
"0.54813886",
"0.5444428",
"0.5420293",
"0.53725725",
"0.5354615",
"0.5323763",
"0.5317872",
"0.52981365",
"0.52797264",
"0.52431947",
"0.522648",
"0.5214387",
"0.5202403",
"0.51770306",
"0.51639783",
"0.5157965",
"0.51198345",
"0.5119783",
"0.51136... | 0.6247918 | 0 |
node/rank/thread id may be int, array of ints, or None. When the entry is a list of integers (which happens when metric values are aggregates of multiple ranks/threads), this function generates a consistent form which is NOT numerical, to prevent `groupby(...).sum()` operations from producing something nonsensical (i.e. a... | def collapse_ids(_obj, _expect_scalar=False):
if isinstance(_obj, list):
if len(_obj) == 1:
return int(_obj[0])
else:
if _expect_scalar:
raise ValueError(
f"collapse_ids expected per-r... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_entry(self, hash_):\n vals = self.table[hash_, :min(self.depth, self.counts[hash_])]\n maxtimemask = (1 << self.matimebits) - 1\n # ids we report externally start at 0, but in table they start at 1.\n ids = (vals >> self.maxtimebits) - 1\n return np.c_[ids, vals & maxtime... | [
"0.49416804",
"0.49196684",
"0.48498648",
"0.47921938",
"0.47571516",
"0.47073448",
"0.4663383",
"0.45756686",
"0.45234352",
"0.45199615",
"0.45061395",
"0.44886374",
"0.44695598",
"0.4460793",
"0.44548059",
"0.44545692",
"0.4452105",
"0.44430593",
"0.44273648",
"0.43595484",
... | 0.53707373 | 0 |
Offer to fix broken regions in Duke's Archives. | def offer_fix_broken_regions(self, with_window: ProjectWindow = None):
if with_window:
result = with_window.CustomDialog(
title="Region Cleanup",
message="In vanilla Dark Souls, the Duke's Archives has four unused regions that can break event\n"
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fixup(self):\n raise Exception(\"Fixup not implemented yet!\")",
"def test_avalanche_warning_by_region_simple(self):\n pass",
"def test_avalanche_warning_by_region_detail(self):\n pass",
"def test_avalanche_warning_by_region_obs(self):\n pass",
"def cleanup_regions(self, tim... | [
"0.6345304",
"0.62692577",
"0.62618273",
"0.58115226",
"0.56628436",
"0.56044966",
"0.5581608",
"0.53429747",
"0.53163654",
"0.52685386",
"0.52399015",
"0.5238566",
"0.5236003",
"0.51698273",
"0.5144018",
"0.5085782",
"0.5079931",
"0.50548935",
"0.50456464",
"0.5032556",
"0.5... | 0.66855806 | 0 |
Offer to export all entities modules. | def offer_entities_export(self, with_window: ProjectWindow = None):
# TODO: Offer to automatically set MSB entity ID sync with modules.
if with_window:
result = with_window.CustomDialog(
title="Entities Export",
message="Would you also like to export all 'enti... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def export_entities(\n self,\n ) -> Callable[\n [datastore_admin.ExportEntitiesRequest], Awaitable[operations_pb2.Operation]\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we ... | [
"0.65456015",
"0.6162018",
"0.6102753",
"0.59789",
"0.58326054",
"0.5689161",
"0.5651794",
"0.5523627",
"0.5466822",
"0.5415876",
"0.54065853",
"0.54011685",
"0.54011166",
"0.53622013",
"0.53334165",
"0.5328926",
"0.531702",
"0.5250723",
"0.52474093",
"0.5241811",
"0.523484",... | 0.7407204 | 0 |
Computing filter banks means applying triangular filters, typically 40 of them, on a Mel scale to the power spectrum to extract frequency bands. | def make_filter_banks(power_frames, sampling_rate, NFFT, num_filt = 40):
low_freq_mel = 0
high_freq_mel = Hz_to_Mel(sampling_rate/2) # Convert Hz to Mel
#mel_points = np.arange(low_freq_mel, high_freq_mel, (high_freq_mel - low_freq_mel)/(num_filt + 2)) # Equally spaced in Mel scale
mel_points = np.linspace(lo... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mfccInitFilterBanks(fs, nfft):\n # filter bank params:\n lowfreq = 133.33\n linsc = 200/3.\n logsc = 1.0711703\n numLinFiltTotal = 13\n numLogFilt = 27\n\n if fs < 8000:\n nlogfil = 5\n\n # Total number of filters\n nFiltTotal = numLinFiltTotal + numLogFilt\n\n # Compute fr... | [
"0.7149337",
"0.71395457",
"0.6799719",
"0.67366964",
"0.66485703",
"0.6620555",
"0.6602272",
"0.65465355",
"0.65451074",
"0.6328881",
"0.6297695",
"0.6201124",
"0.61510277",
"0.6065396",
"0.60415995",
"0.5999311",
"0.59982884",
"0.5987474",
"0.59712106",
"0.5961633",
"0.5959... | 0.74797165 | 0 |
Return dict consisting of child threads. | def get_threads(pid: int) -> dict:
threads_map = defaultdict(list)
proc = psutil.Process(pid)
for thread in proc.threads():
threads_map[psutil.Process(thread.id).name()].append(thread.id)
return threads_map | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def threads(self):\n return self.rpc.call(MsfRpcMethod.CoreThreadList)",
"def message_threads(self):\r\n return resource.MessageThreads(self)",
"def threads(self, **kwargs):\n return stats.threads(self._host, self._session, **kwargs)",
"def Threads():\n for i in range(0, idc.get_threa... | [
"0.65268487",
"0.63145936",
"0.62256175",
"0.59941864",
"0.5970034",
"0.59116906",
"0.58653396",
"0.58576524",
"0.58159345",
"0.5813073",
"0.5806315",
"0.57842577",
"0.5783787",
"0.5665454",
"0.56502837",
"0.56432664",
"0.5621911",
"0.5619956",
"0.55661124",
"0.5542665",
"0.5... | 0.73409736 | 0 |
Set CPU affinity for a thread. | def set_cpu_affinity(pid: int, cpulist: list) -> list:
real_cpulist = list(map(CpuMap, cpulist))
return psutil.Process(pid).cpu_affinity(real_cpulist) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cpu(self, cpu):\n\n self._cpu = cpu",
"def system_affinity():\n return CPUSet((0,))",
"def _set_cpu_num(self, cpu_num=None):\n if cpu_num is not None:\n assert isinstance(cpu_num, int), \"cpu_num should be INT type, please check the input type.\"\n self.cpu_num = cpu_... | [
"0.64995265",
"0.63755697",
"0.63431793",
"0.6285226",
"0.6255081",
"0.6120596",
"0.61158234",
"0.6076468",
"0.5897732",
"0.58026326",
"0.57682025",
"0.5766153",
"0.5629418",
"0.5620593",
"0.55707806",
"0.5543859",
"0.5461979",
"0.5422937",
"0.54224724",
"0.5369957",
"0.53595... | 0.6598575 | 0 |
Instantiate the handler process with arguments. | def __init__(self, name, args):
self._proc = None
self._args = [f"/{name}"]
self._args.extend(args) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_new_handler(self, *args, **kwargs):",
"def __init__(self, *args, **kwargs):\n\n super().__init__()\n\n # a dictionary containing cli handlers for different commands.\n # in the form of: {str handler_name: CLIHandlerBase handler}\n self._cli_handlers = DTO()",
"def create(cl... | [
"0.6665165",
"0.63680065",
"0.6289797",
"0.62417436",
"0.6064677",
"0.603997",
"0.60281605",
"0.60247",
"0.59842175",
"0.5963034",
"0.5955707",
"0.5954694",
"0.5900212",
"0.58994555",
"0.5898236",
"0.5894874",
"0.5879078",
"0.58764035",
"0.58647084",
"0.5860263",
"0.58119124"... | 0.63794136 | 1 |
Initialize the parser with the content. | def __init__(self, content):
self._content = content.strip() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse(self, content):\n pass",
"def __init__(self, *args, **kw):\n self.parser = Parser(*args, **kw)",
"def setup_parser(self, parser):",
"def __init__(self, parser=None):",
"def setup_parse(self, inputstring: str, document: nodes.document) -> None:\n self.inputstring = inputstring... | [
"0.7082806",
"0.6850895",
"0.67596895",
"0.6739193",
"0.673784",
"0.67213154",
"0.666763",
"0.6624429",
"0.6594135",
"0.65809095",
"0.65353376",
"0.65250313",
"0.6489474",
"0.6432869",
"0.6423918",
"0.63843936",
"0.6366465",
"0.63090724",
"0.6292234",
"0.6263073",
"0.6234803"... | 0.6878618 | 1 |
Return true if the parser content is a range. | def _is_range(cls, rng):
match = re.search("([0-9][1-9]*)-([0-9][1-9]*)", rng)
# Group is a singular value.
return match is not None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isRangeValid(self) -> bool:\n ...",
"def f_has_range(self):\n return len(self._explored_range) > 0",
"def f_has_range(self):\n raise NotImplementedError(\"Should have implemented this.\")",
"def _is_range_boundary(boundary):\n return (isinstance(boundary, numbers.Integral) or\n ... | [
"0.71240944",
"0.6775627",
"0.6536776",
"0.65161526",
"0.64173216",
"0.6324712",
"0.6230552",
"0.6215546",
"0.61716866",
"0.61548144",
"0.6073405",
"0.60168177",
"0.6005181",
"0.6000694",
"0.5982445",
"0.5956555",
"0.58861923",
"0.5859302",
"0.5810263",
"0.58083",
"0.5794301"... | 0.70603925 | 1 |
Parse list formats for cpuset and mems. | def parse(self):
if len(self._content) == 0:
return []
groups = self._content.split(",")
arr = set()
def func(acc, cpu):
if ListFormatParser._is_range(cpu):
acc.update(ListFormatParser._range_to_list(cpu))
else:
acc.ad... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _cpus(cls):\n # The real processor map is found at different paths based on cgroups version:\n # - cgroupsv1: /cpuset.cpus\n # - cgroupsv2: /cpuset.cpus.effective\n # For more details, see https://docs.kernel.org/admin-guide/cgroup-v2.html#cpuset-interface-files\n cpulist =... | [
"0.61106426",
"0.57944417",
"0.5472716",
"0.54110897",
"0.5292223",
"0.52750033",
"0.52684903",
"0.52409875",
"0.5159124",
"0.5158192",
"0.5128918",
"0.5102793",
"0.51020837",
"0.5092175",
"0.5034157",
"0.5026708",
"0.5013118",
"0.49748427",
"0.49395618",
"0.4915944",
"0.4915... | 0.6723798 | 0 |
Add a failure entry. | def add_row(self, failure: str):
self.failures.append(f"{failure}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addFailure(self, test, err):\n self.failures.append((proto_test(test), proto_error(err)))",
"def addFailure(self, test, err):\r\n self.failures.append((test, self._exc_info_to_string(err, test)))\r\n self._mirrorOutput = True",
"def add_failure(self):\n failure_time = time.time(... | [
"0.74723893",
"0.74411285",
"0.74344665",
"0.73695433",
"0.7364384",
"0.72395444",
"0.7161841",
"0.7161526",
"0.7126863",
"0.69641733",
"0.69077164",
"0.67722815",
"0.6724996",
"0.67004424",
"0.6689054",
"0.6682879",
"0.66808593",
"0.6652856",
"0.6645837",
"0.66377515",
"0.65... | 0.7955201 | 0 |
Run a shell command and search a given regex object in stdout. If the regex object is not found, a RuntimeError exception is raised. | def search_output_from_cmd(cmd: str, find_regex: typing.Pattern) -> typing.Match:
# Run the given command in a shell
_, stdout, _ = run_cmd(cmd)
# Search for the object
content = re.search(find_regex, stdout)
# If the result is not None, return it
if content:
return content
raise ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shell_with_regex(self,\n command,\n regex,\n regex_group=1,\n command_name=\"shell\",\n raise_error=False,\n tries=1,\n port=0):\n return self.command_wit... | [
"0.7018398",
"0.6930831",
"0.6701672",
"0.6701672",
"0.6436299",
"0.6301566",
"0.61902905",
"0.61621624",
"0.5845098",
"0.58345646",
"0.5729408",
"0.5654881",
"0.558863",
"0.55799997",
"0.5539821",
"0.5509579",
"0.547885",
"0.5450089",
"0.54235727",
"0.5422032",
"0.5418474",
... | 0.7784299 | 0 |
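A usage sketch for the helper above (assuming `run_cmd` is the module's own shell wrapper returning a (code, stdout, stderr) tuple, as the snippet suggests):

```python
import re

# Extract the kernel major/minor version; raises RuntimeError if not found
match = search_output_from_cmd("uname -r", re.compile(r"(\d+)\.(\d+)"))
major, minor = match.group(1), match.group(2)
```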
Get how much free memory in kB a guest sees, over ssh. | def get_free_mem_ssh(ssh_connection):
_, stdout, stderr = ssh_connection.run("cat /proc/meminfo | grep MemAvailable")
assert stderr == ""
# Split "MemAvailable: 123456 kB" and validate it
meminfo_data = stdout.split()
if len(meminfo_data) == 3:
# Return the middle element in the array
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def memory(self):\n # Run 'free -m' command and make a list from output.\n mem_data = self.execCMD('free', '-m').split()\n total_mem = int(mem_data[7]) / 1024.\n used_mem = int(mem_data[15]) / 1024.\n # Caculate percentage\n used_mem_percent = int(used_mem / (total_mem / 1... | [
"0.7755763",
"0.7604394",
"0.74888974",
"0.7483772",
"0.7379078",
"0.7304719",
"0.7277436",
"0.72444075",
"0.72397685",
"0.72174746",
"0.7211039",
"0.7157638",
"0.71569777",
"0.7151905",
"0.71383536",
"0.7126989",
"0.7019839",
"0.7014171",
"0.69990796",
"0.69376457",
"0.69188... | 0.7866845 | 0 |
Map version for Python 3.x which is eager and returns nothing. | def eager_map(func, iterable):
for _ in map(func, iterable):
continue | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def map():",
"def map(z):\n pass",
"def _map_fn(self):\n raise NotImplementedError",
"def simple_map_2(f, l):\n # Same as above without comprehension:\n mapped_l = []\n for item in l:\n mapped_l.append( f(item) ) # the extra blanks are just for readability\n return mapped_l",
"... | [
"0.7413123",
"0.6824229",
"0.66875005",
"0.66133267",
"0.64873934",
"0.64618516",
"0.6404975",
"0.6391127",
"0.6374804",
"0.63280153",
"0.62563765",
"0.619054",
"0.6171387",
"0.616641",
"0.6125952",
"0.60744065",
"0.60302013",
"0.600142",
"0.59430134",
"0.59171677",
"0.589648... | 0.7427928 | 0 |
Test that seccomp_level applies to all threads of a process. | def assert_seccomp_level(pid, seccomp_level):
# Get number of threads
cmd = "ps -T --no-headers -p {} | awk '{{print $2}}'".format(pid)
process = run_cmd(cmd)
threads_out_lines = process.stdout.splitlines()
for tid in threads_out_lines:
# Verify each thread's Seccomp status
cmd = "ca... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_pm_Completeness(self):\n pass",
"def test_cpu_ctx_switches():\n result = _run_metric('cpu_ctx_switches')\n assert result.exit_code == 0",
"def libc_prctl_set_securebits():\n # straight from man capabilities(7):\n # \"An application can use the following call to lock itself, and ... | [
"0.52375424",
"0.52238774",
"0.51990134",
"0.5037305",
"0.50269747",
"0.49989778",
"0.49356472",
"0.49299172",
"0.49299172",
"0.48996228",
"0.48676994",
"0.48464948",
"0.4833012",
"0.4829353",
"0.4826476",
"0.4810592",
"0.4801857",
"0.47594967",
"0.47359377",
"0.4722668",
"0.... | 0.7658227 | 0 |
Get total PID CPU percentage, as in system time plus user time. If the PID has corresponding threads, creates a dictionary with the lists of instant loads for each thread. | def get_cpu_percent(pid: int, iterations: int, omit: int) -> dict:
assert iterations > 0
time.sleep(omit)
cpu_percentages = {}
for _ in range(iterations):
current_cpu_percentages = ProcessManager.get_cpu_percent(pid)
assert len(current_cpu_percentages) > 0
for thread_name, task_... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_cpu_percent(pid: int) -> Dict[str, Dict[str, float]]:\n _, stdout, _ = run_cmd(GET_CPU_LOAD.format(pid))\n cpu_percentages = {}\n\n # Take all except the last line\n lines = stdout.strip().split(sep=\"\\n\")\n for line in lines:\n # sometimes the firecracker pr... | [
"0.83288795",
"0.75617164",
"0.75390935",
"0.7326858",
"0.71243477",
"0.711953",
"0.7106415",
"0.6946368",
"0.6930687",
"0.68864965",
"0.68565696",
"0.68125904",
"0.6779687",
"0.67210364",
"0.6708312",
"0.66819966",
"0.6630657",
"0.6615433",
"0.6614449",
"0.66140425",
"0.6591... | 0.7589443 | 1 |
Aggregates the results of `get_cpu_percent` into average utilization for the vmm thread, and total average utilization of all vcpu threads | def summarize_cpu_percent(cpu_percentages: dict):
def avg(thread_name):
assert thread_name in cpu_percentages and cpu_percentages[thread_name]
# Generally, we expect there to be just one thread with any given name, but sometimes there's two 'firecracker'
# threads
data = list(cpu_p... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cpu_percent(self):\n self.monitoring_object['cpu_percent'] = \\\n psutil.cpu_percent(interval=1, percpu=True)",
"def calculate_score_vm(self, vm):\n vm_cpu_utilization = self.ceilometer. \\\n statistic_aggregation(\n resource_id=vm.uuid,\n met... | [
"0.6822214",
"0.67703855",
"0.6766698",
"0.6720268",
"0.6685836",
"0.6653267",
"0.6576491",
"0.65244013",
"0.6479091",
"0.6467682",
"0.64666563",
"0.6414449",
"0.6348821",
"0.6320592",
"0.6273393",
"0.6240305",
"0.62322235",
"0.62203926",
"0.61952394",
"0.61825454",
"0.617884... | 0.77546805 | 0 |
Wait for a process to terminate. Returns successfully if the process was indeed killed, or raises an exception if the process is still alive after retrying several times. | def wait_process_termination(p_pid):

try:
_, stdout, _ = run_cmd("ps --pid {} -o comm=".format(p_pid))
except ChildProcessError:
return
raise Exception("{} process is still alive: ".format(stdout.strip())) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wait_process(pid, timeout=None):\n def process():\n try:\n os.kill(pid, 0)\n except OSError:\n # Process is dead\n return True\n else:\n # Process is still ticking\n return None\n\n return wait_condition(process, timeout)",
"de... | [
"0.746104",
"0.746104",
"0.73391795",
"0.716842",
"0.70773476",
"0.6990266",
"0.68312275",
"0.6687879",
"0.6672898",
"0.66042864",
"0.6525021",
"0.6473005",
"0.64443827",
"0.6440825",
"0.63804483",
"0.63362366",
"0.6315719",
"0.62901235",
"0.62555283",
"0.6249286",
"0.6232088... | 0.77298117 | 0 |
Return the version of the firecracker crate, from Cargo.toml. Should be the same as the output of `./firecracker version`, if the code has not been released. | def get_firecracker_version_from_toml():
cmd = "cd ../src/firecracker && cargo pkgid | cut -d# -f2 | cut -d: -f2"
rc, stdout, stderr = run_cmd(cmd)
assert rc == 0, stderr
return packaging.version.parse(stdout) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_version():\n version_dict = {}\n exec(open(\"src/chimera/version.py\").read(), version_dict)\n return version_dict['version']",
"def get_version():\n version_file = Path(__file__).resolve().parent / \"clinker\" / \"__init__.py\"\n version_match = re.search(\n r\"^__version__ = ['\\\... | [
"0.68947846",
"0.667615",
"0.66711193",
"0.66711193",
"0.66711193",
"0.6663825",
"0.663384",
"0.6623015",
"0.6570636",
"0.65684336",
"0.6553881",
"0.6541643",
"0.64697933",
"0.64678293",
"0.64664465",
"0.643406",
"0.6432498",
"0.6410594",
"0.6397125",
"0.63920736",
"0.6388049... | 0.83061004 | 0 |
Return whether Firecracker supports io_uring for the running kernel version. | def is_io_uring_supported():
return compare_versions(get_kernel_version(), MIN_KERNEL_VERSION_FOR_IO_URING) >= 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def race_detector_supported(goroot: GoRoot) -> bool:\n if goroot.goos == \"linux\":\n return goroot.goarch in (\"amd64\", \"ppc64le\", \"arm64\", \"s390x\")\n elif goroot.goos == \"darwin\":\n return goroot.goarch in (\"amd64\", \"arm64\")\n elif goroot.goos in (\"freebsd\", \"netbsd\", \"op... | [
"0.60620135",
"0.6058219",
"0.5950056",
"0.5902196",
"0.5836686",
"0.5798585",
"0.5753207",
"0.57202065",
"0.57157874",
"0.5714625",
"0.56901246",
"0.5674185",
"0.5644311",
"0.56422937",
"0.5637251",
"0.56064665",
"0.5578758",
"0.5546028",
"0.55302817",
"0.552998",
"0.5525724... | 0.89534605 | 0 |
Build `GET` request to fetch metadata from MMDS. | def generate_mmds_get_request(ipv4_address, token=None, app_json=True):
cmd = "curl -m 2 -s"
if token is not None:
cmd += " -X GET"
cmd += ' -H "X-metadata-token: {}"'.format(token)
if app_json:
cmd += ' -H "Accept: application/json"'
cmd += " http://{}/".format(ipv4_address)... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_mds(self, data=True, limit=1000, args=None, guids=None, save=True):\n\n if guids is None:\n if args is None:\n murl = \"{}/mds/metadata?limit={}\".format(self._endpoint, limit)\n else:\n murl = \"{}/mds/metadata?limit={}&{}\".format(self._endpoint,... | [
"0.6447606",
"0.5846681",
"0.5795521",
"0.56690586",
"0.5652469",
"0.55951494",
"0.5566249",
"0.5527905",
"0.55269754",
"0.54832304",
"0.5420534",
"0.5392542",
"0.5356662",
"0.53363276",
"0.5333375",
"0.5295881",
"0.52953446",
"0.52225935",
"0.52201164",
"0.52176523",
"0.5214... | 0.71962905 | 0 |
Populate the MMDS data store of the microvm with the provided data | def populate_data_store(test_microvm, data_store):
response = test_microvm.api.mmds.get()
assert response.json() == {}
test_microvm.api.mmds.put(**data_store)
response = test_microvm.api.mmds.get()
assert response.json() == data_store | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def archive_mds_data(self, lmtdb):\n\n dataset_names = [\n 'mdservers/cpuload',\n ]\n\n self.init_datasets(dataset_names, lmtdb.mds_names)\n\n # Now query the MDS_DATA table to get byte counts over the query time range\n results, columns = lmtdb.get_mds_data(self.query... | [
"0.5887385",
"0.58831227",
"0.58340955",
"0.5806593",
"0.5788428",
"0.5772362",
"0.5762037",
"0.5743562",
"0.57123286",
"0.57081795",
"0.56953865",
"0.56356686",
"0.5614732",
"0.55909765",
"0.5561978",
"0.5539788",
"0.5539297",
"0.5523665",
"0.55190635",
"0.5515125",
"0.55011... | 0.7923905 | 0 |
Start a binary process inside a screen session. | def start_screen_process(screen_log, session_name, binary_path, binary_params):
start_cmd = "screen -L -Logfile {logfile} " "-dmS {session} {binary} {params}"
start_cmd = start_cmd.format(
logfile=screen_log,
session=session_name,
binary=binary_path,
params=" ".join(binary_params... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def startComponent(self, opts):\n screen = self.findScreen(opts.verbose)\n\n if screen != None:\n print(\"Screen session %s already started.\" % (screen), file=sys.stderr)\n return False\n\n chdir(self._binaryPath, opts.verbose)\n\n cmd = \"%s %s.exe %s\" % (self._... | [
"0.6976709",
"0.63463855",
"0.62923455",
"0.6287821",
"0.62865543",
"0.6258117",
"0.62508947",
"0.62495404",
"0.62277424",
"0.62228143",
"0.6196655",
"0.61962956",
"0.6155768",
"0.6147755",
"0.61442816",
"0.60830754",
"0.6082919",
"0.60748583",
"0.6054233",
"0.60395277",
"0.6... | 0.7684353 | 0 |
Wait for a process to run. Will return successfully if the process is in a running state and will otherwise raise an exception. | def wait_process_running(process):
assert process.is_running() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def wait(self):\n if self.poll() is None:\n await wait_child_exiting(self)\n self._proc.wait()\n else:\n await _core.checkpoint()\n return self.returncode",
"def wait():\r\n win32event.WaitForSingleObject(hProcess,\r\n ... | [
"0.72903633",
"0.7093548",
"0.70814455",
"0.70814455",
"0.7048787",
"0.7028544",
"0.69623494",
"0.6829046",
"0.68131113",
"0.6803098",
"0.67237234",
"0.6645872",
"0.66076875",
"0.65566254",
"0.64550054",
"0.63478976",
"0.63458306",
"0.63446194",
"0.6254119",
"0.6125616",
"0.6... | 0.7995684 | 0 |
Test config entry diagnostics. | async def test_entry_diagnostics(
hass: HomeAssistant,
config_entry,
hass_client: ClientSessionGenerator,
setup_config_entry,
) -> None:
assert await get_diagnostics_for_config_entry(hass, hass_client, config_entry) == {
"entry": {
"entry_id": config_entry.entry_id,
"... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_config_spec(self):\n spec = self._gen.config_spec()\n self.assertIn('Number of examples', spec)\n self.assertIn('Maximum number of columns to change', spec)\n self.assertIn('Regression threshold', spec)\n self.assertIn('Prediction key', spec)",
"def test_config(app):\n assert app.testi... | [
"0.6630062",
"0.65424734",
"0.6377232",
"0.6363308",
"0.6259676",
"0.6159612",
"0.611726",
"0.6091105",
"0.60782284",
"0.6073323",
"0.60627615",
"0.605891",
"0.6056934",
"0.60558814",
"0.6039397",
"0.6037984",
"0.6020072",
"0.60186297",
"0.59983885",
"0.5994481",
"0.5989092",... | 0.6894157 | 0 |
Plots the training and validation loss | def plot_loss(x, loss_train, loss_valid, title):
plt.figure()
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.title(title)
plt.plot(x, loss_train, '-b', label='Training')
plt.plot(x, loss_valid, '-r', linestyle=(0, (1, 2)), label='Validation')
plt.legend(["Training", "Validation"], loc="upper rig... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_loss(training_errors, validation_errors):\n plt.xscale('Log')\n plt.xlabel('Epochs')\n plt.ylabel('Mean Actual Error')\n plt.plot(training_errors, label = \"Training Error\", \\\n color = 'blue')\n plt.plot(validation_errors, label = \"Validation Error\", \\\n color = 'red')\n... | [
"0.8181959",
"0.7967187",
"0.7908213",
"0.7882923",
"0.784963",
"0.7838155",
"0.7808386",
"0.779582",
"0.77136517",
"0.7699465",
"0.76265264",
"0.758413",
"0.75835973",
"0.7577715",
"0.75752705",
"0.7520884",
"0.75018513",
"0.7480222",
"0.7461393",
"0.74384344",
"0.7422205",
... | 0.7982152 | 1 |
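A quick usage sketch for a plot of this shape, with synthetic curves so it runs standalone (requires only numpy and matplotlib):

```python
import numpy as np
import matplotlib.pyplot as plt

epochs = np.arange(1, 21)
loss_train = np.exp(-epochs / 8) + 0.05  # synthetic decaying losses
loss_valid = np.exp(-epochs / 8) + 0.10

plt.figure()
plt.plot(epochs, loss_train, "-b", label="Training")
plt.plot(epochs, loss_valid, "-r", label="Validation")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.title("Training vs. validation loss")
plt.legend(loc="upper right")
plt.show()
```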
You have d dice, and each die has f faces numbered 1, 2, ..., f. Return the number of possible ways (out of f^d total ways) modulo 10^9 + 7 to roll the dice so the sum of the face-up numbers equals target. | def numRollsToTarget(self, d, f, target):
@lru_cache(None)
def backtrack(d, t):
"""
backtracking
Args:
d(int): int
t(int): int
Returns:
int:
"""
if t < 0:
return 0
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fours(dice):\n for i, j, k, l in combinations(dice, 4):\n if i == j == k == l:\n return 4 * i\n\n return 0",
"def sixes_points(dice_list):\n return dice_list.count(6) * 6",
"def fours(dice):\n return sum([x for x in dice if x == 4])",
"def sixes(dice):\n ... | [
"0.6980511",
"0.6557521",
"0.6477503",
"0.6394374",
"0.6348324",
"0.6332071",
"0.6288005",
"0.6248054",
"0.62239873",
"0.62001395",
"0.6194704",
"0.6118057",
"0.6100825",
"0.603977",
"0.6028606",
"0.6014085",
"0.5997667",
"0.5987392",
"0.5935156",
"0.5925972",
"0.59212106",
... | 0.74255085 | 0 |
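The document above memoizes a top-down recursion; the same count can be computed bottom-up with a rolling table, which avoids Python's recursion limit for large d. A self-contained sketch:

```python
def num_rolls_to_target(d: int, f: int, target: int) -> int:
    MOD = 10**9 + 7
    ways = [1] + [0] * target              # zero dice: only sum 0 is reachable
    for _ in range(d):
        nxt = [0] * (target + 1)
        for t in range(1, target + 1):
            # A die showing k contributes ways[t - k] from the previous row.
            nxt[t] = sum(ways[t - k] for k in range(1, min(f, t) + 1)) % MOD
        ways = nxt
    return ways[target]

assert num_rolls_to_target(2, 6, 7) == 6   # the classic two-dice check
```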
Return the maximum possible sum of a contiguous subarray of array. | def max_contiguous_subarray(array):
if len(array) > 0:
global_max = array[0]
current_max = array[0]
for item in array[1:]:
current_max = max(current_max + item, item)
global_max = max(global_max, current_max)
return global_max
else:
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def max_subarray(array):\n\tmax_sum = 0\n\n\tmax_local_sum = 0\n\tfor i, value in enumerate(array):\n\t\tmax_local_sum += value\n\t\tif max_local_sum < 0:\n\t\t\tmax_local_sum = 0\n\t\telse:\n\t\t\tmax_sum = max(max_sum, max_local_sum)\n\n\treturn max_sum",
"def max_noncontiguous_subarray(array):\n if len(arr... | [
"0.8202246",
"0.7916394",
"0.77453434",
"0.7333391",
"0.7310374",
"0.72519475",
"0.7203545",
"0.7059973",
"0.68492466",
"0.68402284",
"0.6802621",
"0.6792351",
"0.67439",
"0.66859096",
"0.6684533",
"0.661008",
"0.65974057",
"0.65576714",
"0.64860016",
"0.64567614",
"0.6436624... | 0.80561763 | 1 |
Return the maximum possible sum of a noncontiguous subarray of array. | def max_noncontiguous_subarray(array):
if len(array) > 0:
all_negative = True
max_negative = None
sum_items = 0
for item in array:
if item >= 0:
all_negative = False
sum_items += item
else:
if max_negative is Non... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def max_subarray(array):\n\tmax_sum = 0\n\n\tmax_local_sum = 0\n\tfor i, value in enumerate(array):\n\t\tmax_local_sum += value\n\t\tif max_local_sum < 0:\n\t\t\tmax_local_sum = 0\n\t\telse:\n\t\t\tmax_sum = max(max_sum, max_local_sum)\n\n\treturn max_sum",
"def max_contiguous_subarray(array):\n if len(array)... | [
"0.82961255",
"0.77139294",
"0.7596466",
"0.7196577",
"0.715294",
"0.7006346",
"0.70032346",
"0.6886802",
"0.68664634",
"0.68311334",
"0.6787063",
"0.67563164",
"0.66991204",
"0.6661166",
"0.64700055",
"0.6466913",
"0.6465258",
"0.64573425",
"0.6438339",
"0.6273083",
"0.62503... | 0.7972583 | 1 |
Wraps the function `jax_fun`, dispatching to the decorated function if the argument is complex. | def add_complex_wrap(jax_fun):
def add_complex_wrap_decorator(complex_fun):
@wraps(jax_fun)
def wrapped_fun(x):
if iscomplex(x):
return jax_fun(x)
else:
return complex_fun(x)
return wrapped_fun
return add_complex_wrap_decorator | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wrap(fun):\n class tmp:\n def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n def __call__(self, x):\n args, kwargs = list(self.args), self.kwargs\n args.insert(i, x)\n return fu... | [
"0.61633724",
"0.5822917",
"0.53867716",
"0.53562367",
"0.5307803",
"0.5287246",
"0.5256241",
"0.5147923",
"0.5147923",
"0.5137637",
"0.51263404",
"0.50915116",
"0.50890017",
"0.50849926",
"0.5083033",
"0.50597",
"0.50597",
"0.50536984",
"0.5050015",
"0.5029219",
"0.50189996"... | 0.8649937 | 0 |
Returns a list of meetings with notice documents added to applicable dates | def _parse_notice(self, response):
notice_documents = self._parse_notice_documents(response)
meetings_list = []
for meeting in response.meta.get('upcoming', []):
# Check if the meeting date is in any document title, if so, assign docs to that meeting
meeting_date_str = '{... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_past_meetings(self, response):\n meetings = []\n for item in response.css('table.table-striped tbody tr'):\n dt_str = item.css('time::text').extract_first()\n meetings.append({\n 'start': {\n 'date': datetime.strptime(dt_str, '%b %d, ... | [
"0.6048299",
"0.5839136",
"0.5809278",
"0.5704421",
"0.56848085",
"0.5572619",
"0.5572152",
"0.5535616",
"0.5491324",
"0.54627925",
"0.5451501",
"0.54415005",
"0.5403808",
"0.53903556",
"0.5383336",
"0.53667355",
"0.53654236",
"0.53399014",
"0.52882046",
"0.5287882",
"0.52864... | 0.73879534 | 0 |
Get document links from notice page, ignoring mailto and flyer links | def _parse_notice_documents(self, response):
notice_documents = []
for doc in response.css('article.full a[href]'):
doc_text = doc.css('*::text').extract_first()
if 'mailto' in doc.attrib['href'] or 'flyer' in doc_text.lower():
continue
notice_document... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_document_links(self, response):\n document_links = []\n for link in response.css(\".view-site-documents .view-content .field-content a\"):\n document_links.append(\n {\n \"title\": link.xpath(\"./text()\").extract_first(),\n \... | [
"0.68416214",
"0.68373984",
"0.67045367",
"0.6525906",
"0.59452546",
"0.59382564",
"0.59187204",
"0.59051454",
"0.5856391",
"0.5835481",
"0.5744894",
"0.57298803",
"0.57297236",
"0.5729125",
"0.5728193",
"0.5714317",
"0.57109016",
"0.5692153",
"0.56779367",
"0.56766176",
"0.5... | 0.78274935 | 0 |
Combines upcoming and past meetings and yields results ignoring duplicates | def _parse_combined_meetings(self, response):
meetings = self._parse_past_meetings(response)
meeting_dates = [meeting['start']['date'] for meeting in meetings]
for meeting in response.meta.get('upcoming', []):
if meeting['start']['date'] not in meeting_dates:
meeting... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_past_meetings(self, response):\n meetings = []\n for item in response.css('table.table-striped tbody tr'):\n dt_str = item.css('time::text').extract_first()\n meetings.append({\n 'start': {\n 'date': datetime.strptime(dt_str, '%b %d, ... | [
"0.66944045",
"0.6520652",
"0.6458132",
"0.64081097",
"0.6286767",
"0.62485164",
"0.59121764",
"0.5868881",
"0.5843999",
"0.5840361",
"0.5805768",
"0.5798697",
"0.57930505",
"0.5737826",
"0.5696575",
"0.5654834",
"0.5645113",
"0.5636026",
"0.5623064",
"0.56101334",
"0.5589214... | 0.7418834 | 0 |
Returns a list of start dates and documents from the meeting minutes page | def _parse_past_meetings(self, response):
meetings = []
for item in response.css('table.table-striped tbody tr'):
dt_str = item.css('time::text').extract_first()
meetings.append({
'start': {
'date': datetime.strptime(dt_str, '%b %d, %Y').date()... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_meeting_timeline(meeting_count):\n start_meet, end_meet = [], []\n for i in range(1, meeting_count+1):\n while True:\n start_time = int(input(f'Enter the start time of meeting {i}: '))\n end_time = int(input(f'Enter the end time of meeting {i}: '))\n if start_t... | [
"0.59232897",
"0.59062165",
"0.5727278",
"0.55558705",
"0.5543141",
"0.55137223",
"0.5504645",
"0.5475457",
"0.5456481",
"0.5455918",
"0.54104835",
"0.53369486",
"0.5334501",
"0.5317548",
"0.53066015",
"0.5306532",
"0.5298339",
"0.52790225",
"0.52696097",
"0.5261282",
"0.5260... | 0.59903324 | 0 |
Get the most recently rotated log | def _get_logrotated_log(self):
file_lst = glob.glob(self.rotation_pattern)
file_lst.remove(self.log_filename)
if len(file_lst) == 0:
return None
stat_lst = [(os.stat(x).st_mtime, x) for x in file_lst]
sorted_stat_lst = sorted(stat_lst, key=lambda x: x[1])
so... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def last_log(self) -> List:\n logs_list: List = os.listdir(LOGS_BASE_PATH)\n full_list = [os.path.join(LOGS_BASE_PATH, i) for i in logs_list]\n time_sorted_list: List = sorted(full_list, key=os.path.getmtime)\n return time_sorted_list[-1]",
"def most_recent_read(self):\n self.read_... | [
"0.65318114",
"0.64231694",
"0.614372",
"0.60083914",
"0.5977901",
"0.5877391",
"0.5812588",
"0.573757",
"0.5723416",
"0.5704916",
"0.57025534",
"0.5626169",
"0.5538639",
"0.5531393",
"0.5504952",
"0.5479332",
"0.54694706",
"0.5460089",
"0.54365224",
"0.5427365",
"0.5427365",... | 0.78995377 | 0 |
Initialize the mission handler. Reject the mission if the droneSet is empty. Gives random colors to the drones in the droneSet. Saves the newly created mission object in the database. | def __init__(self, dronesSet: DronesSet, missionType: MissionType, initialDronePos: dict, offsetDronePos: dict,
sendMessageCallable: Callable[[Message], None]):
self.RANGE_SCALE: float = (
missionType == 'argos') * self.ARGOS_SCALE + (missionType == 'crazyradio') * self.CRAZYRADIO_S... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n self.opening_scene = DungeonGate()\n # this list define the order of scenes in the corridor\n self.corridor_scenes = [GuardsRoom(), Cell(), Armory(), EmptyRoom(), Dormitory()]\n shuffle(self.corridor_scenes)\n self.explored_scenes = {\n ... | [
"0.55842113",
"0.5356828",
"0.52747035",
"0.51997954",
"0.5177088",
"0.5164723",
"0.5151623",
"0.5147991",
"0.5134333",
"0.513211",
"0.5111046",
"0.5073858",
"0.50653154",
"0.50625694",
"0.5033095",
"0.5031629",
"0.5003948",
"0.49932712",
"0.49649855",
"0.4953497",
"0.4952016... | 0.6436248 | 0 |
Calculate the points indicated by the given ranges and orientation. Translates coordinates from the received axis to the dashboard axis. | def onReceivedPositionAndRange(self, droneName: str, position: Vec2, yaw: float, ranges: List[int]):
realYaw = yaw
points: List[Vec2] = []
position['x'] -= self.initialDronePos[droneName]['x']
position['y'] -= self.initialDronePos[droneName]['y']
if self.mission['type'] == 'ar... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pyr_point_translator(x, y, org_l, dest_l):\n dest_x = (2.0 ** (org_l - dest_l)) * x\n dest_y = (2.0 ** (org_l - dest_l)) * y\n return np.array([dest_x, dest_y]).transpose()",
"def _axes_domain(self, *args, **kwargs):\n # See _add_gridline_label for detials\n lon_0 = self.axes.projection.proj4_... | [
"0.53945553",
"0.5369049",
"0.5204388",
"0.5188294",
"0.5168471",
"0.5167131",
"0.5166374",
"0.51458585",
"0.51410073",
"0.5111717",
"0.5095342",
"0.5087045",
"0.5066198",
"0.5044947",
"0.5043451",
"0.50419384",
"0.4990512",
"0.49884152",
"0.49825215",
"0.49820203",
"0.497555... | 0.572348 | 0 |
Check if the given point isn't too close to an already existing point. | def checkPointValidity(self, point: Tuple[float, float]) -> bool:
neighbor = self.kdtree.search_nn(point)
if not neighbor:
self.kdtree.add(point)
return True
if neighbor[1] <= self.MIN_POINTS_DIST:
return False
else:
self.kdtree.add(point)
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def closeTo(pointOne, pointTwo):\r\n\tif abs(pointOne.lat-pointTwo.lat) < 0.0002:\r\n\t\tif abs(pointOne.lon-pointTwo.lon) < 0.0002:\r\n\t\t\treturn True\r\n\treturn False",
"def around(self, point, distance):\n return self.distance(point) <= distance",
"def remove_point(self, x):\n idx = np.sum(... | [
"0.677344",
"0.6768161",
"0.6526246",
"0.6513699",
"0.6485867",
"0.6485867",
"0.64215946",
"0.6363954",
"0.635005",
"0.6322869",
"0.63002676",
"0.6277934",
"0.62465405",
"0.6223136",
"0.6210491",
"0.61917126",
"0.61898994",
"0.61837393",
"0.6171086",
"0.61608964",
"0.6139145"... | 0.7145907 | 0 |
Add the new position of the drone as well as the position of the border it found to the mission. Saves the updated mission to the database. | def handlePositionAndBorders(self, droneName: str, position: Vec2, points: List[Vec2]):
newMissionPoints = list(map(lambda point: MissionPoint(
droneName=droneName, value=point), points))
missionPulse = MissionPulse(id=self.mission['id'])
if newMissionPoints:
missionPulse... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update(self):\n # Move left/right=====\n self.rect.x += self.change_x\n self.rect.y += self.change_y\n visited[int(self.rect.x/32)][int(self.rect.y/32)].append(self.id)\n\n self.path.append((int(self.rect.x/32), int(self.rect.y/32)))\n\n # if(self.rect.x == goal_x) & (... | [
"0.5691412",
"0.56241834",
"0.5621247",
"0.5615071",
"0.5611353",
"0.5584852",
"0.55751413",
"0.5554496",
"0.5547775",
"0.5543947",
"0.55201",
"0.548699",
"0.5458025",
"0.5425846",
"0.5422525",
"0.54196346",
"0.5414458",
"0.5406135",
"0.5383632",
"0.53772956",
"0.5362661",
... | 0.6507238 | 0 |
Goes over all the points found during the mission and tries to regroup them into shapes. Then adds the created shapes to the current mission. | def assignPointsToShapes(self):
pointsCopy = self.mission['points'].copy()
while len(pointsCopy):
shape = []
self.recursiveAddPointToShape(pointsCopy, [pointsCopy[0]], shape)
shape.append(shape[0])
self.mission['shapes'].append(shape) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def recursiveAddPointToShape(self, missionPoints: List[MissionPoint], pointsToAdd: List[MissionPoint],\n currentShape: List[Vec2]):\n for point in pointsToAdd:\n currentShape.append(point['value'])\n\n nexPointsToAdd = {}\n pointsToRemove = []... | [
"0.6815821",
"0.612363",
"0.58828986",
"0.57923615",
"0.57276297",
"0.5707279",
"0.5644243",
"0.5609165",
"0.5585768",
"0.55466896",
"0.5538337",
"0.55251724",
"0.5502003",
"0.5434354",
"0.5411434",
"0.5407486",
"0.5376817",
"0.53744084",
"0.5358702",
"0.5353576",
"0.53435344... | 0.7532935 | 0 |
Check if every drone is landed to end the mission. | def checkMissionEnd(self) -> bool:
if getTimestamp() - self.mission['timestamp'] < self.TAKE_OFF_DELAY:
return False
drone: Drone
for drone in self.dronesSet.getDrones().values():
if drone['state'] != 'onTheGround' and drone['state'] != 'crashed':
return F... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_end_game(self):\n return False if (any(self.p1_pits()) and any(self.p2_pits())) else True",
"def check_game_end(self):\n\n return any([i != 0 for i in self.board[0]])",
"def is_done(self):\n return not any((agent.is_alive() for agent in self.agents))",
"def is_done(self):\n ... | [
"0.6552243",
"0.654598",
"0.63412744",
"0.6302783",
"0.6250133",
"0.6250133",
"0.61476976",
"0.6144818",
"0.614153",
"0.6104861",
"0.6099322",
"0.6065373",
"0.60627294",
"0.6027397",
"0.6011374",
"0.5995997",
"0.5975197",
"0.59438217",
"0.59388137",
"0.59343165",
"0.593292",
... | 0.7854534 | 0 |
Force stop the mission and register it as failed. | def stopMission(self):
status: MissionStatus = 'failed'
missionPulse = MissionPulse(
id=self.mission['id'],
status=status,
) # noqa
self.mission['status'] = status
DatabaseService.saveMission(self.mission['id'], self.mission)
self.sendMessageCalla... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def foreceStop(self):\n self.__success = False\n self.stop()",
"def estop(self):\n self.status_message = \"EMERGENCY STOP - Check Rexarm and restart program\"\n self.current_state = \"estop\"\n self.rexarm.disable_torque()",
"def _gracefully_stop(self):\n pass",
"def... | [
"0.6457197",
"0.6409468",
"0.62858284",
"0.62379634",
"0.6163845",
"0.6142196",
"0.6141961",
"0.611906",
"0.6115728",
"0.61096895",
"0.6101109",
"0.60758865",
"0.6075437",
"0.60485166",
"0.60287595",
"0.6011648",
"0.5979752",
"0.59785706",
"0.59762985",
"0.5975325",
"0.595054... | 0.641447 | 1 |
Ensure that a deserialized Project looks sane. | def test_simple_deserialization(valid_data):
project: Project = Project.build(valid_data)
assert project.uid == UUID(valid_data['id'])
assert project.created_at == arrow.get(valid_data['created_at'] / 1000).datetime
assert project.name == 'my project'
assert project.status == 'in-progress' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_project(cls, project):\n \n if project is None:\n raise TypeError(\"Sequence.project can not be None\")\n \n if not isinstance(project, Project):\n raise TypeError(\"The project should be and instance of \"\n \"oyProjectManager... | [
"0.68828285",
"0.67649096",
"0.6513282",
"0.6298913",
"0.6298913",
"0.6079381",
"0.605389",
"0.59936476",
"0.5902406",
"0.5821863",
"0.58024466",
"0.57933676",
"0.5701468",
"0.56036913",
"0.5592989",
"0.5559941",
"0.55275893",
"0.5523878",
"0.551632",
"0.550843",
"0.55052364"... | 0.7065777 | 0 |
Ensure that a serialized Project looks sane. | def test_serialization(valid_data):
project: Project = Project.build(valid_data)
serialized = project.dump()
assert serialized == valid_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_simple_deserialization(valid_data):\n project: Project = Project.build(valid_data)\n assert project.uid == UUID(valid_data['id'])\n assert project.created_at == arrow.get(valid_data['created_at'] / 1000).datetime\n assert project.name == 'my project'\n assert project.status == 'in-progress'... | [
"0.6879066",
"0.657145",
"0.64072245",
"0.6398895",
"0.6109679",
"0.6032338",
"0.60129434",
"0.5995189",
"0.5987971",
"0.5916274",
"0.5797578",
"0.5797578",
"0.5703068",
"0.5651922",
"0.5539019",
"0.5537114",
"0.54497784",
"0.5432663",
"0.5410762",
"0.53860843",
"0.53771704",... | 0.7294209 | 0 |
Runs inference on an image. | def run_inference_on_image(image):
if not tf.gfile.Exists(image):
tf.logging.fatal('File does not exist %s', image)
image_data = tf.gfile.FastGFile(image, 'rb').read()
with tf.Session() as sess:
# Some useful tensors:
# 'softmax:0': A tensor containing the normalized prediction across
# 1000 ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_inference_on_image(image):\n if not gfile.Exists(image):\n tf.logging.fatal('File does not exist %s', image)\n image_data = gfile.FastGFile(image, 'rb').read()\n\n # Creates graph from saved GraphDef.\n create_graph()\n\n with tf.Session() as sess:\n # Runs the softmax tensor by feeding the imag... | [
"0.80876434",
"0.7585265",
"0.7535307",
"0.7361889",
"0.72406197",
"0.7163995",
"0.70659184",
"0.69971216",
"0.69961894",
"0.69929576",
"0.69677186",
"0.6897145",
"0.68879974",
"0.6879423",
"0.6831557",
"0.68114525",
"0.67840654",
"0.6757619",
"0.67368376",
"0.6732527",
"0.67... | 0.76775897 | 1 |
Translates an artist name into a Spotify artist id. | def get_artist_id(artist_name: str, api_key: str):
# Get token
token_url = "https://accounts.spotify.com/api/token"
authorization = base64.standard_b64encode(api_key.encode('utf-8')).decode('utf-8')
headers = {'Authorization' : 'Basic ' + authorization}
data = "grant_type=client_credentials".encode(... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def search_for_artist(name):\n\ttoken = get_token()\n\tif token:\n\t\theaders = {\"Content-Type\": \"application/json\", \"Authorization\": \"Bearer \" + token}\n\t\toptions = {\n\t\t\t'q': name, 'type': 'artist', 'limit': '1'\n\t\t}\n\n\t\tresponse = requests.get(\n\t\t\t'https://api.spotify.com/v1/search',\n\t\t... | [
"0.6579793",
"0.61999375",
"0.6080319",
"0.60751194",
"0.6036611",
"0.5939806",
"0.5904006",
"0.5902831",
"0.5887923",
"0.5886538",
"0.58761424",
"0.5864386",
"0.5772325",
"0.5623332",
"0.5613384",
"0.5603011",
"0.5584486",
"0.55721587",
"0.5566795",
"0.5550294",
"0.5540571",... | 0.71629184 | 0 |
Generate the waveform for a current that starts at zero and is stepped up to the given amplitude at time t_stop/10. | def step(amplitude, t_stop):
times = np.array([0, t_stop/10, t_stop])
amps = np.array([0, amplitude, amplitude])
return times, amps | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wave_tx_stop():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVHLT, 0, 0))",
"def wave_function(x, t, max_ampli=1, oscillation_period=1, init_deflection=0):\n time_relation = t / oscillation_period\n space_relation = x / (conf.WAVE_PROPAGATION * oscillation_period)\n\n # sinput = sin + input\n ... | [
"0.6297796",
"0.6082015",
"0.5815902",
"0.57877296",
"0.56793946",
"0.56688744",
"0.55939317",
"0.55819577",
"0.55242807",
"0.55218697",
"0.5481293",
"0.5452606",
"0.5451002",
"0.5436651",
"0.5414957",
"0.5389285",
"0.53582424",
"0.53259397",
"0.53171223",
"0.52977514",
"0.52... | 0.6923114 | 0 |
Returns the PID parameters. | def get_pid(self):
return self.k_p, self.k_i, self.k_d | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_parameters():\n\n setpoint = [0., 0., 0., 0.]\n kp_param = [0., .0, 0., 0.]\n ki_param = [0., 0., 0., 0.]\n kd_param = [0, 0, 0, 0]\n inverted = [False, False, False, False]\n hold = [False, False, False, False]\n int_reset = [False, False, False, False]\n int_auto = [False, False, ... | [
"0.7134277",
"0.68215334",
"0.67325944",
"0.66690856",
"0.66621333",
"0.66224796",
"0.66224796",
"0.6620825",
"0.65094316",
"0.63697153",
"0.6361125",
"0.63547695",
"0.63419104",
"0.63262755",
"0.6315767",
"0.63109344",
"0.6281166",
"0.62565774",
"0.6243989",
"0.61786604",
"0... | 0.7148574 | 0 |
Resets the sum for the I part of the PID controller. | def reset_sum(self):
self.sum_e = 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset(self,):\n \n self.i = 0\n self.pi = 1.0\n self.si = 0.0\n self.pi_min = float(\"inf\")\n self.si_min = float(\"inf\")",
"def reset(self):\n self.num_inst = 0\n self.sum_metric = 0.0",
"def reset(self):\n self.sum = [0.] * len(self.topk)\n... | [
"0.6734358",
"0.6594717",
"0.65548724",
"0.64758915",
"0.64260775",
"0.6420713",
"0.64191103",
"0.64191103",
"0.63902366",
"0.63468647",
"0.63305706",
"0.6325734",
"0.62899965",
"0.62425673",
"0.62286294",
"0.6172447",
"0.61641145",
"0.61571854",
"0.61498404",
"0.6144145",
"0... | 0.6990316 | 0 |
Returns the control input omega for the specified vehicle. | def _get_omega(self, vehicle_id):
pos = self.positions[vehicle_id]
omega = self.frenets[vehicle_id].get_omega(
pos[0], pos[1], pos[2], pos[3])
return omega | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def omega(self):\n return self._omega",
"def omega(self, forceCalculate=False, verbose=0):\n\n if self._omegaExists and not forceCalculate:\n return self._omega\n\n self.incidentWavefunction.gaugeField(verbose=verbose)\n self.targetWavefunction.adjointWilsonLine(verbose=ver... | [
"0.6391047",
"0.60018724",
"0.57415396",
"0.5733592",
"0.5685982",
"0.5655346",
"0.5607157",
"0.55864197",
"0.5533074",
"0.5496616",
"0.53909785",
"0.53708667",
"0.5355509",
"0.5332758",
"0.53159285",
"0.5285314",
"0.52425337",
"0.51883626",
"0.5184026",
"0.5176245",
"0.51741... | 0.75547147 | 0 |
Returns the velocity control signal for the given vehicle. Calculates the distance error to the vehicle in front and gets the velocity from the PID controller. | def _get_vel(self, vehicle_id):
id1 = self.vehicle_ids[self.vehicle_ids.index(vehicle_id) - 1]
pos1 = self.positions[id1]
pos2 = self.positions[vehicle_id]
dist = self.pt.get_distance([pos1[0], pos1[1]], [pos2[0], pos2[1]])
e = self.e_ref - (dist - self.distance_offset)
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_speed(vehicle):\n vel = vehicle.get_velocity()\n\n return 3.6 * math.sqrt(vel.x ** 2 + vel.y ** 2 + vel.z ** 2)",
"def get_speed(vehicle):\n vel = vehicle.get_velocity()\n velocity = math.sqrt(vel.x ** 2 + vel.y ** 2 + vel.z ** 2)\n\n return np.array([velocity])",
"def get_control(self, ... | [
"0.61187434",
"0.6118345",
"0.6053016",
"0.5852841",
"0.57715523",
"0.57453",
"0.56313854",
"0.56313854",
"0.55368686",
"0.55247694",
"0.55183417",
"0.5509164",
"0.5502659",
"0.5490078",
"0.54808605",
"0.5460431",
"0.54296684",
"0.54124415",
"0.5374918",
"0.53626955",
"0.5327... | 0.6271286 | 0 |
Sets a new reference ellipse path. | def set_reference_path(self, radius, center=None, pts=400):
if center is None:
center = [0, 0]
self.pt.gen_circle_path(radius, pts, center) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_path_service(self, new_path):\n self.__repo.set_path_repo(new_path)",
"def setPath(self, path):\n libxml2mod.xmlURISetPath(self._o, path)",
"def setPath(self, path):\n if self._path != path:\n self._path = path\n self.__update_preview()",
"def set_current_to... | [
"0.600749",
"0.58211917",
"0.56862533",
"0.5653296",
"0.5627035",
"0.5622959",
"0.56046",
"0.5573325",
"0.5551815",
"0.5539654",
"0.55165696",
"0.5484212",
"0.5467029",
"0.54660916",
"0.54660916",
"0.54660916",
"0.54660916",
"0.54660916",
"0.54481995",
"0.5413508",
"0.5399804... | 0.6184485 | 0 |
Calculate the discriminator performance | def calc_disc_perf(self, imgs_fake: torch.Tensor, imgs_real: torch.Tensor):
# create labels
real, fake = self.create_labels(imgs_real.size(0), imgs_fake.size(0))
real, fake = real.int(), fake.int()
# How well can it label as fake?
fake_output = self.discriminator(imgs_fake)
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def discriminator(self):\n\n # Initializate the neural network\n discriminator = Sequential()\n\n # Convolution, bias, activate\n discriminator.add(Conv2D(filters=self.first_layer_filter,\n kernel_size=self.kernel_size,\n s... | [
"0.63954556",
"0.63888186",
"0.62307453",
"0.6068661",
"0.59837025",
"0.5971976",
"0.59599644",
"0.5959183",
"0.59578776",
"0.59511286",
"0.5939968",
"0.59390277",
"0.59313715",
"0.5928002",
"0.5880307",
"0.5880307",
"0.5878475",
"0.58643734",
"0.58518994",
"0.585023",
"0.583... | 0.6676827 | 0 |
Download the MNIST data if it doesn't exist in processed_folder already. | def download(self):
if self._check_exists():
return
os.makedirs(self.raw_folder, exist_ok=True)
os.makedirs(self.processed_folder, exist_ok=True)
# download files
for url in self.resources:
filename = url.rpartition('/')[2]
download_and_extr... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download_mnist(base_url, base_dir):\n\n download_files(base_url, base_dir)\n process_images(base_dir, \"train\")\n process_images(base_dir, \"t10k\")",
"def check_fetch_mnist():\n # py3k version is available at mnist_py3k.pkl.gz ... might need to fix\n url = 'http://www.iro.umontreal.ca/~lisa/... | [
"0.7551862",
"0.7125235",
"0.70789623",
"0.704323",
"0.699568",
"0.6890501",
"0.6659727",
"0.65201825",
"0.6440362",
"0.6423504",
"0.6393885",
"0.6365736",
"0.6337188",
"0.6323313",
"0.6263566",
"0.62273705",
"0.61611176",
"0.6154478",
"0.6138575",
"0.61181855",
"0.61141074",... | 0.72701216 | 1 |
Execute the NLP pipeline on the given text and language | def process_text(self, text, language): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_text(model_name: str, text: str) -> spacy.tokens.Doc:\r\n nlp = load_model(model_name)\r\n return nlp(text)",
"def __call__(self, parser, text):\n try:\n nltk_parse_tree = parser(text)\n\n self._evaluate(nltk_parse_tree)\n except:\n self.player.res... | [
"0.6686367",
"0.64434546",
"0.63922846",
"0.6383538",
"0.62450373",
"0.62264246",
"0.6168433",
"0.59721196",
"0.5963083",
"0.5930044",
"0.58999956",
"0.5894691",
"0.5889573",
"0.5843738",
"0.58319145",
"0.582083",
"0.5812574",
"0.5810435",
"0.5808556",
"0.57884467",
"0.578164... | 0.7934864 | 0 |
returns true if the given word is a stop word (within the given language) | def is_stopword(self, word, language): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_stop_word(word):\n return word in final_stop_words",
"def is_stop_word(self, word):\n pass",
"def is_stopword(word):\n\tstop_words = nltk.corpus.stopwords.words('english')\n\treturn word in stop_words",
"def stop_word(w): # local feature\n return (w in swl)",
"def filter1(... | [
"0.83715206",
"0.83041",
"0.80014837",
"0.78062886",
"0.767101",
"0.76048696",
"0.7403305",
"0.7161536",
"0.7113699",
"0.68114984",
"0.68106383",
"0.6512114",
"0.64741874",
"0.64187497",
"0.6386675",
"0.6367486",
"0.6277749",
"0.62659085",
"0.62268203",
"0.620168",
"0.6190485... | 0.94214183 | 0 |
returns true if the given word is a punctuation word (within the given language) | def is_punct(self, word, language): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_punctuation(text):\n return not (text.lower() in AVRO_VOWELS or\n text.lower() in AVRO_CONSONANTS)",
"def is_punctuation(ch):\n if (ch == '.'): return False\n if (ch >= '!' and ch <= '/'): return True\n if (ch >= ':' and ch <= '@'): return True\n if (ch >= '\\u2010' and ch <=... | [
"0.8043411",
"0.7695091",
"0.76177907",
"0.7533604",
"0.7463532",
"0.7455915",
"0.744859",
"0.73916805",
"0.70031446",
"0.6972978",
"0.69672704",
"0.69587123",
"0.6586117",
"0.64089644",
"0.6372763",
"0.63423836",
"0.632881",
"0.63278973",
"0.6094414",
"0.6063989",
"0.6033848... | 0.9143066 | 0 |