| query | document | metadata | negatives | negative_scores | document_score | document_rank |
|---|---|---|---|---|---|---|
Adds extra device names that we know explicitly from some external source. | def addExtraDevices(self):
# These tables were extracted from
# pirates/src/piratesgui/GameOptions.py.
ati_device_list = [
["ATI MOBILITY/RADEON X700", 0x5653],
[1, "Radeon X1950 XTX Uber - Limited Edition", 0x7248],
[1, "Radeon X1950 XTX Ub... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_extra_args(self):\n self.parser.add_argument('--device', dest='device', type=str, help='Device ID, e.g. d--0001')",
"def addDevice(self, node, fullDeviceName, device):",
"def load_devices():",
"def addDeviceDescriptor(string: str, deviceDescriptor: cern.japc.core.DeviceDescriptor) -> None:\n ... | [
"0.6235411",
"0.6018107",
"0.5875037",
"0.5769226",
"0.5737719",
"0.5699172",
"0.5628838",
"0.56115973",
"0.55473393",
"0.5530301",
"0.5512926",
"0.55042124",
"0.54661566",
"0.5442146",
"0.54258853",
"0.5409304",
"0.53862745",
"0.5350149",
"0.53231454",
"0.52631706",
"0.52626... | 0.64580566 | 0 |
Counts the frequencies of samples of given variables ``vars`` and calculates probabilities with additive smoothing. | def get_probs(self, *vars):
freqs = self.freq_counts([self.data.get_column_view(v)[0] for v in vars], [len(v.values) for v in vars])
k = np.prod([len(v.values) for v in vars])
return (freqs + self.alpha) / (np.sum(freqs) + self.alpha*k) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_posterior_probs(vars_):\n vars_.weighted_sums += np.power(vars_.dprime_map[vars_.focus],2) * vars_.visual_field\n vars_.post_probs = np.exp(vars_.weighted_sums) * vars_.prior_prob\n vars_.post_probs /= np.sum(vars_.post_probs)",
"def countVarFreq(list_models_vars_freq):\n list_variables_to... | [
"0.57587546",
"0.5575326",
"0.53477293",
"0.52793074",
"0.51654255",
"0.5154966",
"0.5104432",
"0.50244606",
"0.49709153",
"0.49697486",
"0.49534038",
"0.491249",
"0.4908834",
"0.48904198",
"0.48888883",
"0.48864844",
"0.48855996",
"0.48635137",
"0.4834516",
"0.48303828",
"0.... | 0.7346631 | 0 |
If total_rel_ig_ab is not given, computes the absolute total info gain for attributes a and b. Generates an Interaction object. | def attribute_interactions(self, a, b, total_rel_ig_ab=None):
var_a = self.data.domain.variables[a]
var_b = self.data.domain.variables[b]
ig_a = self.info_gains[var_a.name]
ig_b = self.info_gains[var_b.name]
if not total_rel_ig_ab:
ig_ab = ig_a + ig_b - (self.class_en... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def infoGain(self,attr, data, target_attr):\n remainder = 0\n p = 0\n ent = 0\n for ele in target_attr:\n if ele == 1:\n p +=1\n \n q = p / (len(target_attr)) \n if 0 < q < 1:\n ent = -((q * math.log2(q)) + ((1-q) * math.... | [
"0.6119889",
"0.5544115",
"0.5507098",
"0.54734284",
"0.54698795",
"0.53078294",
"0.53001773",
"0.52430487",
"0.52211547",
"0.5215387",
"0.52137595",
"0.5157385",
"0.51392186",
"0.5137646",
"0.50982416",
"0.5096558",
"0.50840324",
"0.50741106",
"0.50459766",
"0.50332594",
"0.... | 0.6960144 | 0 |
Computes the Interaction objects for n most informative pairs of attributes. For this to work, ``interaction_matrix`` must be called first. It uses a partial sort and then a full sort on the remaining n elements to get the indices of attributes. | def get_top_att(self, n):
if not self.int_M_called:
raise IndexError("Call interaction_matrix first!")
flat_indices = np.argpartition(np.tril(-self.int_matrix, -1).ravel(), n - 1)[:n]
# TODO: Consider using the partial sort from the bottleneck module for faster sorting
row_in... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def interaction_matrix(self):\n\n self.int_M_called = True\n int_M = np.zeros((self.n, self.n))\n for k in range(self.n):\n for j in range(k+1):\n o = self.attribute_interactions(k, j)\n int_M[k, j] = o.rel_total_ig_ab # Store total information gain\n ... | [
"0.60471845",
"0.54465884",
"0.52681583",
"0.5024728",
"0.4996432",
"0.49814323",
"0.49585322",
"0.49543592",
"0.49077043",
"0.4857076",
"0.48561823",
"0.48455074",
"0.4830593",
"0.48272932",
"0.48052135",
"0.47973144",
"0.47867075",
"0.4769542",
"0.47667968",
"0.47630936",
"... | 0.72928554 | 0 |
Returns the list of names of args/kwargs without defaults from `fun` signature. | def get_required_kwargs(fun, skip_positional=0):
sig = inspect.signature(fun)
# the params from signature with up to skip_positional filtered out
# (less only if there is not enough of positional args)
params = [(name, param) for i, (name, param) in enumerate(sig.parameters.items())
if i >... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getArgs(func):\n # exclude the defaults at the end (hence the [:-1])\n args = list(utils.flatten(inspect.getargspec(func)[:-1]))\n return set(args).difference(set([None]))",
"def list_kwargs(func):\n \n details = inspect.getargspec(func)\n nopt = len(details.defaults)\n \n return deta... | [
"0.7373131",
"0.70837766",
"0.70198065",
"0.6812793",
"0.6719347",
"0.66698635",
"0.66303456",
"0.65023136",
"0.6472078",
"0.6378711",
"0.63185936",
"0.63058794",
"0.62759775",
"0.6262447",
"0.6222361",
"0.62066025",
"0.6155284",
"0.59993124",
"0.59842724",
"0.5961036",
"0.59... | 0.7168854 | 1 |
When a team is created, its survey is automatically created. | def test_create_team_creates_survey(self):
user = User.create(name='User Foo', email='user@foo.com')
user.put()
code = 'trout viper'
team_response = self.testapp.post_json(
'/api/teams',
{
'name': 'Team Foo',
'code': code,
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_teams_create(self):\n pass",
"def test_create_team(self):\n pass",
"def test_generate_survey(self):\n\n result = generate_survey.apply((self.user.id,\n self.report.get_daily().id)).get()\n self.assertTrue(result, \"should create a surv... | [
"0.6586207",
"0.6538541",
"0.64314604",
"0.6378735",
"0.63344073",
"0.6198191",
"0.61724335",
"0.6105733",
"0.6045564",
"0.60214174",
"0.59957623",
"0.59651864",
"0.5957437",
"0.5896016",
"0.5873307",
"0.5823892",
"0.5811888",
"0.5769924",
"0.57607037",
"0.5720423",
"0.569084... | 0.75194645 | 0 |
You can get the survey for a team you own. | def test_get_for_team(self):
user, team_dict = self.test_create_team_creates_survey()
response = self.testapp.get(
'/api/teams/{}/survey'.format(team_dict['uid']),
headers=self.login_headers(user),
)
survey_dict = json.loads(response.body)
self.assertTrue(... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_questionnaire(self, url, survey_path):\n pass",
"def getSurveys(self, **kwargs):\n response = self.request(\"getSurveys\", **kwargs)\n # print response\n surveys = None\n if response:\n surveys = OrderedDict()\n for survey in response[\"Result\"][\... | [
"0.6343129",
"0.631515",
"0.6284795",
"0.6031469",
"0.58984464",
"0.58517706",
"0.5640203",
"0.56234926",
"0.5607942",
"0.55881244",
"0.55276686",
"0.5523524",
"0.55202866",
"0.54980105",
"0.5456152",
"0.545253",
"0.53732574",
"0.5364306",
"0.53133166",
"0.52972543",
"0.52571... | 0.757884 | 0 |
You can't get a survey for someone else's team. | def test_get_for_other_forbidden(self):
user, team_dict = self.test_create_team_creates_survey()
other = User.create(name='Other', email='other@foo.com')
other.put()
self.testapp.get(
'/api/teams/{}/survey'.format(team_dict['uid']),
headers=self.login_headers(othe... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_for_team(self):\n user, team_dict = self.test_create_team_creates_survey()\n response = self.testapp.get(\n '/api/teams/{}/survey'.format(team_dict['uid']),\n headers=self.login_headers(user),\n )\n survey_dict = json.loads(response.body)\n self... | [
"0.7120516",
"0.6282421",
"0.626813",
"0.618336",
"0.6113319",
"0.6037205",
"0.598216",
"0.5848799",
"0.57429224",
"0.5724888",
"0.570673",
"0.5694278",
"0.5633859",
"0.5616271",
"0.56087244",
"0.5571042",
"0.5523372",
"0.55163455",
"0.5508447",
"0.55008894",
"0.54575586",
... | 0.67539746 | 1 |
Client dict should have portal-friendly metric labels. | def test_metric_labels(self):
team_id = 'Team_foo'
m1 = Metric.create(name='Foo Condition', label='foo_condition')
m2 = Metric.create(name='Bar Condition', label='bar_condition')
Metric.put_multi([m1, m2])
survey = Survey.create(team_id=team_id, metrics=[m1.uid, m2.uid])
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_host_configuration_metrics1(self):\n pass",
"def test_get_host_configuration_metrics(self):\n pass",
"def mock_client_fixture():\n with mock.patch(f\"{PROMETHEUS_PATH}.prometheus_client\") as client:\n counter_client = mock.MagicMock()\n client.Counter = mock.MagicMo... | [
"0.58376956",
"0.57469577",
"0.5601619",
"0.5601619",
"0.5514434",
"0.55044305",
"0.5325245",
"0.5324136",
"0.5164732",
"0.5106117",
"0.50848985",
"0.5079339",
"0.5064023",
"0.50074315",
"0.5007054",
"0.4984392",
"0.4983079",
"0.4963031",
"0.49587086",
"0.4958586",
"0.4957912... | 0.57984996 | 1 |
Pause pattern while self.pauseNow is True; return immediately if self.playStatus == False | def pauseCheck(self):
while (self.playStatus == False and self.pauseNow == True):
self.isPause = True
time.sleep(.25)
self.isPause = False
return self.playStatus | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pause(self):\n while 1:\n if self.is_paused:\n time.sleep(1)\n else:\n break",
"def pause(self):\n if self.status()['state'] == \"playing\":\n self.toggle_pause()",
"def test_pause(self):\n source = procedural.WhiteNoise(0.5)\n ... | [
"0.7879247",
"0.7591245",
"0.75521106",
"0.7544259",
"0.7394605",
"0.72653115",
"0.72582483",
"0.72100365",
"0.7209901",
"0.7202345",
"0.7187789",
"0.7187789",
"0.7176371",
"0.7118577",
"0.71078545",
"0.70944405",
"0.70625925",
"0.70505774",
"0.70505774",
"0.70348316",
"0.703... | 0.78421515 | 1 |
For now, we are only returning the label for the first authorization. | def get_label(self):
auth = self.authorizations[0]
return auth.label | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_label ( self ):\n if self._label is not None:\n return self._label\n return user_name_for( self.name )",
"def _get_label(self):\n return self.label",
"def _get_label ( self ):\n if self._label is not None:\n return self._label\n return self.name... | [
"0.7064338",
"0.6762954",
"0.65058815",
"0.6465353",
"0.6465353",
"0.6465353",
"0.6465353",
"0.6433205",
"0.6383421",
"0.6355541",
"0.6347802",
"0.6343887",
"0.63365865",
"0.6327651",
"0.6327651",
"0.6300275",
"0.6296131",
"0.6296131",
"0.6296131",
"0.6296131",
"0.6296131",
... | 0.8055274 | 0 |
Change the value of every pixel by following x_n = 0.5x_p^2 where x_n is the new value and x_p is the original value | def change_value(image):
out = None
#####################################
# START YOUR CODE HERE #
#####################################
image = image / 255
out = np.empty_like(image)
height, width, _ = image.shape
for h in range(height):
for w in range(width):
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def change_X(X):\n _X = swap_pixels(X.X)\n\n X.update(_X)\n\n return X",
"def recolorPixels(x,y,px, newColorArray):\r\n for i in range(0+coeff1*x,coeff1+coeff1*x):\r\n for j in range(0+coeff1*y,coeff1+coeff1*y):\r\n px[i,j]=newColorArray[x][y]",
"def set_pixel(self, x, y, value):\... | [
"0.6009499",
"0.6003074",
"0.5953545",
"0.58825284",
"0.5864363",
"0.57073236",
"0.5685161",
"0.56669635",
"0.56421584",
"0.55719",
"0.5569989",
"0.5528347",
"0.55130744",
"0.54917955",
"0.54803175",
"0.5448064",
"0.5439173",
"0.5424856",
"0.5372966",
"0.5365994",
"0.53644747... | 0.7488139 | 0 |
Removes a value from the set. Returns true if the set contained the specified element. | def remove(self, val: int) -> bool:
if val in self.set:
self.set.remove(val);
self.nums.remove(val);
return True;
return False; | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove(self, val: int) -> bool:\n if val in self.set:\n self.set.remove(val)\n return True\n return False",
"def remove(self, val: int) -> bool:\n temp = self.randomSet.pop(val, False)\n return True if temp != False else temp",
"def remove(self, val: int) -... | [
"0.7793552",
"0.7472986",
"0.7311967",
"0.7115737",
"0.70873374",
"0.69927806",
"0.6945106",
"0.68983823",
"0.68566453",
"0.68520516",
"0.68365693",
"0.68351394",
"0.68248504",
"0.6823558",
"0.6823558",
"0.6814836",
"0.67781126",
"0.67604584",
"0.67402226",
"0.6724619",
"0.67... | 0.7555588 | 1 |
Indicates whether the identifier provided is contained in this namespace. | def contains(self, identifier):
uri = identifier if isinstance(identifier, six.string_types) else (
identifier.uri if isinstance(identifier, Identifier) else None
)
return uri.startswith(self._uri) if uri else False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __contains__(self, identifier):\n # following breaks some tests, what is the expected behaviour?\n # return any(m.unique_id.endswith(identifier) for m in self)\n return any(m.unique_id == identifier for m in self)",
"def is_declared(self, identifier: str) -> bool:\n if identifier ... | [
"0.695668",
"0.6656405",
"0.6532839",
"0.64874536",
"0.6400062",
"0.63559645",
"0.6343124",
"0.6330577",
"0.6319895",
"0.6311052",
"0.6293539",
"0.62231743",
"0.619792",
"0.6197703",
"0.6196918",
"0.6167409",
"0.6140008",
"0.60929984",
"0.60690624",
"0.60648376",
"0.60636157"... | 0.77918166 | 0 |
Verify that the output table has the header items listed in field_names. | def assertTableHeaders(self, output_lines, field_names):
table = self.parser.table(output_lines)
headers = table['headers']
for field in field_names:
self.assertIn(field, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def assert_show_fields(self, show_output, field_names):\n\n # field_names = ['name', 'description']\n # show_output = [{'name': 'fc2b98d8faed4126b9e371eda045ade2'},\n # {'description': 'description-821397086'}]\n # this next line creates a flattened list of all 'keys' (like 'na... | [
"0.71665496",
"0.69928867",
"0.69691926",
"0.6916123",
"0.6892314",
"0.682082",
"0.6780092",
"0.67301995",
"0.6690977",
"0.66392064",
"0.6528734",
"0.6500211",
"0.6498844",
"0.64815634",
"0.6458742",
"0.64520425",
"0.63763213",
"0.62508994",
"0.62471277",
"0.6220344",
"0.6196... | 0.84269035 | 0 |
Create a dictionary from an output | def _get_property_from_output(self, output):
obj = {}
items = self.parser.listing(output)
for item in items:
obj[item['Property']] = str(item['Value'])
return obj | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_output(output):\n lines = output.splitlines()[3:-1]\n r = {}\n for line in lines:\n kv = filter(None, line.split('|'))\n kv = [x.strip() for x in kv]\n r.update({kv[0]: kv[1]})\n return r",
"def _get_output_dictionary(self):\n\n return_dictionary = {}\n\n ... | [
"0.7458908",
"0.6636358",
"0.6539277",
"0.64794725",
"0.6453821",
"0.64392745",
"0.6378547",
"0.6347167",
"0.6300203",
"0.62663984",
"0.6249434",
"0.6200246",
"0.617561",
"0.6171171",
"0.61182153",
"0.6114068",
"0.61129606",
"0.60749435",
"0.6059385",
"0.6053162",
"0.60514975... | 0.6664644 | 1 |
Wait until object reaches given status. | def wait_for_object_status(self, object_name, object_id, status,
timeout=120, interval=3):
cmd = self.object_cmd(object_name, 'show')
start_time = time.time()
while time.time() - start_time < timeout:
if status in self.cinder(cmd, params=object_id):
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wait_for_status(self, status):\n code = self.instance.state['Code']\n while code != status:\n time.sleep(3)\n self.instance.reload()\n code = self.instance.state['Code']",
"def check_completion(self):\n\n time.sleep(3)\n while self.status == 0:\n ... | [
"0.73892504",
"0.72170204",
"0.71951866",
"0.70215213",
"0.6863582",
"0.6744071",
"0.6706966",
"0.66450965",
"0.6635064",
"0.65745586",
"0.6519117",
"0.6512439",
"0.65002567",
"0.6495888",
"0.6489756",
"0.6467292",
"0.6467292",
"0.6445491",
"0.6445491",
"0.64176154",
"0.64089... | 0.7880691 | 0 |
Check that object deleted successfully. | def check_object_deleted(self, object_name, object_id, timeout=60):
cmd = self.object_cmd(object_name, 'show')
try:
start_time = time.time()
while time.time() - start_time < timeout:
if object_id not in self.cinder(cmd, params=object_id):
break... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _objectDeleted(self, obj):\n pass",
"def do_deleting(self, request, obj, obj_display, obj_id):\n try:\n with transaction.atomic(savepoint=False):\n self.log_deletion(request, obj, obj_display)\n self.delete_model(request, obj)\n\n return s... | [
"0.7490332",
"0.7215603",
"0.7164006",
"0.7086588",
"0.70412916",
"0.70006657",
"0.69678736",
"0.6930805",
"0.6886216",
"0.68708795",
"0.68376404",
"0.68198115",
"0.6819284",
"0.6812539",
"0.679975",
"0.67971224",
"0.6778325",
"0.6737792",
"0.6717377",
"0.66977274",
"0.669110... | 0.7400769 | 1 |
check sparsemaxloss kernel against numpy | def _test_sparsemax_loss_against_numpy(self, dtype, random, use_gpu):
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
q = np.zeros((test_obs, 10))
q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1
tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)
np_l... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _test_sparsemax_loss_positive(self, dtype, random, use_gpu):\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n q = np.zeros((test_obs, 10))\n q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1\n\n tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)\n\n... | [
"0.6425822",
"0.64143616",
"0.6307287",
"0.61605656",
"0.6009254",
"0.5761981",
"0.5626876",
"0.5618227",
"0.5602798",
"0.5556589",
"0.55460817",
"0.55382633",
"0.5535278",
"0.5496034",
"0.5479103",
"0.545446",
"0.5430234",
"0.542532",
"0.5417887",
"0.54107744",
"0.5393759",
... | 0.6560093 | 0 |
check sparsemaxloss transfers nan | def _test_sparsemax_loss_of_nan(self, dtype, random, use_gpu):
q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1]])
z_nan = np.asarray([[0, np.nan, 0], [0, np.nan, np.nan],
[np.nan, np.nan, np.nan]]).astype(dtype)
_, tf_loss_nan = self._tf_sparsemax_loss(z_nan, q, dtype, use_gpu)
s... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _test_sparsemax_loss_of_inf(self, dtype, random, use_gpu):\n q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]])\n z_neg = np.asarray([\n [0, -np.inf, 0],\n [0, -np.inf, -np.inf],\n [-np.inf, -np.inf, 0],\n [-np.inf, -np.inf, -np.inf],\n ]).astype(dtype)\n z_pos... | [
"0.6993612",
"0.68045354",
"0.6742524",
"0.64270777",
"0.6119363",
"0.59418494",
"0.5930874",
"0.5891737",
"0.583687",
"0.58328086",
"0.57153714",
"0.567385",
"0.56433874",
"0.5623153",
"0.5609487",
"0.5586741",
"0.5584325",
"0.5562963",
"0.5493455",
"0.5464474",
"0.54510564"... | 0.7484194 | 0 |
check sparsemaxloss is infinity safe | def _test_sparsemax_loss_of_inf(self, dtype, random, use_gpu):
q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]])
z_neg = np.asarray([
[0, -np.inf, 0],
[0, -np.inf, -np.inf],
[-np.inf, -np.inf, 0],
[-np.inf, -np.inf, -np.inf],
]).astype(dtype)
z_pos = np.asarray... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _test_sparsemax_loss_of_nan(self, dtype, random, use_gpu):\n q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1]])\n z_nan = np.asarray([[0, np.nan, 0], [0, np.nan, np.nan],\n [np.nan, np.nan, np.nan]]).astype(dtype)\n\n _, tf_loss_nan = self._tf_sparsemax_loss(z_nan, q, dtype, use_... | [
"0.72919095",
"0.7114687",
"0.6878583",
"0.6437763",
"0.6230124",
"0.6111207",
"0.60838383",
"0.60647756",
"0.6005",
"0.58566064",
"0.5836664",
"0.5805594",
"0.57603514",
"0.5743544",
"0.5705851",
"0.56542385",
"0.5634526",
"0.5634205",
"0.56317574",
"0.5618935",
"0.55948126"... | 0.7511145 | 0 |
check sparsemaxloss proposition 4 | def _test_sparsemax_loss_positive(self, dtype, random, use_gpu):
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
q = np.zeros((test_obs, 10))
q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1
tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)
self.ass... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _test_sparsemax_loss_zero(self, dtype, random, use_gpu):\n # construct z and q, such that z_k >= 1 + max_{j!=k} z_k holds for\n # delta_0 = 1.\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n z[:, 0] = np.max(z, axis=1) + 1.05\n\n q = np.zeros((test_obs, 10))\n q[:, 0] = 1\n\n t... | [
"0.6196355",
"0.6067124",
"0.6020071",
"0.5861591",
"0.58333623",
"0.5814174",
"0.5738434",
"0.56803995",
"0.5659591",
"0.5651032",
"0.56506085",
"0.5645035",
"0.5634631",
"0.5630038",
"0.56013834",
"0.55691886",
"0.5534191",
"0.5532042",
"0.5531974",
"0.5514072",
"0.5472693"... | 0.660775 | 0 |
check sparsemaxloss proposition 5 | def _test_sparsemax_loss_zero(self, dtype, random, use_gpu):
# construct z and q, such that z_k >= 1 + max_{j!=k} z_k holds for
# delta_0 = 1.
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
z[:, 0] = np.max(z, axis=1) + 1.05
q = np.zeros((test_obs, 10))
q[:, 0] = 1
tf_loss_op, tf_... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _test_sparsemax_loss_positive(self, dtype, random, use_gpu):\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n q = np.zeros((test_obs, 10))\n q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1\n\n tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)\n\n... | [
"0.6566375",
"0.6140244",
"0.6020924",
"0.583102",
"0.5816435",
"0.5766244",
"0.57547736",
"0.5742387",
"0.5733611",
"0.5728958",
"0.569015",
"0.56716436",
"0.5660839",
"0.56137866",
"0.5600897",
"0.55933416",
"0.55928576",
"0.5521913",
"0.54795593",
"0.5471057",
"0.54647976"... | 0.61877185 | 1 |
Sets the export_host of this ExportResponseMetadata. | def export_host(self, export_host):
self._export_host = export_host | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def response_host(self, response_host):\n\n self._response_host = response_host",
"def response_host(self, response_host):\n\n self._response_host = response_host",
"def host(self, host):\n\n self._host = host",
"def host(self, host):\n\n self._host = host",
"def host(self, host... | [
"0.7138213",
"0.7138213",
"0.644298",
"0.644298",
"0.644298",
"0.644298",
"0.6365714",
"0.6363546",
"0.6119689",
"0.5977553",
"0.5908788",
"0.58681583",
"0.5725987",
"0.5704862",
"0.5632454",
"0.5604846",
"0.5604846",
"0.5506752",
"0.5506752",
"0.5476766",
"0.54253507",
"0.... | 0.8150576 | 0 |
Sets the export_date of this ExportResponseMetadata. | def export_date(self, export_date):
self._export_date = export_date | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_date(self, date):\n self.data['date'] = date",
"def set_date(self, date):\n self.date = date\n return",
"def set_date(self, date):\n self.date = date",
"def date(self, date):\n\n self._date = date",
"def date(self, date):\n\n self._date = date",
"def date... | [
"0.64057446",
"0.63387674",
"0.63318104",
"0.62219906",
"0.62219906",
"0.62219906",
"0.62219906",
"0.62219906",
"0.6208642",
"0.6150361",
"0.6148115",
"0.597754",
"0.59448314",
"0.59330714",
"0.5932477",
"0.5932477",
"0.59302837",
"0.59251916",
"0.5893984",
"0.5889508",
"0.58... | 0.8478781 | 0 |
Sets the requested_object_list of this ExportResponseMetadata. | def requested_object_list(self, requested_object_list):
self._requested_object_list = requested_object_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exported_object_list(self, exported_object_list):\n\n self._exported_object_list = exported_object_list",
"def set_response_list(self, r_list):\n self.response_list = r_list",
"def set_objects(self, objects: list):\n self._objects = objects",
"def set_object_list(self, query, fields,... | [
"0.65758",
"0.58558273",
"0.5800644",
"0.5349972",
"0.51523393",
"0.5088856",
"0.5084719",
"0.5048292",
"0.50420725",
"0.5036184",
"0.50234246",
"0.50130266",
"0.4954567",
"0.4948551",
"0.49092585",
"0.49018076",
"0.49018076",
"0.48524174",
"0.48476678",
"0.47991368",
"0.4791... | 0.843938 | 0 |
Sets the exported_object_list of this ExportResponseMetadata. | def exported_object_list(self, exported_object_list):
self._exported_object_list = exported_object_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, export_host=None, export_date=None, requested_object_list=None, exported_object_list=None): # noqa: E501 # noqa: E501\n\n self._export_host = None\n self._export_date = None\n self._requested_object_list = None\n self._exported_object_list = None\n self.discr... | [
"0.57387066",
"0.572405",
"0.5720836",
"0.50837755",
"0.5052673",
"0.4954005",
"0.49233595",
"0.48638776",
"0.48131937",
"0.47819278",
"0.47225076",
"0.47225076",
"0.47171348",
"0.46870238",
"0.46680313",
"0.46198332",
"0.46057516",
"0.45436734",
"0.45278898",
"0.45272794",
"... | 0.8515711 | 0 |
Clean credentials and batch environment. It cleans a token credential for the user and the batch environment, in addition to deleting all Docker containers. Also, the command is executed by root in the prolog | def clean_environment(ctx, token):
try:
out = ctx.obj.clean_environment(token)
print_message(out)
except BaseException as e:
print_error(e.message) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean(self, data):\n required = {'admin_token', 'token'}\n api.validate(data, required)\n admin_token = data['admin_token']\n force = True\n self.credentials_module.authorize_admin(admin_token)\n token = data['token']\n containers = self.credentials_module.list_... | [
"0.74475706",
"0.61287606",
"0.6078283",
"0.6048094",
"0.59989303",
"0.5986939",
"0.59787303",
"0.59101623",
"0.590741",
"0.5895125",
"0.58949953",
"0.58100575",
"0.5757535",
"0.5750923",
"0.5743235",
"0.57311714",
"0.5729552",
"0.57249904",
"0.57111883",
"0.5707326",
"0.5699... | 0.6538844 | 1 |
Delete a container or list of them. | def container_delete(ctx, token, container_ids, force):
try:
out = ctx.obj.container_delete(token, container_ids, force)
print_message(out)
except exceptions.DockerException as e:
m = e.message
print_error(m) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_container(self, container: Container):",
"def DeleteContainers(self):\n for container in itertools.chain(*list(self.containers.values())):\n container.Delete()",
"def delete_container(ContainerName=None):\n pass",
"def delete_container(self, account, container):\n \n pass"... | [
"0.80328494",
"0.7434844",
"0.7386382",
"0.7215402",
"0.6816788",
"0.66818416",
"0.66644293",
"0.65226024",
"0.6510037",
"0.65017575",
"0.64464664",
"0.62738705",
"0.62296575",
"0.6196632",
"0.6169314",
"0.60266644",
"0.6006505",
"0.59684145",
"0.5931678",
"0.59242857",
"0.59... | 0.7737605 | 1 |
floor the point to the next lower multiple of bucket_size | def bucketize(point, bucket_size):
return bucket_size * math.floor(point / bucket_size) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bucketize(point, bucket_size):\n return bucket_size * math.floor(point / bucket_size)",
"def bucketize(point, bucket_size):\n return bucket_size * math.floor(point / bucket_size)",
"def bucket_boundaries(self, bucket):\n\n if bucket < 0 or bucket >= self.total_buckets:\n raise IndexError('buc... | [
"0.7870765",
"0.7870765",
"0.6348058",
"0.61719537",
"0.61719537",
"0.5968876",
"0.59520507",
"0.5859526",
"0.5772838",
"0.57719916",
"0.5732552",
"0.57283777",
"0.5643887",
"0.5643887",
"0.5638288",
"0.56255656",
"0.5601112",
"0.55790997",
"0.55427814",
"0.5534332",
"0.55317... | 0.79474443 | 0 |
buckets the points and counts how many in each bucket | def make_histogram(points, bucket_size):
return Counter(bucketize(point, bucket_size) for point in points) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_histogram(points, bucket_size):\n return Counter(bucketize(point, bucket_size) for point in points)",
"def make_histogram(points, bucket_size):\n return Counter(bucketize(point, bucket_size) for point in points)",
"def bucketize(point, bucket_size):\r\n return bucket_size * math.floor(point /... | [
"0.7339186",
"0.7339186",
"0.6404966",
"0.63198906",
"0.63145477",
"0.6261517",
"0.6261517",
"0.6104376",
"0.6046917",
"0.5988437",
"0.59414226",
"0.58722836",
"0.58446556",
"0.5792215",
"0.57895434",
"0.57877",
"0.56928134",
"0.5675051",
"0.56433356",
"0.5634637",
"0.5604564... | 0.7430182 | 0 |
returns a random draw from a standard normal distribution | def random_normal():
return inverse_normal_cdf(random.random()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def random_normal():\n return inverse_normal_cdf(random.random())",
"def normal(mean, std):\n\n return random.gauss(mean, std)",
"def get_standard_normal_distribution():\n return np.random.normal(0, 1)",
"def draw_normal(self):\n means, scale = self.get_means_and_scales()\n ret... | [
"0.80372727",
"0.8026352",
"0.7671266",
"0.758368",
"0.72377944",
"0.6984565",
"0.67650646",
"0.6753574",
"0.67039895",
"0.6645325",
"0.66249055",
"0.65704095",
"0.6569231",
"0.6554965",
"0.65050215",
"0.6493456",
"0.6487899",
"0.64656204",
"0.64537066",
"0.64235955",
"0.6408... | 0.80891997 | 0 |
Transform request data to a dict with 2 levels of depth | def request_data_to_dict(data):
if not isinstance(data, ImmutableMultiDict):
raise ValueError('Input must be ImmutableMultiDict type.')
res = {}
for (key, value) in data.to_dict().items():
matches = re.match('(.*)\[(.*)\]', key)
if matches:
(key_lv_1, key_lv_2) =... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _to_request_dict(self):\n return {\"attr1\": self.attr1, \"attr2\": \"test\"}",
"def to_dict(self, request) -> Dict[str, Any]:\n adict = self.__dict__.copy()\n adict[\"url\"] = self.href(adict[\"url\"], request)\n adict[\"img\"] = self.href(adict[\"img\"], request)\n if... | [
"0.6378962",
"0.63289046",
"0.6227822",
"0.60884583",
"0.6052619",
"0.60420406",
"0.60407573",
"0.6025036",
"0.59494644",
"0.5901705",
"0.5876816",
"0.5818334",
"0.5817537",
"0.57859",
"0.5757327",
"0.5757108",
"0.5747224",
"0.5745498",
"0.5735476",
"0.5723966",
"0.57147676",... | 0.7102085 | 0 |
Fades all outputs to the given color and waits for it to complete. | def FadeOutputs(box, color, steps=50):
for output in box:
output.Fade(color=color, steps=steps)
time.sleep(steps / (float(box.frequency) / len(box))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do_fade_colour(l, leds, r, g, b, duration):\n l._do_multi_led_command(\n create_fade_colour_command, leds, r, g, b, duration\n )",
"def color_chase(self, color: tuple = CYAN, wait: float = DEFAULT_SPEED):\n for i in range(self.np.n):\n self.np[i] = color\n ... | [
"0.6856355",
"0.66404843",
"0.64948034",
"0.6415791",
"0.6347536",
"0.62956667",
"0.6144092",
"0.6049132",
"0.5983142",
"0.59646887",
"0.5947122",
"0.5939072",
"0.59066415",
"0.58729315",
"0.57774615",
"0.5768261",
"0.5764425",
"0.5718582",
"0.56916755",
"0.56897503",
"0.5660... | 0.8030329 | 0 |
Returns the name the function should have in the Python API, based on the C++ function name. For entry_type 'function', the cpp_name is used unmodified; otherwise strip everything before the first underscore, so that | def to_py_name(cpp_name, entry_type):
if entry_type == 'function':
return cpp_name
first_underscore = cpp_name.find('_')
assert(first_underscore != -1)
return cpp_name[first_underscore + 1:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _plugin_funcname(func):\n funcname = func.__name__.rstrip(\"_\")\n if funcname.startswith(\"__\"):\n return funcname + \"__\"\n return funcname",
"def wrapper_function_name(text):\n text = GLGenerator.split_to_body_and_ext(text)\n body = text[0]\n ext = text[1]\n ... | [
"0.67665726",
"0.6633372",
"0.64924026",
"0.64650005",
"0.6389167",
"0.63741195",
"0.6252101",
"0.62447554",
"0.62166333",
"0.61651427",
"0.61233187",
"0.6080773",
"0.6080773",
"0.60069233",
"0.59802777",
"0.5970677",
"0.5940581",
"0.5938672",
"0.5932276",
"0.5924244",
"0.592... | 0.84663165 | 0 |
Returns the name the property should have in the Python api, based on the C++ struct name. | def property_to_py_name(cpp_struct_name):
first_underscore = cpp_struct_name.find('_')
assert first_underscore != -1
return cpp_struct_name[first_underscore + 1:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def PropertyName(self) -> str:",
"def property_name(self) -> str:\n return str(self.prop_name)",
"def _get_name(x):\r\n if isinstance(x, Property) or isinstance(x, KeyIndex):\r\n return x.name\r\n elif isinstance(x, Edge):\r\n return x.label\r\n ... | [
"0.7078016",
"0.6528718",
"0.65224946",
"0.6521",
"0.6296151",
"0.6226451",
"0.622544",
"0.6218055",
"0.61775655",
"0.615941",
"0.6148613",
"0.6143576",
"0.6141541",
"0.61411786",
"0.6134688",
"0.6088676",
"0.6088676",
"0.6035033",
"0.6035033",
"0.6035033",
"0.6035033",
"0.... | 0.8263242 | 0 |
Determines the Python method type (METH_NOARGS or METH_VARARGS) from the C++ argument list and type of function. | def get_type(args_str, entry_type):
# The C-method-implementations accept self as the first argument,
# so a one-argument method will be invoked with zero arguments in Python.
no_args = 1 if entry_type == "method" else 0
return ("METH_NOARGS" if len(args_str.split(",")) == no_args
else ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_pytype(self, c_arg, parse_arg):\n if isinstance(c_arg, FunctionAddress):\n return 'O'\n else:\n try:\n return pytype_parse_registry[(parse_arg.dtype, parse_arg.precision)]\n except KeyError as e:\n raise NotImplementedError(\"Type... | [
"0.61319065",
"0.60803515",
"0.5884141",
"0.5874044",
"0.58434784",
"0.5799301",
"0.56729174",
"0.56608915",
"0.5459384",
"0.54216975",
"0.54152423",
"0.53767866",
"0.53465176",
"0.5273557",
"0.52425903",
"0.51958585",
"0.51934904",
"0.513849",
"0.5122222",
"0.5119435",
"0.50... | 0.6993184 | 0 |
Creates one entry for a PyMethodDef array from the entries for one function (as returned by parse_file). | def to_PyMethodDef_entry(items):
entry_type = items[0]
items = items[1:]
if entry_type == 'method':
return 'FORWARDER(%s, %s, "%s", %s)' % items
elif entry_type == 'function':
return 'FREE_FORWARDER(%s, %s, "%s", %s)' % items
elif entry_type == 'method_template':
re... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_PyMethodDef(name, entries, extra_includes):\r\n\r\n methodEntries = [to_PyMethodDef_entry(items) for items in entries]\r\n if name is not None:\r\n methodDef = ('static PyMethodDef %s_methods[] = {\\n ' % name +\r\n ',\\n '.join(methodEntries) + ',\\n ')\r\n else:\r\n me... | [
"0.75998366",
"0.5944248",
"0.55603445",
"0.5401207",
"0.5332072",
"0.5176276",
"0.5170217",
"0.5108655",
"0.50803465",
"0.5051816",
"0.50399566",
"0.5013269",
"0.4962817",
"0.4942569",
"0.49400118",
"0.49136788",
"0.4911339",
"0.49019086",
"0.4900917",
"0.48928633",
"0.48558... | 0.7316116 | 1 |
Creates one entry for a PyGetSetDef array from the entries for one property struct (as returned by parse_file). | def to_PyGetSetDef_entry(cpp_struct_name, py_name, doc):
return 'PROPERTY_FORWARDER(%s, "%s", %s)' % (
cpp_struct_name, py_name, doc) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_PyGetSetDef(name, entries):\r\n getSetDefEntries = [to_PyGetSetDef_entry(*items) for items in entries]\r\n getSetDef = ('static PyGetSetDef %s_getseters[] = {\\n ' % name +\r\n ',\\n '.join(getSetDefEntries) + ',\\n ')\r\n getSetDef += '{nullptr,nullptr,nullptr,nullptr,nullptr} /... | [
"0.71594757",
"0.5288491",
"0.50017947",
"0.49717405",
"0.49567866",
"0.49039754",
"0.48913658",
"0.47898185",
"0.47757462",
"0.47685832",
"0.47616416",
"0.47450364",
"0.47120082",
"0.46812397",
"0.46606937",
"0.46587437",
"0.46313342",
"0.46271035",
"0.4625974",
"0.46091345",
... | 0.5890328 | 1 |
Creates a string of a C PyGetSetDef array named <name>_getseters, containing all entries in the list (as created by to_PyGetSetDef_entry). | def to_PyGetSetDef(name, entries):
getSetDefEntries = [to_PyGetSetDef_entry(*items) for items in entries]
getSetDef = ('static PyGetSetDef %s_getseters[] = {\n ' % name +
',\n '.join(getSetDefEntries) + ',\n ')
getSetDef += '{nullptr,nullptr,nullptr,nullptr,nullptr} // Sentinel\n};'
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getSets():",
"def get_drivers():\n return [str(d) for d in drivers.values()]",
"def getset(self, name, value):\r\n return self.format_bulk('GETSET', name, value)",
"def get_reader_funcs():\n return READERS",
"def get_all(self):\n return [self.get(name) for name in self.factories.ite... | [
"0.5476404",
"0.5249653",
"0.51922673",
"0.51390755",
"0.5105949",
"0.50751984",
"0.5071153",
"0.5018218",
"0.50118506",
"0.5000767",
"0.4974314",
"0.4955749",
"0.49450973",
"0.49285832",
"0.49245515",
"0.49245515",
"0.4890076",
"0.48870137",
"0.48566785",
"0.48400316",
"0.48... | 0.72217596 | 0 |
Creates a string of a C PyMethodDef array named <name>_methods, containing all the entries in the list (as created by to_PyMethodDef_entry). Includes any include in the extra_includes list after the regular entries (before the sentinel). | def to_PyMethodDef(name, entries, extra_includes):
methodEntries = [to_PyMethodDef_entry(items) for items in entries]
if name is not None:
methodDef = ('static PyMethodDef %s_methods[] = {\n ' % name +
',\n '.join(methodEntries) + ',\n ')
else:
methodDef = ',\n'.join(m... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_PyMethodDef_entry(items):\r\n\r\n entry_type = items[0]\r\n items = items[1:]\r\n if entry_type == 'method':\r\n return 'FORWARDER(%s, %s, \"%s\", %s)' % items\r\n elif entry_type == 'function':\r\n return 'FREE_FORWARDER(%s, %s, \"%s\", %s)' % items\r\n elif entry_type == 'meth... | [
"0.6497351",
"0.6201312",
"0.59603804",
"0.5856507",
"0.57363343",
"0.5670631",
"0.56094706",
"0.56094706",
"0.5410812",
"0.5366973",
"0.5323587",
"0.52034914",
"0.51885706",
"0.51738644",
"0.5158081",
"0.51455",
"0.51223594",
"0.5052642",
"0.5005883",
"0.49957657",
"0.494654... | 0.7831075 | 0 |
Writes an HTML file documenting the passed-in methods, using the docstrings (as returned by parse_file) | def write_method_doc(file_name, entries):
with open(file_name, 'w', newline='\n') as f:
f.write('<table border="0">')
f.write('<tr><td><b>Method</b></td><td><b>Description</b></td></tr>')
for items in sorted(entries, key=itemgetter(3)):
f.write('<tr><td valign="top">%s</td... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _write_member_documentation_pages(\n documenter: sphinx.ext.autodoc.Documenter):\n for entry in _get_documenter_members(documenter):\n if entry.is_inherited:\n continue\n if (entry.overload and entry.overload.overload_id and\n re.fullmatch('[0-9]+', entry.overload.overload_id)):\n lo... | [
"0.60740525",
"0.59743536",
"0.59427845",
"0.5824418",
"0.5816547",
"0.57680935",
"0.5738121",
"0.5711054",
"0.56876665",
"0.5671265",
"0.56643975",
"0.56380713",
"0.5587306",
"0.55807567",
"0.5538951",
"0.55344105",
"0.5531077",
"0.5515236",
"0.54965585",
"0.54924417",
"0.54... | 0.75820845 | 0 |
Writes an HTML file documenting the passed-in properties, using the docstrings (as returned by parse_file). Expects a list of (propertyname, docstr) tuples. | def write_property_doc(file_name, entries):
if len(entries) == 0:
return
with open(file_name, 'w', newline='\n') as f:
f.write('<!-- Generated by %s -->' % os.path.basename(__file__))
f.write('<table border="0">')
f.write('<tr><td><b>Property</b></td><td><b>Description</b... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def render_to_file(properties,file):\n properties['tempfile']=None\n properties['remove_temp']=True\n properties['outfile']=file",
"def write_html(filelist):\n tmp = tempfile.NamedTemporaryFile(mode=\"w+\", suffix=\".html\", delete=False)\n\n tmp.write(r\"\"\"<!doctype html>\n <html>\... | [
"0.587501",
"0.5714326",
"0.55795753",
"0.55168897",
"0.5469178",
"0.54641604",
"0.5401127",
"0.5393503",
"0.538135",
"0.5371805",
"0.53659755",
"0.53104347",
"0.52841675",
"0.5282833",
"0.5242343",
"0.51993394",
"0.5180886",
"0.51743466",
"0.51662326",
"0.510156",
"0.5079105... | 0.7246242 | 0 |
Generate the Python PyMethodDef header and HTML documentation for the C++ file indicated by src_file_name, by locating "special" C-comments. The header is saved to dst_file_name and the HTML documentation to dst_doc_file_name. The name is used for the PyMethodDef and PyGetSetDef. | def generate(src_file_names,
dst_file_name,
dst_doc_file_name,
dst_property_doc_file_name,
name):
methods = []
properties = []
extra_includes = []
entries = (methods, properties)
for src_file_name in src_file_names:
check_file(src... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_headers(src_files, out_root, doc_root):\r\n\r\n if not os.path.exists(out_root):\r\n os.makedirs(out_root)\r\n did_print_heading = False\r\n changed = False\r\n for (name, files) in src_files:\r\n if files.__class__ == str:\r\n src = files\r\n files = (... | [
"0.67592466",
"0.58502406",
"0.58490044",
"0.5828653",
"0.5795212",
"0.5750393",
"0.57325697",
"0.5689374",
"0.5534153",
"0.5486456",
"0.5484368",
"0.54745",
"0.546431",
"0.54593277",
"0.5433575",
"0.5428016",
"0.5416794",
"0.5406283",
"0.537415",
"0.53738326",
"0.53385717",
... | 0.6063957 | 1 |
Generate headers with a Python PyMethodDef array and HTML documentation tables for the listed source files. | def generate_headers(src_files, out_root, doc_root):
if not os.path.exists(out_root):
os.makedirs(out_root)
did_print_heading = False
changed = False
for (name, files) in src_files:
if files.__class__ == str:
src = files
files = (src,)
else:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_included_function_list_readme():\n import iteration_utilities\n from iteration_utilities import Iterable\n from itertools import chain\n from operator import itemgetter\n from astropy.table import Table\n from astropy.io.ascii import RST\n\n rtd_link = '`{name} <http://iteration-uti... | [
"0.6388133",
"0.63291883",
"0.6162227",
"0.61605513",
"0.605548",
"0.5960463",
"0.5946018",
"0.58367574",
"0.582316",
"0.58196837",
"0.58120084",
"0.5807799",
"0.5753701",
"0.57469726",
"0.57268125",
"0.5716415",
"0.5679393",
"0.56756175",
"0.5639785",
"0.5602997",
"0.5587299... | 0.77620685 | 0 |
Construct an instance of ``client_class`` and register it under given alias. | def create_connection(self, alias='async', client_class=AsyncElasticsearch, **kwargs):
kwargs.setdefault('serializer', serializer)
conn = self._conns[alias] = client_class(**kwargs)
return conn | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def register_client(self, client, client_name):\n self.clients[client_name] = client",
"def create_client(self, module_name, version, client_class):\n # NOTE(kiennt): Get created client rather create a new one.\n # The key is the combination of module_name and version.\n ... | [
"0.5862595",
"0.57551134",
"0.57218677",
"0.56946874",
"0.56426543",
"0.5604221",
"0.5513979",
"0.5479135",
"0.5450147",
"0.54027754",
"0.53822577",
"0.5351544",
"0.5310474",
"0.53071845",
"0.5302867",
"0.5245894",
"0.52256596",
"0.5221313",
"0.52094626",
"0.52094626",
"0.520... | 0.64954954 | 0 |
Perform outer indexing on dask array `x`, one dimension at a time. It is assumed that `indices` is suitably normalised (no ellipsis, etc.) | def _dask_oindex(x, indices):
axis = 0
for index in indices:
x = da.take(x, index, axis=axis)
# If axis wasn't dropped by a scalar index:
if not isinstance(index, Integral):
axis += 1
return x | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dask_getitem(x, indices):\n indices = _simplify_index(indices, x.shape)\n try:\n out = x[indices]\n except NotImplementedError:\n out = _dask_oindex(x, indices)\n # dask does culling anyway as part of optimization, but it first calls\n # ensure_dict, which copies all the keys, pres... | [
"0.6630981",
"0.65513384",
"0.625214",
"0.60793054",
"0.5899021",
"0.58930767",
"0.584284",
"0.58320093",
"0.57988596",
"0.57887155",
"0.57549566",
"0.5752462",
"0.57381105",
"0.5714157",
"0.5673157",
"0.5671463",
"0.56660175",
"0.56258434",
"0.56149113",
"0.5592513",
"0.5555... | 0.75664073 | 0 |
Determine appropriate name for callable `f` (akin to function name). | def _callable_name(f):
try:
return f.__name__
except AttributeError:
if isinstance(f, partial):
return f.func.__name__
return f.__class__.__name__ | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_callable_name(func):\n if isinstance(func, functools.partial):\n return get_callable_name(func.func)\n else:\n return func.__name__",
"def funcname(func):\n try:\n return '%s()' % func.__name__\n except AttributeError:\n return repr(func)",
"def name_func(func, num, params):... | [
"0.72435784",
"0.68455863",
"0.67366433",
"0.6668289",
"0.6594174",
"0.6500698",
"0.6486582",
"0.6452623",
"0.6450915",
"0.64476234",
"0.640476",
"0.640476",
"0.63907254",
"0.63632125",
"0.63278747",
"0.6300249",
"0.6296908",
"0.6286463",
"0.62828344",
"0.6204712",
"0.6204712... | 0.8215907 | 0 |
Transform data (`keep` is a user-specified second-stage index). | def __call__(self, data, keep):
return self.transform(data, keep) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def transform(self, dataframe: DataFrame) -> DataFrame:",
"def convert_index_select(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n index = g.get_node(op.input(\"Index\")[0])\n axis = op.attr(\"dim\")\n out = _op.transform.take(x, index, axis, mode=\"wrap\")\n g.add_node(op.output(\"Out\")... | [
"0.55106413",
"0.54800296",
"0.53775775",
"0.5366347",
"0.5208422",
"0.52055204",
"0.51941574",
"0.5188958",
"0.5141277",
"0.51131773",
"0.5106497",
"0.510201",
"0.50470954",
"0.5045959",
"0.502488",
"0.49978232",
"0.49935693",
"0.4965403",
"0.49543115",
"0.4952945",
"0.49435... | 0.5873317 | 0 |
Initialises the ``InputDevice`` object and starts ``pifacecad.SwitchEventListener``. Also, registers callbacks to the ``press_key`` method. | def __init__(self):
self.cad = pifacecad.PiFaceCAD()
self.listener = pifacecad.SwitchEventListener(chip=self.cad)
for i in range(8):
self.listener.register(i, pifacecad.IODIR_FALLING_EDGE, self.press_key)
self.listener.activate()
atexit.register(self.atexit) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_listener():\n listener = keyboard.Listener(\n on_press=on_press\n )\n listener.start()",
"def setInput(self):\n gpio.setup(self.bcm_id, gpio.IN, pull_up_down=self.pull)\n self.mode = gpio.IN",
"def startCallback (self):\n if self.hasCallback:\n return\n... | [
"0.6211504",
"0.6013261",
"0.5729392",
"0.56921124",
"0.5665231",
"0.5625194",
"0.5592315",
"0.5584637",
"0.5559815",
"0.5458288",
"0.54401493",
"0.53518695",
"0.5323422",
"0.5239154",
"0.5230533",
"0.5202559",
"0.51961684",
"0.5169283",
"0.5162053",
"0.51603216",
"0.5147439"... | 0.6555623 | 0 |
Wrapper over _get_variable_wrapper() to get weights, with weights decay factor in loss. | def _get_weights_wrapper(
name, shape, dtype=tf.float32, initializer=initializers.xavier_initializer(),
weights_decay_factor=None
):
weights = _get_variable_wrapper(
name=name, shape=shape, dtype=dtype, initializer=initializer
)
if weights_decay_factor is not None and weights_decay_factor > 0.0:
we... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _variable_with_weight_decay(self, shape, stddev, wd):\n\n initializer = tf.truncated_normal_initializer(stddev=stddev)\n var = tf.get_variable('weights', shape=shape,\n initializer=initializer)\n\n# if wd and (not tf.get_variable_scope().reuse):\n# w... | [
"0.77637476",
"0.77271247",
"0.7695299",
"0.75827795",
"0.75666004",
"0.75396603",
"0.7501326",
"0.7492685",
"0.7492685",
"0.74766654",
"0.74586254",
"0.74359125",
"0.73115295",
"0.7291328",
"0.72828215",
"0.72800875",
"0.7099408",
"0.68967044",
"0.67947274",
"0.6773664",
"0.... | 0.78818905 | 0 |
Get variables in a triple pattern | def get_vars(triple):
return set([v for k, v in triple.items() if v.startswith('?')]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def variables(s):\n result = set([])\n def walk(s):\n if is_variable(s):\n result.add(s)\n else:\n for arg in s.args:\n walk(arg)\n walk(s)\n return result",
"def variables(self):\n return tuple(flatten([a.variables for a in self.args]))",
"... | [
"0.65504885",
"0.62275475",
"0.60280734",
"0.58704334",
"0.58324313",
"0.5800524",
"0.5797577",
"0.57811517",
"0.5696571",
"0.56887144",
"0.5681551",
"0.56710494",
"0.5640094",
"0.56376356",
"0.56341684",
"0.56109506",
"0.56105363",
"0.55877894",
"0.5572952",
"0.5562894",
"0.... | 0.6889056 | 0 |
Find the first pattern in a set of triple patterns connected to a set of variables | def find_connected_pattern(variables, triples):
pos = 0
for triple in triples:
tripleVars = get_vars(triple['triple'])
if len(variables & tripleVars) > 0:
return triple, pos, variables | tripleVars
pos += 1
return None, None, variables | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def first_match(s,patterns):\n\n for p in patterns:\n m=p.match(s)\n if m:\n return p,m\n return None,None",
"def __extract_pattern_nodes(graph):\n tp_nodes = graph.subjects(RDF.type, AGORA.TriplePattern)\n for tpn in tp_nodes:\n subject = list(graph.objects(tpn, AGORA.subject)).pop()\n... | [
"0.56770355",
"0.54898673",
"0.5466495",
"0.54459643",
"0.5443161",
"0.5358336",
"0.527935",
"0.5254748",
"0.525361",
"0.5221164",
"0.52143013",
"0.51818883",
"0.51779795",
"0.5138044",
"0.5081138",
"0.5056731",
"0.49985862",
"0.49973455",
"0.4994104",
"0.49817485",
"0.498029... | 0.7778533 | 0 |
returns the frequency of a tone. formulas from | def tone_to_freq(tone):
return math.pow(2, (tone - 69.0) / 12.0) * 440.0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_tone_frequency(self):\n return self.tone_frequency",
"def tone(n, base_freq=440.0):\n # -2 -1 0 1 2 3 4 5 6 7 8 9 10 11 12\n # G G# A A# B C C# D D# E F F# G G# A\n # G Ab A Bb B C Db D Eb E F Gb G Ab A\n return base_freq * 2 ** (n/12)",
"def freq():",
"de... | [
"0.7913658",
"0.77914226",
"0.7399102",
"0.7215293",
"0.7153386",
"0.7143531",
"0.7114183",
"0.70356035",
"0.69720876",
"0.695965",
"0.69546825",
"0.6954073",
"0.69424343",
"0.69303775",
"0.6903233",
"0.6880722",
"0.68684185",
"0.6841452",
"0.6833891",
"0.6807867",
"0.6769327... | 0.87280464 | 0 |
this function adds 5 cards from the deck to the hand | def deal_poker_hand(self, deck):
for i in range(5):
self.hand.append(deck.drawCard()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_card(self, added_cards):\n\n self.hand[:0] = added_cards",
"def add_a_card_to_hand(self, hand, deck):\n hand.append(deck.pop())",
"def deal(self, num_cards=7):\n self.deck.shuffle()\n for player in self.players:\n for i in range(num_cards):\n self.h... | [
"0.72352415",
"0.7053274",
"0.6955573",
"0.6879287",
"0.6851322",
"0.68399423",
"0.68385714",
"0.6816445",
"0.68131894",
"0.680666",
"0.6777246",
"0.67551434",
"0.6744151",
"0.672975",
"0.6721525",
"0.67009944",
"0.6674014",
"0.6656667",
"0.6654007",
"0.66452503",
"0.6627303"... | 0.7919288 | 0 |
prints all cards in hand | def print_hand(self):
for card in self.hand:
card.printCard() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_hand(self):\n for card in self.hand:\n print(card)",
"def player_show_hand(self):\n for card in self.get_hand():\n print(card.get_card())",
"def show_hand(self):\n\n print(f\"{self.name.title()}'s cards are:\")\n for card in self.hand:\n pri... | [
"0.86974907",
"0.83651304",
"0.8254563",
"0.79101205",
"0.790326",
"0.78862673",
"0.7541825",
"0.74812996",
"0.7473956",
"0.7269598",
"0.7237259",
"0.7206232",
"0.72048044",
"0.71964407",
"0.7182787",
"0.7144437",
"0.71311176",
"0.70919424",
"0.6971113",
"0.6928701",
"0.68750... | 0.8903049 | 0 |
There are values in the xls that have descriptions in one cell and the value to the left; this function is a helper for those cases | def get_horizontal_field_value(xls, row_index, description_index, fields_count=1, description=None, partial_match=False):
if description:
actual_description = get_cell_value(xls, row_index, description_index)
if not actual_description:
raise ValueError("empty cell at coordinate: {}:{}".f... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_data_labels(sheet, row, col):\n final_column = col\n header_row = _FIELDS['cell_value']['header']['row']\n # Abstract this sort of thing\n header = sheet.cell(row + header_row, final_column).value\n while any(header.startswith(label) for label\n in _FIELDS['isotherm tabular']['... | [
"0.57297254",
"0.54683656",
"0.544114",
"0.5382319",
"0.53744954",
"0.5310725",
"0.5275922",
"0.52674615",
"0.5236666",
"0.5231945",
"0.5164695",
"0.51571536",
"0.51552546",
"0.5144152",
"0.5134118",
"0.5122181",
"0.51046264",
"0.51006085",
"0.5028648",
"0.50078714",
"0.50003... | 0.62578756 | 0 |
The user clicked to update their favorites. This checks whether or not to remove the athlete in the session as a favorite | def update_favorites():
check_favorite = Favorite.query.filter(Favorite.favorited_item==session["athlete_id"]).first()
route = f'/athletes/{session["athlete_id"]}'
if check_favorite is None:
new_update = Favorite(id=current_user.id, favorited_item=session["athlete_id"])
db.session.add(new... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_favourites(self, item_info, status):\r\n if status == \"Add\":\r\n return self.model.add_to_favourites(item_info)\r\n elif status == \"Remove\":\r\n return self.model.delete_from_favourites(item_info)",
"def favourite(self, favourite):\n\n self._favourite = f... | [
"0.69176424",
"0.688864",
"0.68283784",
"0.66788083",
"0.6618724",
"0.65838104",
"0.64054716",
"0.62992626",
"0.6211721",
"0.6205134",
"0.61849916",
"0.61616564",
"0.6160226",
"0.60770786",
"0.60408217",
"0.60285735",
"0.6025413",
"0.60150605",
"0.60038364",
"0.59932923",
"0.... | 0.7853664 | 0 |
Adds hopping conjugates to self.dict. | def add_conjugates(self):
# declare new dict
self.new_dict = copy.deepcopy(self.dict)
# iterate over items
for i in range(len(self.dict)):
for rel_tag, hopping in self.dict[i].items():
x, y, z, j = rel_tag
reverse_tag = (-x, -... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def buildDict(self, words):\n for word in words:\n self.word_set.add(word)\n for candidate in self.candidates(word):\n self.neighbors[candidate] += 1",
"def makeGraphDictionary(self):\n graph_dict_incomplete = {}\n # dictionary contains all links, no matt... | [
"0.5047579",
"0.5037985",
"0.502265",
"0.5007653",
"0.49975562",
"0.49723896",
"0.49697816",
"0.48349583",
"0.48268276",
"0.48259962",
"0.48032284",
"0.47840226",
"0.4778364",
"0.47758386",
"0.47497863",
"0.4746505",
"0.47428873",
"0.4741161",
"0.47241557",
"0.47111377",
"0.4... | 0.65367895 | 0 |
Shift input ids one token to the right, and wrap the last non pad token (usually <eos>). | def shift_tokens_right(self, input_ids, pad_token_id):
prev_output_tokens = input_ids.clone()
index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze()
prev_output_tokens[:, 1:] = input_ids[:, :-1]
return prev_outp... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shift_tokens_right(input_ids, pad_token_id):\r\n prev_output_tokens = input_ids.clone()\r\n index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)\r\n prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze()\r\n prev_output_tokens[:, 1:] = input_ids[:, :-1]\r\n re... | [
"0.8174382",
"0.7355779",
"0.730172",
"0.62766033",
"0.5987434",
"0.59027076",
"0.5735923",
"0.5567176",
"0.5503809",
"0.54715633",
"0.54715633",
"0.54715633",
"0.5460714",
"0.54170334",
"0.5338661",
"0.53169227",
"0.52875674",
"0.52819985",
"0.5207631",
"0.51908386",
"0.5179... | 0.81405616 | 1 |
Visualizes in a pyplot window an image and a label pair from provided paths. For reading files, Pillow is used so all paths and formats must be Pillow-compatible. The task definition is used to define colors for label ids (see panoptic_parts/utils/defs/template_v1.0.yaml). | def visualize_from_paths(image_path, label_path, task_def_path):
# sid2color is a mapping from all possible sids to colors
with open(task_def_path) as fp:
task_def = yaml.load(fp, Loader=yaml.Loader)
sid2color = task_def['sid2color']
# add colors for all sids that may exist in labels, but don't have a color... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_oneshot_task(pairs):\n fig,(ax1,ax2) = plt.subplots(2)\n ax1.matshow(pairs[0][0].reshape(300,300),cmap='gray')\n img = concat_images(pairs[1])\n ax1.get_yaxis().set_visible(False)\n ax1.get_xaxis().set_visible(False)\n ax2.matshow(img,cmap='gray')\n plt.xticks([])\n plt.yticks([])\... | [
"0.62449753",
"0.5956047",
"0.5905638",
"0.5834986",
"0.5807051",
"0.57767344",
"0.5769278",
"0.5720147",
"0.5714393",
"0.5708018",
"0.55924964",
"0.55918765",
"0.55361027",
"0.55186796",
"0.5489448",
"0.5472695",
"0.54674464",
"0.54264355",
"0.5421594",
"0.54154986",
"0.5411... | 0.80462617 | 0 |
Flying formation box calculation | def calculateFFBox(qOfFlights):
# if qOfFlights == 2: rows=2; columns=1
# else:
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_ft(self):\n \n # Create a function which is able to evaluate B**2\n ffunc = scipy.interpolate.interp1d(self.psigrid, self.e.getF()[self.tind])\n def b2_func(R, Z, psi):\n bt = ffunc(psi)/R\n br = -self.psifunc.ev(R, Z, dy=1)/R\n bz = self.p... | [
"0.64013517",
"0.6395771",
"0.6101848",
"0.60527843",
"0.5713939",
"0.5679622",
"0.5649706",
"0.5587416",
"0.5569904",
"0.55439585",
"0.5499885",
"0.5495949",
"0.54799724",
"0.547841",
"0.54695714",
"0.5405273",
"0.5377096",
"0.53553575",
"0.53518355",
"0.53368825",
"0.533627... | 0.6472747 | 0 |
Calculate track [degrees] between flights [degrees] | def calculateTrackBetweenFlights(lat1,lon1,lat2,lon2):
return Geodesic.WGS84.Inverse(lat1,lon1, lat2, lon2)['azi1'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def steps_to_angle():\n pass",
"def getFlightAngles():\n\n flight_angles = RoboCaller().call(\"getFlightAngles\", \"int\")\n for i in range(len(flight_angles)):\n flight_angles[i] = (flight_angles[i] + 2**15) % 2**16 - 2**15\n return flight_angles",
"def getTheta(self, trackWidth):\n leftDist =... | [
"0.61580503",
"0.6018533",
"0.5900657",
"0.5821334",
"0.5786489",
"0.5732119",
"0.5710629",
"0.5670621",
"0.5646427",
"0.56237847",
"0.55656844",
"0.55639803",
"0.55621606",
"0.5554743",
"0.5529634",
"0.5524188",
"0.55181473",
"0.5487468",
"0.5469649",
"0.5459477",
"0.5450718... | 0.7136936 | 0 |
Checking if tracks match | def checkTracks(track1,track2):
matched=True if abs(track1-track2) <= TRACKS_DIFFERENCE else False
return matched | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_matching_tracks(self):\n\n # 5037: Pop 101 (feat. Anami Vice) by Marianas Trench\n # 8755 : Satisfied (feat. Miguel & Queen Latifah) by Sia\n # 6699 : Un Besito Mas (feat. Juan Luis Guerra) by Jesse & Joy\n targets = {5037: '2fGFaTDbE8aS4f31fM0XE4',\n 8755: '1... | [
"0.7484322",
"0.7024405",
"0.6608633",
"0.6429307",
"0.6215027",
"0.6211795",
"0.61840993",
"0.617188",
"0.615257",
"0.61429",
"0.61068577",
"0.60989845",
"0.6077977",
"0.6003023",
"0.6001033",
"0.5992812",
"0.59848595",
"0.59843254",
"0.59752345",
"0.59705555",
"0.5970469",
... | 0.75236106 | 0 |
Returns the latitude and longitude of a point at a distance dist [m] with a degree deg from lat,lon | def getPoint(lat,lon,deg,dist):
point={}
point['LAT'] = Geodesic.WGS84.Direct(lat,lon,deg,dist)['lat2']
point['LON'] = Geodesic.WGS84.Direct(lat,lon,deg,dist)['lon2']
return point | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nearlonlat_zl(lon,lat,lonp,latp): # needed for the next function get_FVCOM_bottom_temp \r\n # approximation for small distance \r\n cp=np.cos(latp*np.pi/180.) \r\n dx=(lon-lonp)*cp\r\n dy=lat-latp \r\n xi=np.argmin(abs(dx)) \r\n yi=np.argmin(abs(dy))\r\n min_dist=111*np.sqrt(dx[xi]**2+dy[y... | [
"0.6628833",
"0.6418653",
"0.63349956",
"0.6279293",
"0.6263374",
"0.6240362",
"0.61993515",
"0.61689377",
"0.61456704",
"0.613241",
"0.6097714",
"0.6058454",
"0.6048507",
"0.60454327",
"0.6029386",
"0.6023586",
"0.60026014",
"0.6000665",
"0.59811217",
"0.59565634",
"0.592133... | 0.7408886 | 0 |
Use the current date, add ".0", to build a suffix for the Docker tag. | def _build_tag_suffix() -> str:
now = datetime.datetime.now(tz=datetime.timezone.utc).astimezone()
return now.strftime(".%Y%m%d.0") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tag_time():\n return time.strftime(\"%Y-%m-%d_%I.%M%p_\")",
"def build_image_name(self, tag):\n return self.repository_name + ':' + tag",
"def date_tag():\n import pylab\n pylab.figtext(0.04, 0.02, str(datetime.datetime.today())[:16], size=8)",
"def docker_image_tag(self, app):\n ... | [
"0.6210457",
"0.60344297",
"0.60149807",
"0.5938053",
"0.58830386",
"0.58757097",
"0.58196324",
"0.57140756",
"0.56970084",
"0.5608625",
"0.5577733",
"0.55712014",
"0.54981995",
"0.54633343",
"0.54130644",
"0.5384314",
"0.5372933",
"0.536144",
"0.53580433",
"0.5355393",
"0.53... | 0.7845467 | 0 |
Determine the sp-osi version to use; parse "wip" in a special way. | def osi_version() -> str:
if sp_osi is None:
return find.find_sp_osi_version()
if sp_osi == "wip":
return find.find_sp_osi_version() + defs.VERSION_WIP_SUFFIX
return sp_osi | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_friendly_of_version(self, ofproto):\n if ofproto.OFP_VERSION == 1:\n _of_version = \"1.0\"\n elif ofproto.OFP_VERSION == 4:\n _of_version = \"1.3\"\n else:\n _of_version = \"Unknown version \" + \\\n str(ofproto.OFP_VERSION)\n... | [
"0.57119393",
"0.5551508",
"0.5551508",
"0.5520948",
"0.52187574",
"0.5203324",
"0.51943564",
"0.5150682",
"0.51446897",
"0.5108943",
"0.5092248",
"0.50849783",
"0.5068254",
"0.5054343",
"0.50500673",
"0.5028197",
"0.5011638",
"0.5010556",
"0.5002635",
"0.4990394",
"0.4989962... | 0.6766283 | 0 |
Rebuild the container for a single component. | def build_component(component: str) -> None:
parts: Final = component.split("-", maxsplit=1)
if len(parts) != 2: # noqa: PLR2004 # this will go away with match/case
sys.exit(f"Internal error: build_component() invoked with {component=!r}")
kolla_component, kolla_service = parts
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build(self):\n self.rebuild = False\n self.redraw = True",
"def rebuild(context):\n clean(context)\n build(context, cache=False)",
"def main(\n *,\n component: list[str],\n no_cache: bool,\n pull: bool,\n quiet: bool,\n release: str,\n sp_osi: str | None,\n tag_s... | [
"0.5731498",
"0.5508753",
"0.5297388",
"0.5239832",
"0.52090853",
"0.50444996",
"0.5013207",
"0.4967973",
"0.49337313",
"0.49103594",
"0.48967397",
"0.48830613",
"0.48656592",
"0.48563054",
"0.48440525",
"0.48366556",
"0.48345816",
"0.48149598",
"0.48129988",
"0.4789811",
"0.... | 0.55748755 | 1 |
Group some characteristics by postal code area (first 3 letters) | def postalcode_area_studies():
dfpawnshop = pd.read_csv(pawnmtl.csv)
cpdic = getPostalCodeDic()
for ik in cpdic.keys():
print ik, cpdic[ik] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_postalCA(self):\n \n index = self.index\n \n if len(self.words[index]['word']) != 3:\n return None, 0\n postal = self.words[index]['word']\n index += 1\n if index == self.length:\n return None, 0\n \n if ... | [
"0.6344374",
"0.57528186",
"0.568245",
"0.56237084",
"0.5567839",
"0.5556221",
"0.54706067",
"0.54336405",
"0.53458565",
"0.5316024",
"0.5240724",
"0.5203791",
"0.5191933",
"0.51890206",
"0.51539314",
"0.5117294",
"0.5079633",
"0.5059187",
"0.50442296",
"0.50300467",
"0.50061... | 0.61772764 | 1 |
Fills in placeholders with previous entries (if any are available); should be called via ajax (similar to evaluate) | def placeholders_fill_in_last_response():
task_key = request.vars.task_key
if auth.is_logged_in():
rows = db(task_query(task_key)).select()
if len(rows) > 1:
raise RuntimeError("DB error: learn table has too many (%s) entries with task_key=%s, user_id=%s " % (len(rows), task_key, a... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reload_placeholder(update):\n pass",
"def FillForm(string_for_substitution, dictionary_of_vars):\n return_string = string_for_substitution\n for i in re.findall(\"//%%(.*)%%//\", string_for_substitution):\n return_string = re.sub(\"//%%\" + i + \"%%//\", dictionary_of_vars[i],\n ... | [
"0.55820477",
"0.55585647",
"0.53613096",
"0.5329101",
"0.5301409",
"0.51870173",
"0.5107881",
"0.50597113",
"0.50417787",
"0.50273234",
"0.5014483",
"0.4979621",
"0.49399748",
"0.48602873",
"0.48537135",
"0.4786001",
"0.4781956",
"0.4773322",
"0.47684172",
"0.47568554",
"0.4... | 0.71383286 | 0 |
func returns true only if leave can be granted | def isLeaveLeft(self,leave_type,days):
if leave_type == 1 :
return days<=self.earned_balance
elif leave_type == 2 :
return days<=self.hp_balance
elif leave_type == 3 :
return days*2<=self.hp_balance
else :
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def can_leave_team(uid):\n current_user = get_user(uid=uid)\n current_team = api.team.get_team(current_user[\"tid\"])\n if current_team[\"team_name\"] == current_user[\"username\"]:\n return False\n if current_team[\"creator\"] == uid and current_team[\"size\"] != 1:\n return False\n i... | [
"0.6118813",
"0.5955954",
"0.5953626",
"0.59532577",
"0.59532577",
"0.585946",
"0.5846823",
"0.57779187",
"0.5727706",
"0.5714538",
"0.5692051",
"0.5681357",
"0.5681357",
"0.55972326",
"0.55932873",
"0.5589373",
"0.5589373",
"0.5589373",
"0.5589373",
"0.55799425",
"0.556054",... | 0.60508454 | 1 |
For CV, extract val_perc% of the training set as the validation set. | def get_train_val(train: datasets, test_transform: transforms,
dataset: str, val_perc: float = 0.1):
dataset_length = train.data.shape[0]
directory = 'datasets/val_permutations/'
create_if_not_exists(directory)
file_name = dataset + '.pt'
if os.path.exists(directory + file_name):
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def split_train_validation_and_test(num_examples, val_percentage, test_percentage):\n all_samples_idx = np.arange(num_examples)\n np.random.shuffle(all_samples_idx)\n test_examples = int(np.ceil(num_examples * test_percentage))\n val_examples = int(np.ceil(num_examples * val_percentage))\n # Train a... | [
"0.64801544",
"0.60825336",
"0.60825336",
"0.60382175",
"0.6003134",
"0.5998898",
"0.5993488",
"0.59562606",
"0.59560895",
"0.59555876",
"0.59392226",
"0.58753514",
"0.5857634",
"0.5845691",
"0.5838349",
"0.58149654",
"0.5813357",
"0.58046526",
"0.58020353",
"0.5790992",
"0.5... | 0.7022956 | 0 |
DNS query to get TXT record list of google networks | def google_rr_dns_query(record: str) -> Optional[str]:
try:
res = resolver.resolve(record, 'TXT')
return str(res.rrset[0].strings[0], 'utf-8')
except (resolver.NoAnswer, resolver.NXDOMAIN) as error:
raise NetworkError(f'Error querying TXT record for {record}: {error}') from error | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getdns(self):\r\n filename = r\"dns_profiles.txt\"\r\n fp = open(filename)\r\n data = []\r\n for lines in fp.readlines():\r\n data.append(list(map(float, lines.split())))\r\n #use the fundamental string function 'append','split' to extract floating point number... | [
"0.6515697",
"0.6348239",
"0.6325451",
"0.6286375",
"0.62465525",
"0.6237042",
"0.6229455",
"0.6229455",
"0.6196531",
"0.6195094",
"0.6082494",
"0.60716206",
"0.60715824",
"0.6052533",
"0.60521525",
"0.60312563",
"0.60121626",
"0.5998611",
"0.5934739",
"0.59268296",
"0.590494... | 0.6482904 | 1 |
Fill the missing values (NaN) in column with the mean value of the group the row belongs to. The rows are grouped based on the values of another column | def fill_with_group_average(df, group, column):
#df=None
df[column].fillna(df.groupby(group)[column].transform('mean'), inplace=True)
return df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fill_mean(df):\n df = df.fillna(df.mean().fillna(0).to_dict())\n return df",
"def mean_impute(self, column_val):\n mean = np.mean(column_val)\n column_val = column_val.fillna(mean)\n return column_val",
"def filling_nan_values(df: pd.DataFrame) -> pd.DataFrame: \n ratio = df.c... | [
"0.733223",
"0.6911329",
"0.6804728",
"0.6377485",
"0.63724154",
"0.6184612",
"0.6125718",
"0.61130387",
"0.61074495",
"0.60799503",
"0.6020397",
"0.6011322",
"0.6011322",
"0.59590447",
"0.58487135",
"0.5830829",
"0.5822329",
"0.58109444",
"0.57864994",
"0.57757205",
"0.57416... | 0.8269875 | 0 |
Return all the rows (with all columns) where the value in a certain 'column' is greater than the average value of that column: row where row.column > mean(data.column) | def get_rows_greater_than_avg(df, column):
df= df[df[column] > df[column].mean()]
return df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_rows_by_highest_abs_val_mean(df, max_=MAX_NUM_ROWS):\n top_rows = numpy.abs(df.mean(axis=1)).nlargest(max_)\n return df.ix[top_rows.index]",
"def demo_one_filter():\n data = [1.3, 2.7, 0.8, 4.1, 4.3, -0.1]\n avg = np.mean(data)\n print \"average value is:\", avg\n\n # create iterator... | [
"0.6379522",
"0.6149635",
"0.58458763",
"0.5754565",
"0.56743234",
"0.56359506",
"0.56359506",
"0.56054884",
"0.56054884",
"0.54775643",
"0.5467246",
"0.5397171",
"0.5395946",
"0.5346881",
"0.5316547",
"0.5313301",
"0.5276455",
"0.52631354",
"0.524703",
"0.52108675",
"0.52035... | 0.8586459 | 0 |
Takes a junitxml filename or path to said file. From this file it extracts the testsuite node and adds it to the junit_docker.xml file; in the process it adds a name to the testsuite (the suite param) and changes the classname from tests. to {suite}. Finally, it removes the original file. This is because jenkins was not... | def merge_to_junit_xml(filename: str, suite: str) -> None:
junit_docker = Path("junit_docker.xml")
if junit_docker.exists():
tree = ElementTree.parse(junit_docker)
root = tree.getroot()
for testsuite in root:
if testsuite.get("name", None) == suite:
root.remov... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def as_xunit(self, filename):\n suite_node = ElementTree.Element(\"testsuite\")\n suite_node.attrib[\"name\"] = self.testsuite.name\n suite_node.attrib[\"tests\"] = str(self.testsuite.ntests)\n suite_node.attrib[\"failures\"] = str(self.testsuite.nfailed)\n if self.testsuite.pack... | [
"0.6713883",
"0.58498186",
"0.5828557",
"0.57560843",
"0.5434094",
"0.54201895",
"0.54086524",
"0.53342706",
"0.532954",
"0.53058827",
"0.52480894",
"0.5212408",
"0.52075857",
"0.5189893",
"0.51804215",
"0.5173441",
"0.5135969",
"0.5100733",
"0.5098095",
"0.50912726",
"0.5074... | 0.84821534 | 1 |
get all versions of inmanta packages into a freeze file, to make the environment inside docker like the one outside | def pip_lock_file() -> None:
with open("requirements.freeze.all", "w") as ff:
subprocess.check_call([sys.executable, "-m", "pip", "freeze"], stdout=ff)
with open("requirements.freeze.tmp", "w") as ff:
subprocess.check_call(["grep", "inmanta", "requirements.freeze.all"], stdout=ff)
# pip free... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def freeze():\n dependencies = sh('pip freeze', capture=True).split(os.linesep)\n\n with open('requirements.txt', 'w') as file:\n for dep in dependencies:\n if not dep.startswith('bones-testing'):\n file.write(dep+'\\n')",
"def freeze():\n proc = subprocess.run(['pip', '... | [
"0.71111107",
"0.6972196",
"0.6772253",
"0.6622416",
"0.6376752",
"0.62745297",
"0.6273083",
"0.62702495",
"0.6251193",
"0.60614055",
"0.5943885",
"0.592271",
"0.58881456",
"0.5876001",
"0.58746445",
"0.57682693",
"0.5724752",
"0.570974",
"0.56927276",
"0.56916934",
"0.567225... | 0.70190084 | 1 |
Return the list of docker files that should be used to run the tests against. | def _get_dockerfiles_for_test() -> str:
project_root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
dockerfiles_dir = os.path.join(project_root_dir, "dockerfiles")
if sys.version_info[0:2] == (3, 6):
return os.path.join(dockerfiles_dir, "centos7.Dockerfile")
elif sys.version_... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_docker_files():\n docker_dirs = []\n if os.path.exists(TMP_DIR):\n docker_dirs = [os.path.join(TMP_DIR, d) for d in os.listdir(TMP_DIR)\n if os.path.isdir(os.path.join(TMP_DIR, d)) and\n not d.endswith('_working')]\n docker_dirs.sort(key=lamb... | [
"0.7441755",
"0.64881",
"0.64146507",
"0.63096654",
"0.6147907",
"0.61174136",
"0.6047549",
"0.6022984",
"0.6019697",
"0.59687704",
"0.59673595",
"0.5958125",
"0.5927277",
"0.5920401",
"0.5885158",
"0.58813536",
"0.58701384",
"0.58612794",
"0.58572733",
"0.5732197",
"0.573187... | 0.75666106 | 0 |
log_loss / cross_entropy / categorical_crossentropy. X is the logits; y is labels (num_examples, 1). Note that y is not a one-hot encoded vector. It can be computed as y.argmax(axis=1) from one-hot encoded vectors of labels if required. | def cross_entropy(X, y, using_onehot=True):
M = y.shape[0]
if using_onehot :
log_likelihood = -np.log(np.max(X * y, -1))
else:
log_likelihood = -np.log(X[range(M), y]) # pick the logit for each example's true class
loss = np.sum(log_likelihood) / M
return loss | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def softmax_cross_entropy(y, label):\r\n losses = np.sum((- np.log(y + g_epsilon) * label), axis=1)\r\n return losses\r\n pass",
"def cross_entropy_loss(self, logits, labels):\n return F.cross_entropy(logits, labels)",
"def softmax_cross_entropy_loss(logit, labels):\n p = softmax(logit)\n ... | [
"0.7834664",
"0.7679004",
"0.7651251",
"0.7648797",
"0.7558988",
"0.7466316",
"0.74370956",
"0.73649174",
"0.7329104",
"0.731339",
"0.7216395",
"0.72037876",
"0.71983844",
"0.71960145",
"0.71808535",
"0.7131986",
"0.7122538",
"0.7113255",
"0.70872647",
"0.7056309",
"0.7031268... | 0.8200977 | 0 |
Goes through the first column of input table and returns the first sequence of dates it finds. | def get_dates(raw_table) -> "list of dates":
dates = []
found_first = False
for i, dstr in enumerate([raw_table[i][0] for i in range(0, len(raw_table))]):
if dstr:
if len(dstr.split("/")) == 3:
d = datetime.datetime.strptime(dstr, '%m/%d/%Y')
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def min_date(self, rows: List[Row], column: DateColumn) -> Date:\n cell_values = [row.values[column.name] for row in rows if row.values[column.name] is not None]\n if not cell_values:\n return Date(-1, -1, -1)\n if not all([isinstance(value, Date) for value in cell_values]):\n ... | [
"0.6035555",
"0.60046345",
"0.5898772",
"0.5828134",
"0.58066106",
"0.5686725",
"0.5653263",
"0.56269705",
"0.56151915",
"0.5590684",
"0.54638934",
"0.54195327",
"0.54144526",
"0.5412872",
"0.5412872",
"0.5394708",
"0.5388206",
"0.5380963",
"0.5372471",
"0.5351405",
"0.529901... | 0.65179384 | 0 |
Returns the list of tweets with a given hashtag in JSON format | def getByHashtags(hashtag):
# set page_limits. The default is 1
pages_limit = request.args.get('pages_limit') or 1
pages_limit = int(pages_limit)
raw_response = get_response(tw_api, 'search/tweets', { 'q': '#' + hashtag, 'count': 100 }, pages_limit)
list_response = convert_resp2list(raw_response)... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_by_hashtag(tweets: list, hashtag: str) -> list:\n tweets_with_hashtag = {} # findall(): Kui tekstis on rohkem kui üks regulaaravaldisele vastav alamsõne saab kõikide vastete järjendi moodustada funktsiooniga findall()\n pattern = r\"#\\w+\" # \\w : tähed, numbrid, alakriips, + : 1 või rohkem\n ... | [
"0.76560086",
"0.7615131",
"0.7581406",
"0.74934655",
"0.7097482",
"0.6735754",
"0.66883725",
"0.6683702",
"0.6654017",
"0.6516785",
"0.6509575",
"0.6500619",
"0.64606947",
"0.64300966",
"0.6401342",
"0.6375272",
"0.63466364",
"0.62755454",
"0.62103456",
"0.62082505",
"0.6136... | 0.8051795 | 0 |
Test density function for multiple values at once | def test_density_multiple(self):
earth = PREM()
radii = np.linspace(0, 6500e3, 6501)
expected = [earth.density(r) for r in radii]
assert np.array_equal(earth.density(radii), expected) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_density_multiple(self):\n earth = CoreMantleCrustModel()\n radii = np.linspace(0, 6500e3, 6501)\n expected = [earth.density(r) for r in radii]\n assert np.array_equal(earth.density(radii), expected)",
"def test_probability_density(self):\n # Setup\n copula = Gau... | [
"0.72719777",
"0.6636601",
"0.62585735",
"0.6246495",
"0.6211138",
"0.61260253",
"0.6116143",
"0.61090654",
"0.60379136",
"0.603717",
"0.60360193",
"0.6025429",
"0.5937065",
"0.5931737",
"0.58895713",
"0.58884156",
"0.58661264",
"0.5830155",
"0.58052385",
"0.5774332",
"0.5773... | 0.73319197 | 0 |
Test density function for multiple values at once | def test_density_multiple(self):
earth = CoreMantleCrustModel()
radii = np.linspace(0, 6500e3, 6501)
expected = [earth.density(r) for r in radii]
assert np.array_equal(earth.density(radii), expected) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_density_multiple(self):\n earth = PREM()\n radii = np.linspace(0, 6500e3, 6501)\n expected = [earth.density(r) for r in radii]\n assert np.array_equal(earth.density(radii), expected)",
"def test_probability_density(self):\n # Setup\n copula = GaussianMultivariat... | [
"0.73319197",
"0.6636601",
"0.62585735",
"0.6246495",
"0.6211138",
"0.61260253",
"0.6116143",
"0.61090654",
"0.60379136",
"0.603717",
"0.60360193",
"0.6025429",
"0.5937065",
"0.5931737",
"0.58895713",
"0.58884156",
"0.58661264",
"0.5830155",
"0.58052385",
"0.5774332",
"0.5773... | 0.72719777 | 1 |
Ensure git-fusion-user has permissions to write to depot. | def check_p4gf_user_write_permission(self):
gf_client_map = P4.Map()
gf_client_map.insert("//...", "//client/...")
utp = p4gf_protect.UserToProtect(self.ctx.p4)
prot = utp.user_to_protect(p4gf_const.P4GF_USER)
gf_write_filter = prot.map_for_perm(p4gf_protect.WRITE)
gf_wri... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_writable_(self):\n self._check_within_context_()\n if self._mode != 'w':\n raise Exception('Cannot update database: read only mode')",
"def fix_permissions(cls):\n\n try:\n build_dir = environ[\"TRAVIS_BUILD_DIR\"]\n commands = [\n \... | [
"0.61675584",
"0.59262735",
"0.5913495",
"0.57361317",
"0.57273996",
"0.5724241",
"0.5694993",
"0.56913006",
"0.56799525",
"0.56661433",
"0.56573737",
"0.5642319",
"0.5618935",
"0.56070924",
"0.5567714",
"0.5564018",
"0.5546084",
"0.55418223",
"0.5533661",
"0.54613906",
"0.54... | 0.66640127 | 0 |
Return a dict of depot_path => user of any locked files. | def _find_locked_by(self):
fstat_flags = NTR('otherLock | otherOpen0 & headType=*+l')
any_locked_files = {} # depot_path : user
for branch_chunk in self.ctx.iter_writable_branch_chunks():
# Skip any newly defined branches: they're new, won't contain any
# files yet, and ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getLockInfoOfNonDerivedFiles(self, ids, wspLockId):\n sql = \"\"\"\n SELECT cdb_file.cdb_lock,\n cdb_file.cdb_lock_id,\n cdb_file.cdbf_object_id,\n cdb_file.cdb_object_id,\n angestellter.name AS mapped_cdb_lock_name\n FROM\n ... | [
"0.5967704",
"0.58132005",
"0.5756078",
"0.5530532",
"0.55011237",
"0.54566556",
"0.538601",
"0.5357067",
"0.5334322",
"0.5324496",
"0.53202266",
"0.5259777",
"0.5258062",
"0.5243739",
"0.5193406",
"0.5120801",
"0.50814766",
"0.5075854",
"0.5063202",
"0.50581175",
"0.5055285"... | 0.7146783 | 0 |
Ensure the entire sequence of commits will (likely) go through without any errors related to permissions or locks. Raises an exception if anything goes wrong. | def check_commits(self, commits):
LOG.info('Checking Perforce permissions and locks')
self.ctx.checkpoint("copy_to_p4._preflight_check")
# Stop if files are opened in our repo client
# We expect this to be none, since we have the view lock
opened = self.ctx.p4.run(['opened', '-m... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_commit(self):\n # TODO: Test errors while committing and recovery\n pass",
"def commit_unless_managed(self):\n if not self.is_managed():\n self.commit()",
"def check_commit(self, commit):\n # pylint: disable=too-many-branches\n if LOG.isEnabledFor(logging.... | [
"0.70281065",
"0.62827456",
"0.61202234",
"0.6114499",
"0.6076478",
"0.6076178",
"0.60664564",
"0.6021291",
"0.6021291",
"0.6021291",
"0.6021291",
"0.6021291",
"0.6007565",
"0.59706897",
"0.596416",
"0.5880884",
"0.5870122",
"0.5866803",
"0.5857918",
"0.58138025",
"0.5733545"... | 0.6676914 | 1 |
Prior to copying a commit, perform a set of checks for a specific branch to ensure the commit will (likely) go through successfully. | def check_commit_for_branch( self
, commit
, branch_id
, any_locked_files
, case_conflict_checker ):
rev = commit['sha1']
if LOG.isEnabledFor(logging.DEBUG):
LOG.debug(... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_branch(opt, params):\n\n # Check the current branch and hash\n _get_branch(opt)\n\n if params.git_branch != opt.git_branch or params.git_hash != opt.git_hash:\n msg = 'You are not on the right branch or commit. Please run the following in the repository: \\n'\n msg += f'git checko... | [
"0.66593283",
"0.653673",
"0.64837676",
"0.64365125",
"0.63583773",
"0.628439",
"0.6173651",
"0.6014642",
"0.6005059",
"0.5976242",
"0.5954787",
"0.58620733",
"0.5859521",
"0.5851176",
"0.58046544",
"0.57973",
"0.579227",
"0.57077634",
"0.56944895",
"0.56923765",
"0.56910425"... | 0.72327125 | 0 |
If not already switched to and synced to the correct branch for the given commit, do so. If this is a new lightweight branch, perform whatever creation we can do at preflight time. We don't have commits/marks for any not-yet-submitted parent commits, so the depot_branch_info will often lack a correct parent or fully popu... | def ensure_branch_preflight(self, commit, branch_id):
log = LOG.getChild('ensure_branch_preflight')
branch = self.ctx.branch_dict().get(branch_id)
# branch should never be None here. p4gf_branch_id.Assigner() must
# create Branch objects for each assignment.
if self._curren... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_commit_for_branch( self\n , commit\n , branch_id\n , any_locked_files\n , case_conflict_checker ):\n rev = commit['sha1']\n if LOG.isEnabledFor(logging.DEBUG):\n ... | [
"0.6567766",
"0.615474",
"0.6144223",
"0.6067799",
"0.5996831",
"0.5930555",
"0.5926467",
"0.58662117",
"0.58450073",
"0.5838003",
"0.58024377",
"0.57683945",
"0.5761088",
"0.574812",
"0.56977725",
"0.5657013",
"0.56563455",
"0.56526315",
"0.5631415",
"0.56144863",
"0.5603229... | 0.6462633 | 1 |
Does this branch map our placeholder file? Returns non-False if mapped, None or empty string if not. | def _is_placeholder_mapped(self):
return self.ctx.gwt_path(
p4gf_const.P4GF_EMPTY_CHANGELIST_PLACEHOLDER).to_depot() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isBasedInHiddenFile(self):\n #type: () -> Optional[bool]\n return (\n None if self.realFileName is None #if before\n else self.realFileName != self.fileName\n )",
"def fileProcessed(self,fileInstance):\n if hasattr(fileInstance,\"name\"): name=fileInstance.n... | [
"0.56640106",
"0.5638936",
"0.55160695",
"0.5359276",
"0.5341279",
"0.5287153",
"0.5251435",
"0.52480775",
"0.521388",
"0.5208746",
"0.5203581",
"0.51782346",
"0.51517147",
"0.5145897",
"0.51427215",
"0.51353425",
"0.5104298",
"0.5096157",
"0.5081865",
"0.506784",
"0.5066481"... | 0.693119 | 0 |
If any of the files in this commit intersect any fully populated branch (other than the current branch), then reject this commit. Shared/common/overlapping paths in branch views must be readonly from Git. Otherwise you end up with a Git push of commit on one Git branch inserting changes into other Git branches behind G... | def _check_overlap(self, fe_commit):
# +++ Avoid O(b branches * r rev) checks when
# overlap is impossible because current branch
# overlaps no other branch.
if self._current_branch not in self._overlapping_branch_list():
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_commit_for_branch( self\n , commit\n , branch_id\n , any_locked_files\n , case_conflict_checker ):\n rev = commit['sha1']\n if LOG.isEnabledFor(logging.DEBUG):\n ... | [
"0.70267016",
"0.6255848",
"0.6211556",
"0.6012206",
"0.5753158",
"0.57477814",
"0.574088",
"0.5717972",
"0.57146573",
"0.5703639",
"0.5662798",
"0.5624615",
"0.5610022",
"0.5576558",
"0.5571656",
"0.5534969",
"0.5534018",
"0.5515806",
"0.5472234",
"0.5436517",
"0.5434736",
... | 0.7310925 | 0 |
If this is a stream branch, check that all files in the commit are writable. If any of the files is not writable then reject this commit. | def _check_stream_writable(self, fe_commit):
if not self._current_branch.stream_name:
return
prefix = self._current_branch.writable_stream_name + '/'
for fe_file in fe_commit['files']:
gwt_path = fe_file['path']
depot_path = self.ctx.gwt_path(gwt_path).to_de... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_stream_in_classic(self, fe_commit):\n if self._current_branch.stream_name:\n return\n\n depot_re = re.compile(r'^//([^/]+)/([^/]+)/.*$')\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_path(gwt_path).to... | [
"0.730989",
"0.62108445",
"0.6002569",
"0.59505546",
"0.5941183",
"0.5897741",
"0.5511451",
"0.5413663",
"0.5360557",
"0.53083956",
"0.52989227",
"0.5220939",
"0.5215751",
"0.5145717",
"0.5142257",
"0.50900465",
"0.5081833",
"0.5081318",
"0.50811625",
"0.50787395",
"0.5073544... | 0.8073145 | 0 |
If this is a classic branch, check that none of the files in the commit are in stream depots and thus not writable. If any of the files is not writable then reject this commit. | def _check_stream_in_classic(self, fe_commit):
if self._current_branch.stream_name:
return
depot_re = re.compile(r'^//([^/]+)/([^/]+)/.*$')
for fe_file in fe_commit['files']:
gwt_path = fe_file['path']
depot_path = self.ctx.gwt_path(gwt_path).to_depot()
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_stream_writable(self, fe_commit):\n if not self._current_branch.stream_name:\n return\n prefix = self._current_branch.writable_stream_name + '/'\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_path(gwt_... | [
"0.73341",
"0.6207531",
"0.6191185",
"0.6037307",
"0.5875063",
"0.5831943",
"0.56964684",
"0.5686395",
"0.56552875",
"0.56157583",
"0.5488412",
"0.5477116",
"0.54372096",
"0.5435764",
"0.5399177",
"0.53548175",
"0.5332237",
"0.53187644",
"0.5318055",
"0.53075296",
"0.52779925... | 0.71837646 | 1 |
Return True if the named path was introduced in the HEAD commit. | def _path_added(self, path, fecommit):
# Because git-fast-export includes the entire tree in its output,
# regardless of whether the requested commit is the first in the
# branch or not, we need to check the repo itself to be certain if
# this path was truly introduced in this commit, or... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def git_has_object(project: Project, name: str) -> bool:\n ret = project.git(\"rev-parse\", \"--verify\", name, _ok_code=[0, 128])\n return ret.exit_code == 0",
"def test_heads_contains_true(repository: Repository) -> None:\n assert repository.head.name in repository.heads",
"def _is_branch(self, refe... | [
"0.6516155",
"0.63896835",
"0.6222142",
"0.6222135",
"0.60880154",
"0.60826945",
"0.6018103",
"0.5985125",
"0.59479386",
"0.5940511",
"0.59299994",
"0.59243816",
"0.59173447",
"0.5912473",
"0.5822935",
"0.58216715",
"0.5808256",
"0.5804621",
"0.5717881",
"0.56875396",
"0.5678... | 0.6642211 | 0 |
We have changed our branch_dict (or more likely finish_branch_definition()ed a branch within that dict) in a way that invalidates any cached calculations that consumed the branch dict. | def _invalidate_branch_cache(self):
self._cached_overlapping_branch_list = None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def finish_branch_definition(self, commit, branch):\n assert self._finish_branch_definition\n self._finish_branch_definition.finish_branch_definition(commit, branch)\n self._invalidate_branch_cache()",
"def tree_removeDeadBranches():\n nonlocal d_tree\n d_tree = { k : v... | [
"0.60243195",
"0.56031704",
"0.548174",
"0.54290056",
"0.52304393",
"0.52058154",
"0.5188001",
"0.5129975",
"0.5117015",
"0.511483",
"0.51062316",
"0.5055685",
"0.5053761",
"0.50004244",
"0.49610597",
"0.4951123",
"0.49497518",
"0.4948754",
"0.49486953",
"0.49447733",
"0.4924... | 0.6802069 | 0 |
Return a list of fully populated branches that overlap other fully populated branches. Caches the result because we check every file revision path for overlap, and for huge repos with thousands of nonoverlapping LW branches, just iterating through the branch list starts to waste measurable CPU time. | def _overlapping_branch_list(self):
if self._cached_overlapping_branch_list is not None:
return self._cached_overlapping_branch_list
have_overlap = set()
for outer in p4gf_branch.iter_fp_non_deleted(self.ctx.branch_dict()):
outer_lhs = P4.Map()
outer_lhs.inse... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_branches_to_merge(branch):\n branches = [(branch, branch.subfolder or '')]\n for dependency in branch.branch_dependency_ids:\n branches.append((dependency.merge_with_branch_id, dependency.merge_subfolder or ''))\n return branches[::-1]",
"def branches_full(config, args):\... | [
"0.6201244",
"0.6164248",
"0.61058915",
"0.6089111",
"0.60666037",
"0.58286184",
"0.5817222",
"0.574742",
"0.57133055",
"0.570316",
"0.5672118",
"0.5664436",
"0.56305796",
"0.55732614",
"0.55578953",
"0.54982585",
"0.5476394",
"0.5456892",
"0.5438345",
"0.5427029",
"0.5400545... | 0.7866057 | 0 |
If gfe_file is under Git LFS control, require that its large file content exist somewhere, either in our upload cache (it's new!) or in depot dedupe storage (already got it). | def _check_lfs(self, fe_commit, fe_file):
# Deleted files carry no LFS pointer.
if "sha1" not in fe_file:
return
# Symlinks and non-files carry no LFS pointer.
if fe_file.get("mode") not in [ FileModeStr.PLAIN
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_file_managed_keep_source_false_http(\n file, tmp_path, remote_grail_scene33, modules\n):\n name = str(tmp_path / \"testfile\")\n # Run the state\n ret = file.managed(\n name=name,\n source=remote_grail_scene33.url,\n source_hash=remote_grail_scene33.hash,\n keep_sou... | [
"0.5939489",
"0.5912179",
"0.58820075",
"0.5847848",
"0.58317304",
"0.5623303",
"0.5542459",
"0.5534037",
"0.5512348",
"0.5486995",
"0.54663646",
"0.54334575",
"0.5396594",
"0.5396052",
"0.5381335",
"0.53738886",
"0.53469056",
"0.5326318",
"0.530705",
"0.530241",
"0.52834505"... | 0.6616472 | 0 |
Init view map for client. | def init_view(self):
self.view_map = self.ctx.clientmap | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def map_viewing_client():\n\n # Read configuration settings\n config = gis.get_config()\n if config.opt_gis_layout == 1:\n window = True\n else:\n window = False\n\n # @ToDo Make Configurable\n toolbar = True\n\n map = define_map(window=window, toolbar=toolbar, config=config)\n\n... | [
"0.73483825",
"0.6393654",
"0.6328302",
"0.62651557",
"0.6215494",
"0.621435",
"0.6202144",
"0.61714244",
"0.60852766",
"0.60486645",
"0.5990444",
"0.595002",
"0.59376174",
"0.593404",
"0.5898968",
"0.58576566",
"0.5853716",
"0.5853698",
"0.5803742",
"0.5777687",
"0.576832",
... | 0.90606475 | 0 |
Run list of paths through filter and set list of paths that don't pass. | def filter_paths(self, blobs):
# check against one map for read, one for write
# if check fails, figure out if it was the view map or the protects
# that caused the problem and report accordingly
self.author_denied = []
self.pusher_denied = []
self.foruser_denied = []
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exclude_filter(excl_filter, paths):\n misses = set()\n for p in paths:\n if re.search(excl_filter, p) is None:\n misses.add(p)\n\n return misses",
"def clean_dir_filtered(dr, filters):\n # type: (path, List[str]) -> None\n for f in os.listdir(dr):\n for fltr in filters... | [
"0.6571253",
"0.6168529",
"0.6118564",
"0.60856956",
"0.6034449",
"0.6013353",
"0.5978687",
"0.58510804",
"0.58142954",
"0.58128434",
"0.57699585",
"0.5687041",
"0.5659206",
"0.56480026",
"0.5642498",
"0.5623489",
"0.5614679",
"0.5568684",
"0.556686",
"0.550684",
"0.5481009",... | 0.6176403 | 1 |
Print the given message to the error stream, as well as to the log. | def _print_error(msg):
sys.stderr.write(msg + '\n')
LOG.error(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def error(message):\n print(message, file=sys.stderr)",
"def log_error(message):\n sys.stderr.write(message)\n sys.stderr.flush()",
"def log_error(self, message):\n u = six.text_type\n log_line = (\n u('{0:%Y-%m-%d %H:%M:%S} [FALCON] [ERROR] {1} {2}?{3} => {4}\\n').\n ... | [
"0.7898539",
"0.7735587",
"0.76756036",
"0.75126797",
"0.74449044",
"0.7419306",
"0.7378443",
"0.7376303",
"0.73306",
"0.73223484",
"0.7314951",
"0.7312553",
"0.72963977",
"0.7293763",
"0.72898746",
"0.7254633",
"0.7225247",
"0.7214274",
"0.72096974",
"0.7171879",
"0.7138568"... | 0.7820772 | 1 |
Check if c will be rejected by P4D as nonprintable. P4D rejects "nonprintable" characters with | def is_p4d_printable(c):
if ord(c) < 0x20:
return False
if ord(c) == 0x7F:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_printable(c):\n return ord(c)>=32 or c in ['\\r','\\n', '\\t']",
"def is_printable(s):\n for c in s:\n if c not in PRINTABLE_CHARACTERS:\n return False\n return True",
"def is_printable(b):\n return b in e(string.printable)",
"def is_string_printable(string_):\n return... | [
"0.8065967",
"0.7819546",
"0.7561507",
"0.70877075",
"0.6687797",
"0.66823083",
"0.66677797",
"0.66154623",
"0.6585473",
"0.65472776",
"0.6435375",
"0.64282465",
"0.6415623",
"0.63944894",
"0.6227201",
"0.6072948",
"0.60558593",
"0.60486054",
"0.6011463",
"0.59352165",
"0.592... | 0.7970663 | 1 |