| query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, lengths 4–10) | document_rank (string, 2 classes) |
|---|---|---|---|---|---|---|
Pad or truncate a list `x` to length `maxlen`, padding with `pad_value`. | def list_pad_or_truncate(x, maxlen, pad_value=None):
length = len(x)
if maxlen > length:
x += [pad_value] * (maxlen - length)
elif maxlen < length:
x = x[:maxlen]
return x | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pad_with_zero(list, max_length, pad_type):\n padded_list = pad_sequences(list, maxlen=max_length, padding=pad_type, truncating='post')\n return padded_list",
"def pad_tokens(x, max_length, pad_token_id,\n truncate_from=\"left\",\n pad_from=\"left\"):\n assert truncate_fro... | [
"0.7232516",
"0.7194974",
"0.70571303",
"0.7005884",
"0.69063616",
"0.69063616",
"0.6899334",
"0.68993306",
"0.68805975",
"0.68804926",
"0.68802965",
"0.6851657",
"0.6702354",
"0.66523",
"0.6640961",
"0.65956634",
"0.65647626",
"0.65528095",
"0.6521586",
"0.6515963",
"0.64783... | 0.8817511 | 0 |
Return list of rainfall for the previous year | def precipitation():
last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
last_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
rain = session.query(Measurement.date, Measurement.prcp).\
filter(Measurement.date > last_year).\
order_by(Measurement.date... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def xbrl_years(self):\n return [year for year in self.years if year >= 2021]",
"def calculate_iron_hemoglobin_time_lag_effective_fraction(df, years):\n final = pd.DataFrame()\n data = df.reset_index()\n for i in list(range(0, len(years))):\n current = (data.loc[data.year == years[i]]\n ... | [
"0.59791404",
"0.5774895",
"0.57245696",
"0.5675499",
"0.55384594",
"0.5491503",
"0.5471699",
"0.5403219",
"0.54018354",
"0.5384144",
"0.5367263",
"0.5363518",
"0.5355815",
"0.5304118",
"0.5281944",
"0.52812725",
"0.5260535",
"0.5254466",
"0.5250503",
"0.52311355",
"0.5220862... | 0.61494935 | 0 |
Create mode of given scale | def scale_to_mode(scale, transpose=0):
# find mode scheme based on original scale
l = scale[transpose:]
# create complete 16-elements list of steps
i = ceil((16 - len(l)) / 12)
l += scale * i
l = list(accumulate(l))
n = l[0]
l = list(map(lambda x: x - n, l))
return l[:16] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setScale(self, mode='ACC', scale=0):\r\n\t\tif mode.upper() == 'ACC':\r\n\t\t\treg = 0x1C\r\n\t\telif mode.upper() == 'GYR':\r\n\t\t\treg = 0x1B\t\t\r\n\t\telse:\r\n\t\t\treturn False\r\n\t\tcurrentVal = self.read(reg)\r\n\t\tcurrentVal = self.dec2BinList(currentVal)\r\n\t\tscale = self.dec2BinList(value=scale... | [
"0.6633015",
"0.6360772",
"0.62977636",
"0.62056977",
"0.6008764",
"0.60017097",
"0.5995628",
"0.5878364",
"0.5820572",
"0.58104134",
"0.5709557",
"0.56858575",
"0.5675628",
"0.56720704",
"0.5600536",
"0.55899876",
"0.5563008",
"0.5555391",
"0.55215645",
"0.55069333",
"0.5503... | 0.6723477 | 0 |
This function is from the latest version of SCons to support older SCons versions. Configure check for a specific program. Check whether program prog_name exists in path. If it is found, returns the path for it, otherwise returns None. | def CheckProg(context, prog_name):
context.Message("Checking whether %s program exists..." % prog_name)
path = context.env.WhereIs(prog_name)
context.Result(bool(path))
return path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_program(binary_name):\n pth = os.path.abspath(__file__)\n\n # Split off the name and the directory...\n pth, notused = os.path.split(pth)\n pth, notused = os.path.split(pth)\n pth = os.path.join(pth, \"programs\", binary_name)\n pth = os.path.normpath(pth)\n\n log.debug(\"Checking for... | [
"0.67661804",
"0.66921204",
"0.65894896",
"0.6528314",
"0.64908946",
"0.6469822",
"0.641857",
"0.63612264",
"0.6318409",
"0.6250026",
"0.61892205",
"0.61833847",
"0.61833847",
"0.61687654",
"0.61340445",
"0.61195254",
"0.61188084",
"0.6089571",
"0.6089144",
"0.6088777",
"0.60... | 0.7263758 | 0 |
This function is from SCons but extended with additional flags, e.g. the extra_libs. Another (more sophisticated) test for a library. Checks if library and header are available for language (may be 'C' or 'CXX'). Call may be a valid expression _with_ a trailing ';'. As in CheckLib, we support library=None, to test if... | def CheckLibWithHeader(context, libs, header, language,
call = None, extra_libs = None, autoadd = 1):
prog_prefix, dummy = \
SCons.SConf.createIncludesFromHeaders(header, 0)
if libs == []:
libs = [None]
if not SCons.Util.is_List(libs):
libs = [l... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_library(self, **kw):\n\tself.check(\n\t\tcompile_filename = [],\n\t\tfeatures = 'link_lib_test',\n\t\tmsg = 'Checking for libraries',\n\t\t)",
"def check_libraries(env):\n # Detect OS X python installation, and attempt to correct for it.\n if os.uname()[0] == 'Darwin':\n env.Replace(SHLINK... | [
"0.70988935",
"0.6297976",
"0.60500425",
"0.56923157",
"0.55388916",
"0.5488028",
"0.53949934",
"0.5367634",
"0.532435",
"0.5322072",
"0.5235258",
"0.5161452",
"0.5156778",
"0.51427037",
"0.5137287",
"0.5064834",
"0.501241",
"0.4967916",
"0.49471545",
"0.49432126",
"0.4930325... | 0.71553355 | 0 |
Returns a playlist with the given name or raises NotFound. | def playlist(self, title):  # noqa
for item in self.playlists():
if item.title == title:
return item
raise NotFound('Invalid playlist title: %s' % title) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getPlaylist(self,name):\n playlist = self.getAllPlaylists(name)\n return playlist[0] if playlist else None",
"def find_playlist(playlist_name):\n\n playlists = spotifyObject.user_playlists(config.USERNAME)\n\n for playlist in playlists['items']:\n if playlist['name'] == playlist_na... | [
"0.7941071",
"0.7861843",
"0.78293544",
"0.7535346",
"0.69100803",
"0.6823972",
"0.6806134",
"0.6756279",
"0.67378354",
"0.6722012",
"0.6667086",
"0.66420937",
"0.65845215",
"0.65714145",
"0.6559312",
"0.65355885",
"0.65215516",
"0.6419069",
"0.63462394",
"0.63388675",
"0.629... | 0.79534113 | 0 |
List all active sessions. | def sessions(self):
return utils.listItems(self, '/status/sessions') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_sessions(self):\n\n return self.all_sessions",
"def get_sessions_list():\n sessions = Session.query.all()\n result = sessions_schema.dump(sessions).data\n return jsonify({'status': 'success', 'message': None, 'data': result}), 200",
"def fusion_api_get_active_sessions(self):\n ... | [
"0.75758445",
"0.757478",
"0.7396808",
"0.7384779",
"0.73801714",
"0.72702295",
"0.71300334",
"0.7045607",
"0.7029493",
"0.6845006",
"0.679199",
"0.6788236",
"0.6770604",
"0.6673959",
"0.6615374",
"0.65827996",
"0.6541172",
"0.65161306",
"0.6476787",
"0.6473066",
"0.647231",
... | 0.76642567 | 0 |
Update the use of a cache. | def _update_use(self, key):
if (self._replace_pol == Cache.LRU):
self.cache[key]= self.hashmap[key]
if (self._replace_pol == Cache.LRU_S):
self.cache[key] = self.hashmap[key] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_cache(self, val):\n pass",
"def update(self, cache_key):\r\n self._write_sha(cache_key)",
"def set_to_cache(self, url, data):\n cache_key, cache_lookup = self.get_cacheable_info(url)\n MEM_CACHE[cache_key][cache_lookup] = (data, time.time())",
"def do_api_calls_update_cache(se... | [
"0.70355237",
"0.67454726",
"0.66589284",
"0.66395354",
"0.6594092",
"0.658877",
"0.655342",
"0.63988495",
"0.63722324",
"0.63371176",
"0.6319258",
"0.6313111",
"0.6270669",
"0.62608325",
"0.623213",
"0.6211307",
"0.6194371",
"0.61508423",
"0.61492276",
"0.61486644",
"0.61116... | 0.7350366 | 0 |
Return the name, arguments, and return type of the first function definition found in code. Arguments are returned as [(type, name), ...]. | def parse_function_signature(code):
    m = re.search(r"^\s*" + re_func_decl + r"\s*{", code, re.M)
if m is None:
print(code)
raise Exception("Failed to parse function signature. "
"Full code is printed above.")
rtype, name, args = m.groups()[:3]
if args == 'void' or ar... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_functions(code):\n regex = \"^\\s*\" + re_func_decl + \"\\s*{\"\n \n funcs = []\n while True:\n m = re.search(regex, code, re.M)\n if m is None:\n return funcs\n \n rtype, name, args = m.groups()[:3]\n if args == 'void' or args.strip() == '':\n ... | [
"0.68184936",
"0.66357005",
"0.6627714",
"0.6380974",
"0.634481",
"0.6164433",
"0.59679073",
"0.5878357",
"0.58456427",
"0.58147675",
"0.57996273",
"0.5772736",
"0.57490474",
"0.57399726",
"0.56921184",
"0.56889397",
"0.56479543",
"0.56471384",
"0.5646925",
"0.55967027",
"0.5... | 0.6752605 | 1 |
Return a list of (name, arguments, return type) for all function definitions found in code. Arguments are returned as [(type, name), ...]. | def find_functions(code):
    regex = r"^\s*" + re_func_decl + r"\s*{"
funcs = []
while True:
m = re.search(regex, code, re.M)
if m is None:
return funcs
rtype, name, args = m.groups()[:3]
if args == 'void' or args.strip() == '':
args = []
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_prototypes(code):\n\n prots = []\n lines = code.split('\\n')\n for line in lines:\n m = re.match(\"\\s*\" + re_func_prot, line)\n if m is not None:\n rtype, name, args = m.groups()[:3]\n if args == 'void' or args.strip() == '':\n args = []\n ... | [
"0.6066096",
"0.6015459",
"0.5959938",
"0.59203005",
"0.583354",
"0.56550306",
"0.56340224",
"0.5607439",
"0.557955",
"0.554569",
"0.5541941",
"0.5537403",
"0.5534576",
"0.5530362",
"0.5520808",
"0.5512901",
"0.54985154",
"0.5478151",
"0.54779917",
"0.54722935",
"0.5433578",
... | 0.6613337 | 0 |
Return a list of signatures for each function prototype declared in code. Format is [(name, [args], rtype), ...]. | def find_prototypes(code):
prots = []
lines = code.split('\n')
for line in lines:
        m = re.match(r"\s*" + re_func_prot, line)
if m is not None:
rtype, name, args = m.groups()[:3]
if args == 'void' or args.strip() == '':
args = []
else:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_functions(code):\n regex = \"^\\s*\" + re_func_decl + \"\\s*{\"\n \n funcs = []\n while True:\n m = re.search(regex, code, re.M)\n if m is None:\n return funcs\n \n rtype, name, args = m.groups()[:3]\n if args == 'void' or args.strip() == '':\n ... | [
"0.6960925",
"0.6853837",
"0.6183662",
"0.6137309",
"0.61293304",
"0.585127",
"0.58011335",
"0.5792403",
"0.5768999",
"0.5726607",
"0.571727",
"0.5692678",
"0.56545895",
"0.5620403",
"0.55659837",
"0.5563249",
"0.55443704",
"0.5544288",
"0.5539026",
"0.55336374",
"0.55096585"... | 0.76626337 | 0 |
Return a list of template variables found in code. | def find_template_variables(code):
return re.findall(re_template_var, code) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vars(cls):\n for key in dir(cls):\n if key.startswith('var_'):\n yield key[4:]",
"def variables(self):\n return {u for u in self if u.type == 'var'}",
"def variables_referenced(text):\n return set(substitution_pattern.findall(text))",
"def variables(self):\r\n ... | [
"0.66962886",
"0.65876555",
"0.6326123",
"0.6295308",
"0.62385863",
"0.62311065",
"0.62209594",
"0.6211561",
"0.61806494",
"0.61253965",
"0.61250657",
"0.6060957",
"0.6031869",
"0.6000928",
"0.59701294",
"0.5965443",
"0.5964786",
"0.5951391",
"0.59493124",
"0.592245",
"0.5907... | 0.8810326 | 0 |
Returns a function for generating trials for a model op. Infers the Python main module for the operation and returns the `gen_trials` function defined for that module. Raises `TypeError` if the operation does not use a Python main module (either explicitly with the `main` attribute or implicitly in the `exec` attribute). | def optimizer_trial_generator(model, op_name):
try:
module_name = _model_op_main(model, op_name)
except ValueError as e:
raise TypeError(
f"could not get main module for {model.name}{op_name}: {e}"
) from None
else:
try:
main_mod = importlib.import_mod... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_test_routine(\n self,\n ) -> Callable[\n [\n torch.utils.data.Dataset,\n argparse.Namespace,\n torch.nn.Module,\n Progress,\n TaskID,\n ],\n Tuple[Dict[str, float], pd.DataFrame],\n ]:\n pass",
"def main(_):\n... | [
"0.52469647",
"0.50867206",
"0.49812433",
"0.49300626",
"0.48819524",
"0.48743096",
"0.48729882",
"0.48613867",
"0.4854776",
"0.48491868",
"0.48481944",
"0.48190248",
"0.48174357",
"0.47987285",
"0.47749686",
"0.47592515",
"0.47589567",
"0.47463167",
"0.4741073",
"0.4720266",
... | 0.75061655 | 0 |
Looks for main module in exec spec for model op. Raises `ValueError` if exec spec is empty or not in the expected format. | def _op_main_for_exec(exec_):
if not exec_:
raise ValueError("exec spec not specified")
m = re.search(r"-u?m ([^ ]+)", exec_)
if not m:
raise ValueError(f"unexpected exec spec: {exec_!r}")
return m.group(1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _find_module(model, mod_name):\n for name, module in model.named_modules():\n if name == mod_name:\n return module\n return None",
"def search_executable(op, description = None):\n checked = []\n ret = None\n if isinstance(op, (list, tuple)):\n for ii in op:\n if not ii in ... | [
"0.53562915",
"0.53267694",
"0.5284103",
"0.5240484",
"0.51123273",
"0.50640464",
"0.5061494",
"0.50039196",
"0.4992714",
"0.4973761",
"0.49309054",
"0.49113435",
"0.49091592",
"0.49087858",
"0.49011794",
"0.48769084",
"0.4876754",
"0.48592058",
"0.48583668",
"0.48241246",
"0... | 0.6107568 | 0 |
Return a vignette for the package | def getVignette(self, packageUrl):
cat = getToolByName(self.context, 'portal_catalog')
results = cat.searchResults(portal_type='Vignette',
path={'query': packageUrl})
if results:
return results[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def for_slug(slug):\n vig = Vignette.objects.filter(slug=slug).first()\n if not vig:\n vig = Vignette(slug=slug, content=json.dumps({'data': [\n {'type': 'text', 'data': {\n 'text': 'Missing Vignette `' + slug + '`'}}]}))\n return vig",
"def _prov... | [
"0.6036985",
"0.53940934",
"0.5330004",
"0.530821",
"0.52660775",
"0.5136677",
"0.5044925",
"0.5041591",
"0.5003167",
"0.49641988",
"0.49498764",
"0.49451274",
"0.48880824",
"0.48761797",
"0.48731172",
"0.4868222",
"0.48601785",
"0.48379087",
"0.48331505",
"0.48277253",
"0.48... | 0.7263297 | 0 |
This function creates a new hdf5 file in the active directory, taking as its sole argument a string name for the file. | def new_hdf5(new_filename):
# handling input errors
if not isinstance(new_filename, str):
raise TypeError('Passed value of `filename` is not a string! Instead, it is: '
+ str(type(new_filename)))
# w- mode will create a file and fail if the file already exists
hdf5 = h5py... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_file(self, filepath):\n folder, _filename = os.path.split(filepath)\n if not os.path.isdir(folder):\n os.makedirs(folder)\n file = h5py.File(filepath, 'a')\n return file",
"def save_as_hdf5(self, filename):",
"def hdf5_file(self):\n if self._hdf5_file i... | [
"0.7140792",
"0.6980622",
"0.68066597",
"0.6773389",
"0.6753648",
"0.6693808",
"0.65225184",
"0.6473293",
"0.6460949",
"0.63484126",
"0.6270696",
"0.6265804",
"0.62491304",
"0.62026066",
"0.61233056",
"0.6107673",
"0.6106745",
"0.6065954",
"0.60572034",
"0.60490173",
"0.60163... | 0.740254 | 0 |
This function adds Raman calibration data to an existing hdf5 file. It uses the spectrafit.fit_data function to fit the data before saving the fit result and the raw data to the hdf5 file. | def add_calibration(hdf5_filename, data_filename, label=None):
# handling input errors
if not isinstance(hdf5_filename, str):
raise TypeError('Passed value of `cal_filename` is not a string! Instead, it is: '
+ str(type(hdf5_filename)))
if not hdf5_filename.split('/')[-1].spl... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write(self,data): \n \n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n # We will store these in a separate file and link them to the level2s\n fname = data.filename.split('/')[-1]\n units = {'A':'K','x0':'degrees','y0':'degrees','... | [
"0.62519884",
"0.6020593",
"0.57739514",
"0.5635872",
"0.5571112",
"0.543021",
"0.5419918",
"0.54129684",
"0.5387982",
"0.53669906",
"0.53284085",
"0.53150004",
"0.5314605",
"0.53093696",
"0.530017",
"0.5263094",
"0.52598375",
"0.5245885",
"0.5220459",
"0.5217278",
"0.5214079... | 0.6727075 | 0 |
This function adds Raman experimental data to an existing hdf5 file. It uses the spectrafit.fit_data function to fit the data before saving the fit result and the raw data to the hdf5 file. The data_filename must be in a standardized format to interact properly with this function. It must take the form anyname_temp_tim... | def add_experiment(hdf5_filename, exp_filename):
# handling input errors
if not isinstance(hdf5_filename, str):
raise TypeError('Passed value of `hdf5_filename` is not a string! Instead, it is: '
+ str(type(hdf5_filename)))
if not hdf5_filename.split('/')[-1].split('.')[-1] =... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_calibration(hdf5_filename, data_filename, label=None):\n # handling input errors\n if not isinstance(hdf5_filename, str):\n raise TypeError('Passed value of `cal_filename` is not a string! Instead, it is: '\n + str(type(hdf5_filename)))\n if not hdf5_filename.split('/... | [
"0.62783426",
"0.54187465",
"0.52853376",
"0.52826935",
"0.5281087",
"0.5270762",
"0.52683157",
"0.5252653",
"0.52074903",
"0.51874214",
"0.5176318",
"0.5172828",
"0.51372814",
"0.5134237",
"0.51305115",
"0.5124579",
"0.511208",
"0.5110209",
"0.5110135",
"0.5080256",
"0.50620... | 0.7044707 | 0 |
Function that allows the user to manually add or remove peaks from the automatic spectra fitting by inputting an add_list and/or a drop_list. The function pulls some data from the existing fit and overwrites it with the new results. | def adjust_peaks(hdf5_file, key, add_list=None, drop_list=None, plot_fits=False):
# handling input errors
if not isinstance(hdf5_file, str):
raise TypeError('Passed value of `hdf5_file` is not a string! Instead, it is: '
+ str(type(hdf5_file)))
if not hdf5_file.split('/')[-1]... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fitPeaks(self, new_peaks, peaks_type):\n # Check if we need to do anything.\n if (new_peaks[\"x\"].size > 0):\n\n # Update status of current peaks (if any) that are near\n # to the new peaks that are being added.\n #\n if (self.mfitter.getNFit() > 0):\n... | [
"0.56082404",
"0.5501705",
"0.53077525",
"0.52218217",
"0.50149703",
"0.4998173",
"0.4938318",
"0.49304774",
"0.48862016",
"0.48674485",
"0.48613563",
"0.4812949",
"0.48021144",
"0.47739965",
"0.4761532",
"0.4754908",
"0.47529873",
"0.47505748",
"0.4730679",
"0.4723984",
"0.4... | 0.6284394 | 0 |
This function prints out a display of the contents of any hdf5 file. It prints the filename followed by a list of the groups and datasets in a familiar directory/file format. Groups (folders) appear bold, while datasets (files) appear in a standard font. | def view_hdf5(filename):
# handling input errors
if not isinstance(filename, str):
raise TypeError('Passed value of `filename` is not a string! Instead, it is: '
+ str(type(filename)))
if not filename.split('/')[-1].split('.')[-1] == 'hdf5':
raise TypeError('`file... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_h5(fname: str) -> None:\n try:\n with h5py.File(fname, 'r') as h:\n print(fname)\n recursively_print_structure(h, ' ')\n except IOError as e:\n print(f\"Cannot open HDF5 file {fname}\")\n print(f\"IOError: {e}\")",
"def printAllColumnsInH5(pathToData):\... | [
"0.6818465",
"0.6535027",
"0.64906377",
"0.63089246",
"0.60178846",
"0.5999033",
"0.5991033",
"0.5967706",
"0.5928091",
"0.59176594",
"0.58546895",
"0.57313806",
"0.571641",
"0.56861824",
"0.5656443",
"0.5614497",
"0.55950373",
"0.5567397",
"0.55614275",
"0.5533533",
"0.55089... | 0.74592173 | 0 |
cast sha256 to int | def sha256(cls, value):
assert type(value) is str
return int(sha256(value.encode()).hexdigest(), 16) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hashToInt(h):\n orderBits = Curve.N.bit_length()\n orderBytes = (orderBits + 7) // 8\n if len(h) > orderBytes:\n h = h[:orderBytes]\n\n ret = int.from_bytes(h, byteorder=\"big\")\n excess = len(h) * 8 - orderBits\n if excess > 0:\n ret = ret >> excess\n return ret",
"def ha... | [
"0.7448692",
"0.72365516",
"0.7121622",
"0.7021218",
"0.68548185",
"0.68216866",
"0.6709854",
"0.66627985",
"0.66617006",
"0.66617006",
"0.6653154",
"0.6538216",
"0.64865804",
"0.6485043",
"0.64513963",
"0.6443496",
"0.64202505",
"0.6406009",
"0.64018744",
"0.6398126",
"0.637... | 0.7255353 | 1 |
Process all examples in the input directory. Filenames should be of the form CLASSNAMEEXAMPLENAME.yaml, e.g. Person001.yaml | def process_examples(self):
input_dir = self.input_directory
counter_example_dir = self.counter_example_input_directory
if input_dir is None:
input_dir = Path.cwd() / "examples"
if counter_example_dir is None:
counter_example_dir = Path.cwd() / "counter_examples"
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_yamls(folder):\n for item in iglob(folder + \"/*.yaml\"):\n data_file = os.path.join(folder, item)\n data = yaml.load(open(data_file))\n load_data(data)",
"def generate_yaml_tests(directory):\n for yml_file in directory.glob(\"*/*.yml\"):\n data = yaml.safe_load(yml_... | [
"0.6632809",
"0.62900645",
"0.62750703",
"0.61933035",
"0.6179182",
"0.6156422",
"0.60064507",
"0.5971663",
"0.5965593",
"0.5941546",
"0.59226394",
"0.59129375",
"0.5911364",
"0.58914095",
"0.58837914",
"0.58785766",
"0.5834951",
"0.583384",
"0.5827961",
"0.58051383",
"0.5797... | 0.74208486 | 0 |
Get the list of example source inputs. | def example_source_inputs(self, class_name: str = None) -> List[str]:
input_dir = self.input_directory
if input_dir is None:
return []
all_inputs = []
for fmt in self.input_formats:
glob_expr = f"*.{fmt}"
if class_name is not None:
glob... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_train_inputs(self, example):\n return example",
"def inputs(self) -> List[str]:\n return self._model.inputs",
"def get_inputs(self):\n return self.inputs",
"def prepare_inputs(example):\n return example['input_ids'], example['label_ids']",
"def prepare_inputs(example):\n ... | [
"0.7428947",
"0.7054078",
"0.6997518",
"0.6961838",
"0.6961838",
"0.6924607",
"0.6878051",
"0.6812642",
"0.6788958",
"0.6788958",
"0.6788958",
"0.6773123",
"0.6742478",
"0.67293483",
"0.67270637",
"0.6706153",
"0.6706153",
"0.6660394",
"0.6660394",
"0.6660394",
"0.66560566",
... | 0.7485938 | 0 |
Load an object from a dict, using the target class to determine the type of object to create. | def _load_from_dict(self, dict_obj: Any, target_class: Union[str, ElementName] = None) -> Any:
if not self.use_type_designators:
return dict_obj
sv = self.schemaview
if target_class is None:
target_class_names = [c.name for c in sv.all_classes().values() if c.tree_root]
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_obj_by_type_from_dict(self):\n test_obj = {}\n returned_obj = self.tested_class._create_obj_by_type(test_obj)\n self.assertIsInstance(returned_obj, self.tested_class)",
"def from_dict(cls, obj):\r\n raise NotImplementedError",
"def load(d):\n\n def _load(d):\n ... | [
"0.70920396",
"0.6989815",
"0.6780296",
"0.6534313",
"0.65280795",
"0.650231",
"0.64138657",
"0.6346336",
"0.625308",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.6... | 0.7511082 | 0 |
Finds fused batch norm layers and folds them into preceding layers. | def _FoldFusedBatchNorms(graph):
for match in _FindFusedBatchNorms(graph):
scope, sep, _ = match.layer_op.name.rpartition('/')
# Make sure new ops are added to `graph` and put on the same device as
# `bn_op`. The '/' (i.e. `sep`) ensures that we reuse the existing scope
# named `scope`. Otherwise, TF ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def FoldBatchNorms(graph):\n _FoldFusedBatchNorms(graph)\n _FoldUnfusedBatchNorms(graph)",
"def _FoldUnfusedBatchNorms(graph):\n input_to_ops_map = input_to_ops.InputToOps(graph)\n\n for bn in common.BatchNormGroups(graph):\n has_scaling = _HasScaling(graph, input_to_ops_map, bn)\n\n # The mangling cod... | [
"0.72084844",
"0.70753294",
"0.6888734",
"0.6833401",
"0.65570444",
"0.63252455",
"0.6297912",
"0.6279781",
"0.62624854",
"0.6249418",
"0.62053025",
"0.61905295",
"0.6186012",
"0.6178468",
"0.6162257",
"0.6147891",
"0.61445254",
"0.60991395",
"0.6081668",
"0.6066008",
"0.6028... | 0.81622905 | 0 |
Clones layer_op with input_tensor and weight_tensor as new inputs. | def _CloneWithNewOperands(layer_op, input_tensor, weight_tensor):
new_layer_name = layer_op.name.split('/')[-1] + '_Fold'
if layer_op.type == 'Conv2D':
return nn_ops.conv2d(
input_tensor,
weight_tensor,
strides=layer_op.get_attr('strides'),
padding=layer_op.get_attr('padding'),
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _CloneOp(op, new_name, new_inputs):\n inputs = list(op.inputs)\n for new_input in new_inputs:\n inputs[new_input[0]] = new_input[1]\n return _OP_CLONER.Clone(op, inputs, new_name)",
"def build(self, input_layer, trainable=True):\n\n with tf.variable_scope(self.name):\n # Determine the size... | [
"0.62645614",
"0.6237233",
"0.6156408",
"0.61453366",
"0.5968321",
"0.58285654",
"0.5824187",
"0.58094114",
"0.58049345",
"0.5748741",
"0.5666261",
"0.5660313",
"0.5655159",
"0.5610911",
"0.5606715",
"0.55708444",
"0.55697495",
"0.5538788",
"0.5504511",
"0.547193",
"0.5451717... | 0.7447388 | 0 |
Finds all ops and tensors related to found FusedBatchNorms. | def _FindFusedBatchNorms(graph):
input_pattern = graph_matcher.OpTypePattern('*')
weight_pattern = graph_matcher.OpTypePattern('*')
gamma_pattern = graph_matcher.OpTypePattern('*')
beta_pattern = graph_matcher.OpTypePattern('*')
mean_pattern = graph_matcher.OpTypePattern('*')
variance_pattern = graph_matche... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _FoldFusedBatchNorms(graph):\n for match in _FindFusedBatchNorms(graph):\n scope, sep, _ = match.layer_op.name.rpartition('/')\n # Make sure new ops are added to `graph` and put on the same device as\n # `bn_op`. The '/' (i.e. `sep`) ensures that we reuse the existing scope\n # named `scope`. Othe... | [
"0.68619853",
"0.6494927",
"0.60702705",
"0.5982493",
"0.5894786",
"0.5847638",
"0.57264715",
"0.5440104",
"0.5178902",
"0.5135283",
"0.5134985",
"0.5104784",
"0.5068771",
"0.5065753",
"0.5037282",
"0.5032491",
"0.50149405",
"0.50096035",
"0.5000106",
"0.4991522",
"0.49882165... | 0.7717176 | 0 |
Gets tensors needed for FusedBatchNormMatch from match_result. | def _GetCommonTensors(match_result, bn_op, bn_input_tensor):
input_tensor = match_result.get_tensor(input_pattern)
weight_tensor = match_result.get_tensor(weight_pattern)
gamma_tensor = match_result.get_tensor(gamma_pattern)
beta_tensor = match_result.get_tensor(beta_pattern)
# FusedBatchNorm in tra... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _FindFusedBatchNorms(graph):\n input_pattern = graph_matcher.OpTypePattern('*')\n weight_pattern = graph_matcher.OpTypePattern('*')\n gamma_pattern = graph_matcher.OpTypePattern('*')\n beta_pattern = graph_matcher.OpTypePattern('*')\n mean_pattern = graph_matcher.OpTypePattern('*')\n variance_pattern = g... | [
"0.687296",
"0.53285825",
"0.52761436",
"0.526731",
"0.52641547",
"0.52601016",
"0.52112883",
"0.5158161",
"0.51153666",
"0.51105905",
"0.5093035",
"0.5083832",
"0.5050526",
"0.50354075",
"0.50354075",
"0.50307226",
"0.50050557",
"0.49959216",
"0.49791405",
"0.49321866",
"0.4... | 0.6739167 | 1 |
Finds unfused batch norm layers and folds them into preceding layers. | def _FoldUnfusedBatchNorms(graph):
input_to_ops_map = input_to_ops.InputToOps(graph)
for bn in common.BatchNormGroups(graph):
has_scaling = _HasScaling(graph, input_to_ops_map, bn)
# The mangling code intimately depends on BatchNorm node's internals.
original_op, folded_op = _CreateFoldedOp(graph, bn,... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _FoldFusedBatchNorms(graph):\n for match in _FindFusedBatchNorms(graph):\n scope, sep, _ = match.layer_op.name.rpartition('/')\n # Make sure new ops are added to `graph` and put on the same device as\n # `bn_op`. The '/' (i.e. `sep`) ensures that we reuse the existing scope\n # named `scope`. Othe... | [
"0.77741086",
"0.7156549",
"0.7024513",
"0.68224394",
"0.63824594",
"0.63595194",
"0.6346714",
"0.625679",
"0.6225848",
"0.6212077",
"0.620075",
"0.6187602",
"0.617762",
"0.6148358",
"0.6089603",
"0.60637486",
"0.6047825",
"0.6043735",
"0.60115445",
"0.60108745",
"0.60100204"... | 0.74272305 | 1 |
r"""Checks if batch norm has scaling enabled. | def _HasScaling(graph, input_to_ops_map, bn):
rsqrt_op = graph.get_operation_by_name(bn + '/BatchNorm/batchnorm/Rsqrt')
rsqrt_consumers = input_to_ops_map.ConsumerOperations(rsqrt_op)
return sum(1 for op in rsqrt_consumers if op.type == 'Mul') == 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_scale_enabled(self) -> bool:\r\n ...",
"def scaling_enabled(self):\n return False",
"def isSetScale(self):\n return _libsbml.Unit_isSetScale(self)",
"def param_scale_check(shape_x, shape_scale):\n\n length_x = len(shape_x)\n length_scale = len(shape_scale)\n\n if not(leng... | [
"0.73828375",
"0.6997457",
"0.6757541",
"0.6416183",
"0.6400106",
"0.6291684",
"0.626143",
"0.6211953",
"0.6028917",
"0.6004665",
"0.59074646",
"0.5888298",
"0.5888298",
"0.5880715",
"0.582264",
"0.5821804",
"0.57957906",
"0.578953",
"0.5759011",
"0.57504267",
"0.574466",
"... | 0.71985847 | 1 |
Clones a given op, replaces its name and some of its inputs. | def _CloneOp(op, new_name, new_inputs):
inputs = list(op.inputs)
for new_input in new_inputs:
inputs[new_input[0]] = new_input[1]
return _OP_CLONER.Clone(op, inputs, new_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clone(self):\r\n cp = self.__class__(self.op, self.inputs, [output.clone() for output in self.outputs])\r\n cp.tag = copy(self.tag)\r\n return cp",
"def _CloneWithNewOperands(layer_op, input_tensor, weight_tensor):\n new_layer_name = layer_op.name.split('/')[-1] + '_Fold'\n if layer_op... | [
"0.61716413",
"0.6113947",
"0.5992132",
"0.5831337",
"0.5507165",
"0.5421506",
"0.5408626",
"0.5406843",
"0.5405116",
"0.5387802",
"0.5377177",
"0.53763574",
"0.53390443",
"0.5338112",
"0.53215635",
"0.5299693",
"0.52697754",
"0.5264059",
"0.5250165",
"0.5242738",
"0.5225992"... | 0.82067853 | 0 |
Makes sure that convolution inputs have compatible shapes. | def _AssertConvShapes(self, op_name, input_tensor, weights):
input_shape = input_tensor.get_shape()
weights_shape = weights.get_shape()
if (len(input_shape) != 4 or len(weights_shape) != 4 or
input_shape[3] != weights_shape[2]):
raise ValueError('Incompatible shapes for op %s inputs: %s and %s... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_convolve_input_dim_check(self, case, fn, x_shape, y_shape):\n x = torch.rand(*x_shape, dtype=self.dtype, device=self.device)\n y = torch.rand(*y_shape, dtype=self.dtype, device=self.device)\n\n message = [\n \"The operands must be the same dimension\",\n \"Leadin... | [
"0.70430326",
"0.6984046",
"0.67752093",
"0.67076695",
"0.6631883",
"0.6590607",
"0.65526205",
"0.6539452",
"0.65392506",
"0.65136945",
"0.6513212",
"0.6502569",
"0.64620143",
"0.64320785",
"0.6431594",
"0.6411467",
"0.64058405",
"0.63952243",
"0.63517404",
"0.6339218",
"0.63... | 0.732145 | 0 |
Makes sure that FC layer inputs have compatible shapes. | def _AssertFCShapes(self, op_name, weights, input_tensor):
weights_shape = weights.get_shape()
input_shape = input_tensor.get_shape()
if (len(weights_shape) != 2 or len(input_shape) != 2 or
weights_shape[1] != input_shape[0]):
raise ValueError('Incompatible shapes for op %s inputs: %s and %s' ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _AssertConvShapes(self, op_name, input_tensor, weights):\n input_shape = input_tensor.get_shape()\n weights_shape = weights.get_shape()\n if (len(input_shape) != 4 or len(weights_shape) != 4 or\n input_shape[3] != weights_shape[2]):\n raise ValueError('Incompatible shapes for op %s inputs:... | [
"0.7000613",
"0.6832547",
"0.6529788",
"0.64534384",
"0.63770324",
"0.6370928",
"0.6363974",
"0.6346374",
"0.633438",
"0.63139635",
"0.6271003",
"0.6215813",
"0.6161302",
"0.6155026",
"0.6152465",
"0.614528",
"0.61381274",
"0.61302507",
"0.61197114",
"0.61083287",
"0.6102161"... | 0.7046982 | 0 |
Makes sure that shapes of input and output tensors are compatible. | def _AssertShapesMatch(op_name, in_tensor, out_tensor):
in_shape = in_tensor.get_shape()
out_shape = out_tensor.get_shape()
if not in_shape.is_compatible_with(out_shape):
raise ValueError('%s should not change tensor shape: input %s, '
'output %s' % (op_name, in_shape, out_shape)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_compatible_with(self, inputs): # pylint:disable=useless-super-delegation\n if self.shape is None:\n return False\n if len(inputs) != len(self):\n raise ValueError('Expects ' +\n str(len(self)) + ' inputs, '\n ... | [
"0.69564956",
"0.689856",
"0.6888036",
"0.6857275",
"0.67998415",
"0.67264485",
"0.662843",
"0.64965355",
"0.6468758",
"0.6395005",
"0.63594204",
"0.6335894",
"0.6281145",
"0.6273137",
"0.6260043",
"0.6223739",
"0.6200478",
"0.61994135",
"0.61986095",
"0.6178359",
"0.6171779"... | 0.71506196 | 0 |
Sets the server_enabled of this FtsSftpSettings. | def server_enabled(self, server_enabled):
self._server_enabled = server_enabled | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def enable_server(self, server):\n log.info(\"Enabling %s in netscaler\", server)\n return self.post(\"server?action=enable\", {\"server\": {\"name\": server}}, content_type=self.content_type(\"server\"))",
"def set_dhcpserver_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlVirtNet_SetDHCPServe... | [
"0.64041066",
"0.62101185",
"0.6115301",
"0.57635754",
"0.5666829",
"0.560498",
"0.5532448",
"0.55312526",
"0.55182683",
"0.55100137",
"0.54706293",
"0.5436376",
"0.53900456",
"0.5380313",
"0.5367764",
"0.53609276",
"0.5244654",
"0.52353865",
"0.5225218",
"0.5225218",
"0.5204... | 0.7935785 | 0 |
Sets the authentication_method of this FtsSftpSettings. | def authentication_method(self, authentication_method):
self._authentication_method = authentication_method | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def authentication_methods(self, authentication_methods):\n\n self._authentication_methods = authentication_methods",
"def auth_method(self):\n return self.settings[\"authMethod\"]",
"def auth_method(self):\n return self[\"authMethod\"]",
"def auth_method(self) -> Optional[pulumi.Input[s... | [
"0.62380636",
"0.61203897",
"0.5855358",
"0.55694807",
"0.5538085",
"0.5522016",
"0.5440951",
"0.53794426",
"0.53782594",
"0.5377386",
"0.5356047",
"0.5349534",
"0.52542967",
"0.5180398",
"0.5180398",
"0.51782846",
"0.5156288",
"0.50967616",
"0.5048462",
"0.501886",
"0.501095... | 0.7283392 | 0 |
Sets the keystore_file_path of this FtsSftpSettings. | def keystore_file_path(self, keystore_file_path):
self._keystore_file_path = keystore_file_path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def keystore_file_password(self, keystore_file_password):\n\n self._keystore_file_password = keystore_file_password",
"def _set_keystore_path(self) -> None:\n response = self.single_call(\"hmy keys location\").strip()\n if not os.path.exists(response):\n os.mkdir(response)\n ... | [
"0.7145288",
"0.6014983",
"0.58418983",
"0.55596626",
"0.5433482",
"0.5313241",
"0.51829666",
"0.5103493",
"0.5063631",
"0.49352625",
"0.49106106",
"0.48667493",
"0.48239157",
"0.48141515",
"0.4736292",
"0.46992692",
"0.4678572",
"0.46774423",
"0.4635615",
"0.46148446",
"0.46... | 0.7703561 | 0 |
Sets the keystore_file_password of this FtsSftpSettings. | def keystore_file_password(self, keystore_file_password):
self._keystore_file_password = keystore_file_password | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def org_apache_felix_https_keystore_key_password(self, org_apache_felix_https_keystore_key_password: ConfigNodePropertyString):\n\n self._org_apache_felix_https_keystore_key_password = org_apache_felix_https_keystore_key_password",
"def org_apache_felix_https_keystore_password(self, org_apache_felix_https... | [
"0.697365",
"0.6841448",
"0.6515118",
"0.6424852",
"0.60550404",
"0.5957332",
"0.5679264",
"0.56787336",
"0.56772876",
"0.56676793",
"0.5628823",
"0.55949026",
"0.55878115",
"0.5577493",
"0.5540421",
"0.551728",
"0.5511115",
"0.54981464",
"0.54964054",
"0.5444806",
"0.5444743... | 0.8196835 | 0 |
Sets the ciphers of this FtsSftpSettings. | def ciphers(self, ciphers):
self._ciphers = ciphers | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ciphers(self) -> Sequence[str]:\n return pulumi.get(self, \"ciphers\")",
"def ciphers(self) -> Sequence[str]:\n return pulumi.get(self, \"ciphers\")",
"def ciphers(self):\n return self._ciphers",
"def set_ssl(self):\n for params in self.config.get_ssl_params():\n se... | [
"0.63286173",
"0.63286173",
"0.6211777",
"0.55823",
"0.53944427",
"0.52489173",
"0.5057643",
"0.49852008",
"0.4932068",
"0.4884324",
"0.48747385",
"0.48445147",
"0.48318604",
"0.48281583",
"0.48003778",
"0.47863695",
"0.47562948",
"0.47462425",
"0.47110054",
"0.46554583",
"0.... | 0.7703951 | 0 |
Sets the known_users_file_path of this FtsSftpSettings. | def known_users_file_path(self, known_users_file_path):
self._known_users_file_path = known_users_file_path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __parse_user_keyfiles(self):\n\n user_sshdir = os.path.expanduser('~/.ssh')\n if not os.path.isdir(user_sshdir):\n return\n\n paths = []\n for filename in os.listdir(user_sshdir):\n if filename in SSH_CONFIG_FILES or os.path.splitext(filename)[1] != '.pub':\n ... | [
"0.57418454",
"0.5557294",
"0.54986745",
"0.5214731",
"0.5214731",
"0.5180744",
"0.5055465",
"0.5035089",
"0.50259876",
"0.4974094",
"0.496511",
"0.49633723",
"0.4950638",
"0.49499902",
"0.48848796",
"0.48848796",
"0.48848796",
"0.4883349",
"0.48802492",
"0.4863774",
"0.48286... | 0.8051893 | 0 |
Sets the overridden_users_home_directories of this FtsSftpSettings. | def overridden_users_home_directories(self, overridden_users_home_directories):
self._overridden_users_home_directories = overridden_users_home_directories | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_share_user_home_dir(self, bShareUserHomeDir):\n\t\tcall_sdk_function('PrlVmCfg_SetShareUserHomeDir', self.handle, bShareUserHomeDir)",
"def set_user_home(self, path):\n os.environ['HOME'] = path",
"def set_user_home(self, path):\n os.environ['HOME'] = path",
"def homeDirectory(self, ign... | [
"0.6600052",
"0.6430041",
"0.6430041",
"0.6197573",
"0.57821155",
"0.5754564",
"0.57503104",
"0.5628441",
"0.5487617",
"0.54494035",
"0.54009694",
"0.53437734",
"0.53021526",
"0.5258358",
"0.5253186",
"0.52394444",
"0.5199579",
"0.5132827",
"0.5066063",
"0.5064636",
"0.506463... | 0.8290534 | 0 |
article is initialized with xml text contained inside tags | def __init__(self, article_xml):
self.article_xml = article_xml
self.links = self.grab_links()
self.first_link = self.parse_first_link() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, txt='', unicodeEncoding='utf-8'):\n # __document capture the document level structure\n # for each sentence and then put in the archives when the next sentence\n # is processed\n super(ConTextMarkup, self).__init__(__txt=None,\n ... | [
"0.6530297",
"0.6436172",
"0.631786",
"0.62645775",
"0.61489284",
"0.61476105",
"0.61120623",
"0.6110277",
"0.6025093",
"0.5990031",
"0.59805065",
"0.59666455",
"0.5903457",
"0.5874341",
"0.58594066",
"0.5852814",
"0.5852356",
"0.5843398",
"0.58399487",
"0.58204275",
"0.58045... | 0.6952109 | 0 |
returns a list of the outermost links not in parentheses, a template, or a tag | def grab_links(self):
links = []
link_char = []
w_temp = [] #in template?
par = [] #in parentheses?
rtag = [] #in <ref> tag?
dtag = [] #in <div> tag?
skip_char = []
for i, c in enumerate(self.article_xml):
if i in skip_char: continue #elimina... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_substitution_image_links(links):\n return [link for link in links if '{' not in link]",
"def getExpandedLinks():",
"def removeHtmlTags(self, text):\n sb = []\n text = self.removeHtmlComments(text)\n bits = text.split(u'<')\n sb.append(bits.pop(0))\n tagstack = [... | [
"0.6023677",
"0.592088",
"0.57580185",
"0.5751759",
"0.5707651",
"0.56727266",
"0.55507445",
"0.55107826",
"0.5486449",
"0.5462063",
"0.5458863",
"0.54393977",
"0.5430559",
"0.5428695",
"0.54271686",
"0.5418311",
"0.538971",
"0.53824365",
"0.5366302",
"0.5362929",
"0.52836615... | 0.67386085 | 0 |
filters links to images, files, or other Wikimedia projects; returns False if it's an invalid link (including links with a colon) | def check_link(self, link):
false_links = ["wikipedia:", "w:", "wikitionary:", "wikt:", "wikinews:",
"n:", "wikibooks:", "b:", "wikiquote:", "q:", "wikisource:",
"s:", "wikispecies:", "species:", "wikiversity", "v:",
"wikivoyage:", "voy:",... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_link(self, link, links_para):\n href = link['href']\n if not href.startswith('/wiki/') or href == '/wiki/Latin' or href.startswith('#'):\n return False\n if \"<i>\" in link or href in links_para:\n return False\n title = href[6:]\n if title.starts... | [
"0.68379533",
"0.67308575",
"0.6564884",
"0.6558733",
"0.6462883",
"0.6375301",
"0.6374791",
"0.6300895",
"0.62788117",
"0.6265837",
"0.62631965",
"0.62593323",
"0.622585",
"0.62170655",
"0.6212652",
"0.6198974",
"0.6183221",
"0.61607987",
"0.61497504",
"0.6100527",
"0.607530... | 0.67699367 | 1 |
strips brackets, returns link destination (not display name) | def clean_link(self, link):
link = link.strip("[]")
if "|" in link:
link = link.split("|",1)[0]
link = link.strip() #remove trailing white space
return link | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def format_link(self, link):\n new_link = \"/\".join(link.split(\"/\")[0:3])\n return \"http://www.imdb.com\" + new_link",
"def remove_links(str):\n stripped_str = re.sub(\"\\[.*\\]\",\"\", str)\n str_list = filter(None, stripped_str.split(\" \"))\n built_string = \" \".join(str_list)\n ... | [
"0.6074059",
"0.6058749",
"0.5964363",
"0.59119755",
"0.58833617",
"0.5830744",
"0.57996374",
"0.57897335",
"0.57738227",
"0.5760817",
"0.5757934",
"0.5745697",
"0.5745697",
"0.5744635",
"0.57165736",
"0.57104677",
"0.57055384",
"0.5694247",
"0.5692231",
"0.5687585",
"0.56484... | 0.6704387 | 0 |
Evaluate quality of the fit result. Subclasses can override this method to do post analysis. | def _evaluate_quality(self, fit_data: FitData) -> Union[str, None]:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]:\n freq_increment = np.mean(np.diff(fit_data.x_data))\n\n fit_a = fit_data.ufloat_params[\"a\"]\n fit_b = fit_data.ufloat_params[\"b\"]\n fit_freq = fit_data.ufloat_params[\"freq\"]\n fit_kappa = fit_... | [
"0.7156114",
"0.684477",
"0.657213",
"0.65535766",
"0.64372116",
"0.63338166",
"0.6271525",
"0.62638944",
"0.62541264",
"0.62117773",
"0.6151621",
"0.606927",
"0.60639936",
"0.60630333",
"0.60455346",
"0.6035397",
"0.59774005",
"0.5925373",
"0.59253347",
"0.59226096",
"0.5920... | 0.7637929 | 0 |
Extract curve data from experiment data. This method internally populates two types of curve data. | def _extract_curves(
self, experiment_data: ExperimentData, data_processor: Union[Callable, DataProcessor]
):
self.__processed_data_set = list()
def _is_target_series(datum, **filters):
try:
return all(datum["metadata"][key] == val for key, val in filters.items()... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ex_curve(data):\n rv = []\n try:\n ef = autocomplete_curve_function(data[0])\n ed = autocomplete_curve_direction(data[1])\n period = 2\n try:\n period = max(int(data[2]), 2)\n except ValueError:\n pass\n data = data[3:]\n if not data:... | [
"0.6097595",
"0.59153706",
"0.5720998",
"0.56816626",
"0.55583704",
"0.5506386",
"0.5458484",
"0.54564375",
"0.5437237",
"0.5421108",
"0.54172045",
"0.54148316",
"0.54103583",
"0.53691983",
"0.5329527",
"0.52916807",
"0.5291498",
"0.52747667",
"0.5228793",
"0.5213298",
"0.520... | 0.6852174 | 0 |
Return type of experiment. | def _experiment_type(self) -> str:
try:
return self.__experiment_metadata["experiment_type"]
except (TypeError, KeyError):
# Ignore experiment metadata is not set or key is not found
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def experiment_type(filename):\n assert(isinstance(filename, str))\n exp_type = filename.split('/')[-1].split('.')[-2].split('_')[1:-1]\n exp_type = '_'.join(exp_type)\n logger.debug('{} is of type {}'.format(filename, exp_type))\n return exp_type",
"def get_test_type(self):\n return self.t... | [
"0.701162",
"0.69926196",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"... | 0.8181823 | 0 |
Getter for physical qubit indices. | def _physical_qubits(self) -> List[int]:
try:
return list(self.__experiment_metadata["physical_qubits"])
except (TypeError, KeyError):
# Ignore experiment metadata is not set or key is not found
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def indices(self):\n return self._kbounded_partitions",
"def get_indices(self):\r\n return self._indices",
"def indices(self) -> np.ndarray:\n return self.impl.indices",
"def jw_number_indices(n_electrons, n_qubits):\n occupations = itertools.combinations(range(n_qubits), n_electr... | [
"0.66136235",
"0.63095975",
"0.6267602",
"0.62219816",
"0.6181241",
"0.6174731",
"0.6135461",
"0.5949904",
"0.5915764",
"0.58870023",
"0.58162713",
"0.58161235",
"0.5783942",
"0.5758474",
"0.57396424",
"0.56639326",
"0.5659677",
"0.5655574",
"0.56555057",
"0.5643877",
"0.5641... | 0.6819982 | 0 |
Getter for backend object. | def _backend(self) -> Backend:
return self.__backend | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def backend(self):\n # This never changes (so no read locking needed).\n return self._backend",
"def get_backend():\n return _BACKEND",
"def get_backend():\n return Connection()",
"def get_backend():\n return __SETTINGS__._BACKEND",
"def backend_object(self, id):\n return self.m... | [
"0.79623115",
"0.76062316",
"0.7487151",
"0.7388517",
"0.72666264",
"0.7238288",
"0.71368957",
"0.7134815",
"0.7086919",
"0.7014857",
"0.6954881",
"0.6920183",
"0.6918006",
"0.6918006",
"0.6909595",
"0.690837",
"0.690837",
"0.67804307",
"0.6756487",
"0.6732792",
"0.66931",
... | 0.8183749 | 0 |
Return the experiment options of given job index. | def _experiment_options(self, index: int = -1) -> Dict[str, Any]:
try:
return self.__experiment_metadata["job_metadata"][index]["experiment_options"]
except (TypeError, KeyError, IndexError):
# Ignore experiment metadata or job metadata is not set or key is not found
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _run_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"run_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found\n ... | [
"0.71390533",
"0.6975858",
"0.6919278",
"0.621385",
"0.59916735",
"0.580255",
"0.5618967",
"0.549171",
"0.54512733",
"0.5414998",
"0.53518206",
"0.5308031",
"0.5284357",
"0.5283639",
"0.5231703",
"0.5185954",
"0.5171701",
"0.51661193",
"0.50663817",
"0.50663465",
"0.50529015"... | 0.80677307 | 0 |
Returns the analysis options of given job index. | def _analysis_options(self, index: int = -1) -> Dict[str, Any]:
try:
return self.__experiment_metadata["job_metadata"][index]["analysis_options"]
except (TypeError, KeyError, IndexError):
# Ignore experiment metadata or job metadata is not set or key is not found
retu... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _experiment_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"experiment_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found... | [
"0.6743674",
"0.6663001",
"0.6280733",
"0.6069232",
"0.60599047",
"0.565759",
"0.54964",
"0.5447708",
"0.54197335",
"0.53915113",
"0.53473103",
"0.53200793",
"0.52881956",
"0.52273625",
"0.51928836",
"0.5185036",
"0.5124009",
"0.51195866",
"0.5102956",
"0.5085236",
"0.5047777... | 0.7922862 | 0 |
Returns the run options of given job index. | def _run_options(self, index: int = -1) -> Dict[str, Any]:
try:
return self.__experiment_metadata["job_metadata"][index]["run_options"]
except (TypeError, KeyError, IndexError):
# Ignore experiment metadata or job metadata is not set or key is not found
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _experiment_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"experiment_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found... | [
"0.64497",
"0.6384364",
"0.62348664",
"0.61879486",
"0.6184877",
"0.59024817",
"0.5853503",
"0.56605256",
"0.5530498",
"0.5475739",
"0.54434043",
"0.53275234",
"0.5277181",
"0.5277181",
"0.5277181",
"0.5276527",
"0.5255759",
"0.5234088",
"0.523196",
"0.522801",
"0.52041173",
... | 0.79151005 | 0 |
Returns the transpile options of given job index. | def _transpile_options(self, index: int = -1) -> Dict[str, Any]:
try:
return self.__experiment_metadata["job_metadata"][index]["transpile_options"]
except (TypeError, KeyError, IndexError):
# Ignore experiment metadata or job metadata is not set or key is not found
re... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_job_options(self):\n argument = [string.Template(self.queue.template[key]).substitute(\n {key : value}) for key, value in self.options.items()]\n\n if len(self.custom_options) > 0:\n argument += self.custom_options\n\n return argument",
"def _experiment_... | [
"0.58368",
"0.56201595",
"0.54462826",
"0.5405374",
"0.5268604",
"0.51483375",
"0.514388",
"0.5095438",
"0.48473778",
"0.48377272",
"0.476263",
"0.47249606",
"0.47134674",
"0.46772844",
"0.46634972",
"0.465322",
"0.46220458",
"0.46066916",
"0.45873234",
"0.4573063",
"0.455892... | 0.7755575 | 0 |
Parse input kwargs with predicted input. Class attributes will be updated according to the ``options``. For example, if ``options`` has a key ``p0``, and the class has an attribute named ``__p0``, then the attribute ``__p0`` will be updated to ``options["p0"]``. Options that don't have matching attributes will be inclu... | def _arg_parse(self, **options) -> Dict[str, Any]:
extra_options = dict()
for key, value in options.items():
private_key = f"__{key}"
if hasattr(self, private_key):
setattr(self, private_key, value)
else:
extra_options[key] = value
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_options(options):\n opts = dict()\n for attr in dir(options):\n if attr.startswith(\"__\"):\n continue\n opts[attr] = getattr(options, attr)\n return opts",
"def extract_kwargs_from_options(options):\n return modulation_utils.extract_kwargs_from_options(dqpsk_m... | [
"0.6199073",
"0.6011524",
"0.6008918",
"0.5943669",
"0.59151167",
"0.5756908",
"0.5742022",
"0.56702465",
"0.5639711",
"0.5626554",
"0.55926645",
"0.5560618",
"0.55327994",
"0.54897285",
"0.54338837",
"0.54048324",
"0.5363454",
"0.52499473",
"0.5185636",
"0.5124794",
"0.50433... | 0.63792646 | 0 |
Key generator that allows switching between keys that are provided in the `secret_key.txt` file. | def switch_key():
with open("secret_key.txt", 'r') as key_file:
api_keys = key_file.read().splitlines()
for api_key in api_keys:
yield api_key | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_key():\n key = Fernet.generate_key()\n with open(\"secret.key\", \"wb\") as key_file:\n key_file.write(key)",
"def generate_key():\n key = Fernet.generate_key()\n with open(\"Secret.key\",\"wb\")as key_file:\n key_file.write(key)",
"def setup_keys():\n if os.path.isfil... | [
"0.7030336",
"0.6970633",
"0.69157135",
"0.6851234",
"0.665555",
"0.6652344",
"0.6556344",
"0.64819336",
"0.64733076",
"0.64401174",
"0.6436973",
"0.64132476",
"0.64103454",
"0.63922274",
"0.6378862",
"0.6355134",
"0.63407135",
"0.6338451",
"0.6336893",
"0.6334349",
"0.627503... | 0.75049704 | 0 |
High-level hook called when a SIP has been deposited in a landing zone | def ingestPostProcSipDepositInLandingZone(dataObjectPath, user, zone):
logger.info("ingestPostProcSipDepositInLandingZone()")
logger.info("dataObjectPath: %s" % dataObjectPath)
logger.info("user:%s" % user)
logger.info("zone:%s" % zone) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def place_call_offhold(self) -> None:",
"def place_call_onhold(self) -> None:",
"def _extract_kiss_destination(self):\n self.destination = aprs.Callsign(self.frame)",
"def ring_zone(self, tissue):\n print(\"controller - ring_zone!\")\n self.view.processing_gui.ask_ring_out(tissue)",
"d... | [
"0.5777385",
"0.5624411",
"0.5509368",
"0.54877645",
"0.5309101",
"0.52809733",
"0.5173685",
"0.514805",
"0.50955397",
"0.50581396",
"0.50581396",
"0.5057988",
"0.5046991",
"0.50424355",
"0.4994274",
"0.49913985",
"0.49630877",
"0.49369216",
"0.49329975",
"0.49219167",
"0.492... | 0.5720877 | 1 |
Do API calls, and save data in cache files. | def do_api_calls_update_cache(self):
self.get_nodes()
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __call__(self, *args, **kw):\n cachepath = self.cachepath(*args, **kw)\n try:\n # try returning from cache first\n return self.loadcache(cachepath)\n except IOError:\n # not found, so run api query\n self._sleep()\n self.lastcall = tim... | [
"0.6925335",
"0.6491691",
"0.6327244",
"0.6154643",
"0.60999835",
"0.60896784",
"0.60562545",
"0.6047197",
"0.5878853",
"0.5847318",
"0.57860565",
"0.5767712",
"0.5724594",
"0.57162315",
"0.57134306",
"0.56965476",
"0.565406",
"0.56492305",
"0.5622184",
"0.56044537",
"0.56002... | 0.71164197 | 0 |
Makes a Linode API call to get the list of nodes. | def get_nodes(self):
try:
for node in Linode.search(status=Linode.STATUS_RUNNING):
self.add_node(node)
except chube_api.linode_api.ApiError, e:
print "Looks like Linode's API is down:"
print
print e
sys.exit(1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_nodes(self):\n return requests.get(self.__url + 'nodes').json()",
"def get_nodes(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/nodes\"\n\n _response = self.connector.http_call(\"get\", _url)\n\n # Create the Nodes array but cleanup cache if there is o... | [
"0.7121502",
"0.6617446",
"0.65728307",
"0.6527529",
"0.64565825",
"0.6433634",
"0.6416782",
"0.6389691",
"0.6355934",
"0.6353988",
"0.6350259",
"0.6307714",
"0.62806284",
"0.62762433",
"0.6274298",
"0.6197774",
"0.61561686",
"0.60973465",
"0.6084577",
"0.60462004",
"0.604404... | 0.71513474 | 0 |
Creates self._datacenter_cache, containing all Datacenters indexed by ID. | def populate_datacenter_cache(self):
self._datacenter_cache = {}
dcs = Datacenter.search()
for dc in dcs:
self._datacenter_cache[dc.api_id] = dc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Datacenters(self):\n if not self._datacenters:\n dcs = self._get_objects(vim.Datacenter)\n for dc in dcs:\n self._datacenters[dc.name] = dc\n return self._datacenters",
"def get_datacenters_by(self, datacenter=None, tenant=None, **kwargs):\n if tenant... | [
"0.6505794",
"0.53940344",
"0.5058107",
"0.49381578",
"0.49252507",
"0.48534706",
"0.4819115",
"0.48064002",
"0.47601",
"0.47519144",
"0.4742259",
"0.4740057",
"0.4727511",
"0.47176874",
"0.47031915",
"0.4700527",
"0.4698371",
"0.46855637",
"0.4634698",
"0.46332663",
"0.46316... | 0.80962306 | 0 |
Returns the lowercase city name of the node's data center. | def get_datacenter_city(self, node):
if self._datacenter_cache is None:
self.populate_datacenter_cache()
location = self._datacenter_cache[node.datacenter_id].location
location = location.lower()
location = location.split(",")[0]
return location | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def data_center_name(self) -> str:\n return pulumi.get(self, \"data_center_name\")",
"def data_center_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"data_center_name\")",
"def data_center_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"data_c... | [
"0.74766445",
"0.7180771",
"0.69038516",
"0.69038516",
"0.66939473",
"0.6678729",
"0.6621979",
"0.6542636",
"0.64575845",
"0.6278695",
"0.6257993",
"0.6257993",
"0.6257993",
"0.6257993",
"0.6257993",
"0.6226151",
"0.6226151",
"0.61920005",
"0.614394",
"0.614394",
"0.6131929",... | 0.7814764 | 0 |
Adds a node to the inventory and index. | def add_node(self, node):
public_ip = [addr.address for addr in node.ipaddresses if addr.is_public][0]
dest = public_ip
# Add to index
self.index[dest] = node.api_id
# Inventory: Group by node ID (always a group of 1)
self.inventory[node.label] = [dest]
# Inve... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _add_node(self, node: int) -> None:\r\n self.nodes.add(node)",
"def add_node(self, node):",
"def add_node(self, node):\n self.nodes.append(node)",
"def add_node(self, node):\n self.nodes[node.name] = node\n self.dirty = True",
"def add_node(self, node):\n self.nodes.a... | [
"0.7500117",
"0.73828864",
"0.7321843",
"0.7307978",
"0.72790575",
"0.72646934",
"0.72339076",
"0.7178008",
"0.71437955",
"0.71437955",
"0.7089215",
"0.7021807",
"0.6995974",
"0.69755816",
"0.69561344",
"0.69527453",
"0.69520944",
"0.6948566",
"0.69292915",
"0.68842506",
"0.6... | 0.7719779 | 0 |
Pushes an element onto an array that may not have been defined in the dict. | def push(self, my_dict, key, element):
if key in my_dict:
my_dict[key].append(element);
else:
my_dict[key] = [element] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def push(self, elem):\n pass",
"def push(self, new_element):\n self.array.append(new_element)",
"def push(self, new_element):\n self.arr.append(new_element)\n self.size += 1",
"def __setitem__(self, index, value):\n assert 0 <= index < len(self), \"Array subscript out of ra... | [
"0.61763823",
"0.60454845",
"0.60098",
"0.5906978",
"0.5880794",
"0.5880467",
"0.5862668",
"0.58508754",
"0.5771033",
"0.5766721",
"0.5749718",
"0.5748016",
"0.5746526",
"0.57447404",
"0.5734783",
"0.5690246",
"0.5680334",
"0.5669741",
"0.56696004",
"0.5660492",
"0.56445444",... | 0.6287549 | 0 |
Reads the inventory from the cache file and returns it as a JSON object. | def get_inventory_from_cache(self):
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_from_cache(self):\n try:\n with open(self.cache_filename, 'r') as cache:\n json_data = cache.read()\n data = json.loads(json_data)\n except IOError:\n data = {'data': {}, 'inventory': {}}\n\n self.data = data['data']\n self.invent... | [
"0.7557402",
"0.7366649",
"0.7024034",
"0.6800572",
"0.6741193",
"0.64853036",
"0.62057525",
"0.6125807",
"0.61111367",
"0.60826665",
"0.60408515",
"0.603484",
"0.6020536",
"0.60187274",
"0.6007673",
"0.598478",
"0.5969202",
"0.59221053",
"0.5900663",
"0.58964336",
"0.5871589... | 0.88659257 | 0 |
Reads the index from the cache file and sets self.index. | def load_index_from_cache(self):
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _populate_index(self):\n os.makedirs(self.cache_dir, exist_ok=True)\n local_files = glob('{}/*'.format(self.cache_dir))\n for file in local_files:\n self._add_to_index(os.path.basename(file), os.path.getsize(file))",
"def _load_index(self):\n try:\n with open(self._index_path,... | [
"0.7139627",
"0.69223547",
"0.69004935",
"0.68209773",
"0.66870165",
"0.64805925",
"0.6415881",
"0.64003915",
"0.63989496",
"0.6365108",
"0.6176035",
"0.61550426",
"0.6151214",
"0.6092105",
"0.6086096",
"0.6045057",
"0.6004933",
"0.5955538",
"0.5945302",
"0.59057784",
"0.5904... | 0.8064955 | 0 |
Find the regular expression pattern s in the dictionary. | def findPattern(self,s):
# pat = re.compile('^'+s+'$')
pat = re.compile(s)
results = {}
for k in self.__clidRep.keys():
if pat.match(str(k)) or pat.match(self.__clidRep[k]):
results[k] = self.__clidRep[k]
return results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_by_pattern(self):\n while True: \n word = input(\"Enter a regular expression ex: \\d\\d\\w+. Press Q to \"\n \"quit to the main screen: \")\n if word.upper() in [\"Q\", \"QUIT\", \"EXIT\"]:\n return self.dict_list\n self.find... | [
"0.6469642",
"0.63880825",
"0.63732696",
"0.6253539",
"0.6212993",
"0.61480343",
"0.60889447",
"0.5976892",
"0.594639",
"0.5908699",
"0.5843748",
"0.57777935",
"0.5762092",
"0.5741424",
"0.5741424",
"0.57190794",
"0.57145727",
"0.56568784",
"0.56494045",
"0.5643466",
"0.56325... | 0.8029055 | 0 |
Converts the device list into a JSON string. | def devicelist_to_json(self):
devices_json = json.dumps(self.device_list)
print(devices_json) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def devices_json():\n return [\n {\n \"macAddress\": \"84:F3:EB:21:90:C4\",\n \"lastData\": {\n \"dateutc\": 1546889640000,\n \"baromrelin\": 30.09,\n \"baromabsin\": 24.61,\n \"tempinf\": 68.9,\n \"humidityi... | [
"0.74697614",
"0.6751789",
"0.65418833",
"0.6319735",
"0.61290795",
"0.6120263",
"0.60992014",
"0.60623235",
"0.60572946",
"0.6028789",
"0.5987714",
"0.5979106",
"0.597772",
"0.5972768",
"0.59643567",
"0.59492177",
"0.5925081",
"0.5899812",
"0.5844271",
"0.58301526",
"0.58090... | 0.72180307 | 1 |
returns an integer that represents base_depth for the specified date | def base_depth_for_date(resort_name, date):
resort_table = resort_table_dict[resort_name]
new_date = str(date)
base_depth_to_return = None
query = "SELECT base_depth FROM %s WHERE status_date = to_date(%s::text, 'YYYYMMDD')" %(resort_table, date)
connection = get_connection()
if connection i... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def base_depth_average_for_date(resort_name, date):\n\n resort_table = resort_table_dict[resort_name]\n\n date_month = int(date[4:6])\n date_day = int(date[6:8])\n query = \"SELECT base_depth FROM %s WHERE CAST(EXTRACT(MONTH FROM status_date) AS INTEGER) = %d AND CAST(EXTRACT(DAY FROM status_date) AS I... | [
"0.6894696",
"0.61948436",
"0.61282104",
"0.6101949",
"0.59978324",
"0.57817864",
"0.57461077",
"0.57212085",
"0.56724894",
"0.5652006",
"0.5621178",
"0.56116706",
"0.558995",
"0.5588037",
"0.5577575",
"0.55354685",
"0.5507787",
"0.54877305",
"0.54871655",
"0.54178995",
"0.54... | 0.7155536 | 0 |
returns the average base depth across all years on a specific date | def base_depth_average_for_date(resort_name, date):
resort_table = resort_table_dict[resort_name]
date_month = int(date[4:6])
date_day = int(date[6:8])
query = "SELECT base_depth FROM %s WHERE CAST(EXTRACT(MONTH FROM status_date) AS INTEGER) = %d AND CAST(EXTRACT(DAY FROM status_date) AS INTEGER) = %d... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def avg(year):\r\n df = ouvrir_fichier()\r\n df = df.loc[df[\"year\"].isin([year])]\r\n df = df[(\r\n df[\r\n \"emissions\"\r\n ] == 'Emissions (thousand metric tons of carbon dioxide)'\r\n )]\r\n print(df)\r\n mean_value = df.mean()['value']\r\n resultat =... | [
"0.64000183",
"0.61367154",
"0.6118295",
"0.6117146",
"0.61015445",
"0.6100895",
"0.60893524",
"0.60777545",
"0.6077354",
"0.6042014",
"0.59638566",
"0.5926371",
"0.59044516",
"0.5842373",
"0.5823526",
"0.5815007",
"0.58064413",
"0.57317835",
"0.5730693",
"0.5663666",
"0.5653... | 0.7233447 | 0 |
returns an int that is the average snowfall on this date over all years | def snowfall_average_for_date(resort_name, date):
resort_table = resort_table_dict[resort_name]
date_month = int(date[4:6])
date_day = int(date[6:8])
query = "SELECT snowfall FROM %s WHERE CAST(EXTRACT(MONTH FROM status_date) AS INTEGER) = %d AND CAST(EXTRACT(DAY FROM status_date) AS INTEGER) = %d" %(r... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def five_years_avg_dividend(self) -> float:\n return self._five_years_avg_dividend",
"def max_drawdown_cal_year(self) -> float:\n return float(self.tsdf.groupby([self.tsdf.index.year]).apply(\n lambda x: (x / x.expanding(min_periods=1).max()).min() - 1).min())",
"def av(self, data):\n ... | [
"0.6527509",
"0.63874155",
"0.6387261",
"0.63743424",
"0.62814784",
"0.61865",
"0.6170374",
"0.6097223",
"0.6072147",
"0.6064186",
"0.6035137",
"0.6027913",
"0.6025581",
"0.6023191",
"0.5905982",
"0.5880306",
"0.58751917",
"0.5872359",
"0.5859398",
"0.5838657",
"0.5838657",
... | 0.69543517 | 0 |
returns a date that had the highest snowfall during the specified year | def highest_snowfall_for_year(resort_name, year):
resort_table = resort_table_dict[resort_name]
year = int(year)
query = "SELECT snowfall FROM %s WHERE CAST(EXTRACT(YEAR FROM status_date) AS INTEGER) = %d" %(resort_table, year)
connection = get_connection()
snowfall_list = []
if connection is ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def maxyear():\n\n return datetime.MAXYEAR",
"def latest_season_before(date):\n\tif date.month < 9:\n\t\treturn date.year - 1\n\treturn date.year",
"def max_drawdown_cal_year(self) -> float:\n return float(self.tsdf.groupby([self.tsdf.index.year]).apply(\n lambda x: (x / x.expanding(min_pe... | [
"0.71644264",
"0.70825464",
"0.69648576",
"0.66781336",
"0.64653206",
"0.6443147",
"0.6366688",
"0.6256422",
"0.5998703",
"0.5982007",
"0.5957845",
"0.58945405",
"0.58911014",
"0.5854378",
"0.5853436",
"0.5780126",
"0.57732373",
"0.5755967",
"0.57521063",
"0.57442605",
"0.574... | 0.75053 | 0 |
returns list of snowfall for each date in the period | def snowfall_for_period(resort_name, start_date, end_date):
#yyyymmdd
start_date_year = int(start_date[0:4])
start_date_month = int(start_date[4:6])
start_date_day = int(start_date[6:8])
end_date_year = int(end_date[0:4])
end_date_month = int(end_date[4:6])
end_date_day = int(end_date[6:8]... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def snowfall_for_date(resort_name, date):\n\n resort_table = resort_table_dict[resort_name]\n\n new_date = str(date)\n\n query = \"SELECT snowfall FROM %s WHERE status_date = to_date(%s::text, 'YYYYMMDD')\" %(resort_table, new_date)\n connection = get_connection()\n snowfall_to_return = None\n\n\n ... | [
"0.63649917",
"0.6109526",
"0.6082759",
"0.5757297",
"0.5724909",
"0.5718514",
"0.56843966",
"0.5634992",
"0.56042325",
"0.5594472",
"0.5592005",
"0.5579007",
"0.54786044",
"0.5469435",
"0.5457135",
"0.54524297",
"0.54033566",
"0.53840905",
"0.532132",
"0.53100914",
"0.530515... | 0.7388031 | 0 |
returns list of base_depth for each date in the period | def base_depth_for_period(resort_name, start_date, end_date):
start_date_year = int(start_date[0:4])
start_date_month = int(start_date[4:6])
start_date_day = int(start_date[6:8])
end_date_year = int(end_date[0:4])
end_date_month = int(end_date[4:6])
end_date_day = int(end_date[6:8])
resor... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def base_depth_for_date(resort_name, date):\n\n resort_table = resort_table_dict[resort_name]\n\n new_date = str(date)\n base_depth_to_return = None\n query = \"SELECT base_depth FROM %s WHERE status_date = to_date(%s::text, 'YYYYMMDD')\" %(resort_table, date)\n\n connection = get_connection()\n\n ... | [
"0.6518969",
"0.60030866",
"0.5712574",
"0.54644364",
"0.5354301",
"0.5350619",
"0.53494644",
"0.53494644",
"0.53139776",
"0.52619964",
"0.5192515",
"0.51612735",
"0.5154456",
"0.5154456",
"0.5072636",
"0.50671273",
"0.50522",
"0.5036785",
"0.5036785",
"0.50244904",
"0.501594... | 0.73825467 | 0 |
Downloads the olivetti faces dataset and saves it in the output_filepath directory. | def main(output_filepath):
logger = logging.getLogger(__name__)
logger.info('Downloading Olivetti faces...')
olivetti_faces = fetch_olivetti_faces()
data = pd.DataFrame(data=np.apply_along_axis(exposure.equalize_hist, 1, olivetti_faces.data))
labels = pd.DataFrame(data=olivetti_faces.target)
l... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def maybe_download():\n\n print(\"Downloading Inception 5h Model ...\")\n download.maybe_download_and_extract(url=data_url, download_dir=data_dir)",
"def download_glove ():\n # Get the URL ...\n print(\"Downloading https://nlp.stanford.edu/data/glove.6B.zip ...\")\n res = requests.get(\"https://nlp.stan... | [
"0.59466165",
"0.58815235",
"0.58095616",
"0.5797585",
"0.57857496",
"0.57724977",
"0.5553984",
"0.55409586",
"0.5527344",
"0.5513376",
"0.54741013",
"0.5404894",
"0.539826",
"0.53865135",
"0.5356633",
"0.5356331",
"0.53492486",
"0.53430045",
"0.5337607",
"0.5328182",
"0.5300... | 0.7823156 | 0 |
Perform 1-2 OT for Bob and return Alice's input list m_c without revealing c. | def Bob_OT(c, l, n=100):
# Error handling.
if c != 0 and c != 1:
raise Exception("Input argument c must be either 0 or 1.")
if l > n:
raise Exception("Input argument l cannot be greater than n.")
# (Step 1)
# Bob runs 1-2 ROT.
s_c = Bob_ROT(c, l, n)
# (Step 3)
# Bob rec... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tickets(people):\n people= [100, 50, 25]",
"def getMutation(AA,Codon):\r\n temp_mutationlist = []\r\n '''create a list of possible triplets within hamming distance 1 '''\r\n for item in INI.genetic_code.keys():\r\n isvalid = INI.isvalidtriplet(item,Codon)\r\n ''' Hamming distance 1,... | [
"0.50222504",
"0.4935165",
"0.48622358",
"0.48220435",
"0.47618267",
"0.47593555",
"0.47593555",
"0.4643787",
"0.46382758",
"0.46354654",
"0.4608568",
"0.46082112",
"0.45961824",
"0.45707282",
"0.45663276",
"0.45569414",
"0.4548844",
"0.4518503",
"0.44995117",
"0.44929427",
"... | 0.6884154 | 0 |
Start a daemon with given daemon class. | def run(self, name: str, daemon_class: object, **kwargs) -> None:
if name in self._running_daemons:
raise AlreadyRunningDaemon(
'Daemon with name "{0}" already running'.format(name)
)
logger.info(self, 'Starting daemon with name "{0}" and class "{1}" ...'
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_daemon(self, *args, **kwargs):\n pass",
"def daemon(self):\n obj = self.subparsers.add_parser(\"daemon\", help=\"Daemon scripts\")\n obj.add_argument(\n \"daemon_type\",\n # default=\"all\",\n # const=\"all\",\n nargs=1,\n choi... | [
"0.73497087",
"0.70751816",
"0.66535014",
"0.62650317",
"0.6090769",
"0.6031981",
"0.5863433",
"0.5808442",
"0.5699542",
"0.56858295",
"0.56858295",
"0.56858295",
"0.56858295",
"0.56688225",
"0.5592923",
"0.558033",
"0.54989725",
"0.5492751",
"0.5471203",
"0.54689485",
"0.543... | 0.77797806 | 0 |
Stop the daemon with the given name and wait for it, where name is the name given when the daemon was started with the run method. | def stop(self, name: str) -> None:
if name in self._running_daemons:
logger.info(self, 'Stopping daemon with name "{0}" ...'
.format(name))
self._running_daemons[name].stop()
self._running_daemons[name].join()
del self._running_daemon... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stop(name):\n __salt__[\"file.touch\"](\"{}/down\".format(_service_path(name)))\n cmd = \"svc -d {}\".format(_service_path(name))\n return not __salt__[\"cmd.retcode\"](cmd, python_shell=False)",
"def stop(self):\n \n\n if os.path.isfile(self.pidfilename):\n\n with open(self... | [
"0.6745563",
"0.6272498",
"0.61989063",
"0.61097825",
"0.61078",
"0.61078",
"0.61078",
"0.61078",
"0.61078",
"0.61078",
"0.6076316",
"0.60686696",
"0.5986718",
"0.59339917",
"0.59136623",
"0.58186764",
"0.5801948",
"0.5795598",
"0.57658505",
"0.5718274",
"0.56732404",
"0.56... | 0.8205817 | 0 |
Stop all started daemons and wait for them. | def stop_all(self) -> None:
logger.info(self, 'Stopping all daemons')
for name, daemon in self._running_daemons.items():
logger.info(self, 'Stopping daemon "{0}" ...'.format(name))
daemon.stop()
for name, daemon in self._running_daemons.items():
logger.info(
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stopdaemons(self):\n # TODO: we may want to improve this if we had the PIDs from the\n # specific EMANE daemons that we\"ve started\n cmd = [\"killall\", \"-q\", \"emane\"]\n stop_emane_on_host = False\n if emane.VERSION > emane.EMANE091:\n for node in self.g... | [
"0.6984014",
"0.6787286",
"0.6787142",
"0.67022586",
"0.66824",
"0.66690004",
"0.66061735",
"0.6547983",
"0.64799696",
"0.64767784",
"0.6424291",
"0.6407507",
"0.64050364",
"0.63752985",
"0.63732857",
"0.6344195",
"0.6343308",
"0.63306564",
"0.63221437",
"0.6291567",
"0.62800... | 0.75923276 | 0 |
Add a callback to self._daemon_execute_callbacks. See the service_actions function for its usage. | def append_thread_callback(self, callback: collections.Callable) -> None:
self._daemon_execute_callbacks.append(callback) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def register_post_exec_callback(action_logger):\n logging.debug(\"Adding %s to post execution callback\", action_logger)\n __post_exec_callbacks.append(action_logger)",
"def add_done_callback(self, callback):\n with self._done_condition:\n if self._state in [PENDING, RUNNING]:\n ... | [
"0.595641",
"0.57329416",
"0.5647649",
"0.56453633",
"0.56191623",
"0.55979604",
"0.5584443",
"0.5554504",
"0.5554059",
"0.5503657",
"0.55009544",
"0.5450373",
"0.54332",
"0.5432897",
"0.5395858",
"0.53488904",
"0.53168035",
"0.53141373",
"0.53129905",
"0.53031254",
"0.528276... | 0.71937627 | 0 |
Give the callback to the running server through tracim.lib.daemons.TracimSocketServerMixin.append_thread_callback | def append_thread_callback(self, callback: collections.Callable) -> None:
self._server.append_thread_callback(callback) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def append_thread_callback(self, callback: collections.Callable) -> None:\n raise NotImplementedError()",
"def append_thread_callback(self, callback: collections.Callable) -> None:\n raise NotImplementedError()",
"def append_thread_callback(self, callback: collections.Callable) -> None:\n ... | [
"0.69699574",
"0.69699574",
"0.6759113",
"0.650974",
"0.6113602",
"0.6105394",
"0.6034728",
"0.5840231",
"0.58131206",
"0.5809989",
"0.58065826",
"0.57807076",
"0.5766478",
"0.57587993",
"0.57118356",
"0.57069206",
"0.5706047",
"0.5696736",
"0.5682783",
"0.56253636",
"0.56038... | 0.7782145 | 0 |
Validate that the price amount does not have too many decimal places. The price amount can't have more decimal places than the currency allows. Works only with a Decimal created from a string. | def validate_price_precision(value: Optional["Decimal"], currency: str = None):
# check no needed when there is no value
if not value:
return
currency_fraction = get_currency_fraction(currency or settings.DEFAULT_CURRENCY)
value = value.normalize()
if abs(value.as_tuple().exponent) > curre... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate_price(price):\n try:\n price = float(price)\n except ValueError:\n raise ValueError('Please provide valid price')\n if price < 1:\n raise ValueError('Price should be positive number')\n return price",
"def monetary_amount_valid(record, field_name='price', min=1, max... | [
"0.7102188",
"0.6969148",
"0.67373794",
"0.66555464",
"0.6602999",
"0.65736985",
"0.64711094",
"0.64241284",
"0.64125",
"0.6275703",
"0.61710167",
"0.6137798",
"0.6098253",
"0.6075076",
"0.6048671",
"0.6011906",
"0.5996882",
"0.5977033",
"0.5968822",
"0.5943398",
"0.5926788",... | 0.79515773 | 0 |
Function to handle the initialization of the class. Creates an [x, y] sample for each timestep. Parameters: std, sequenceLength. | def __init__(self, std, sequenceLength, device):
#create data steps from 2 to 10 with the given sequence length
xTimeSteps = np.linspace(2, 10, sequenceLength + 1)
#create numpy array with sin(x) input
yNp = np.zeros((2, sequenceLength + 1))
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _init_sample(self):\n self.timestamps = np.zeros(5)\n self.data = np.zeros((5, 12))",
"def __init__(self, samples):\n self.samples = samples",
"def setUp(self):\n shape = RNG.integers(5, 50)\n periods = self.periods = RNG.normal() * 3\n freq = periods / shape\n ... | [
"0.7491473",
"0.6757173",
"0.6536982",
"0.645627",
"0.6443957",
"0.63328993",
"0.63073266",
"0.63073266",
"0.6270523",
"0.622384",
"0.6216968",
"0.6200106",
"0.61991644",
"0.61531895",
"0.6119851",
"0.6113955",
"0.6103908",
"0.6098615",
"0.609861",
"0.60970324",
"0.6093548",
... | 0.67708 | 1 |
Creates the matrices for the Elman model, in this case W1 and V. Parameters: contextConcatInputLayerSize, hiddenLayerSize, outputLayerSize. | def __init__(self, contextConcatInputLayerSize, hiddenLayerSize, outputLayerSize, device):
super(ElmanNet, self).__init__()
self.hidden_layer_size = hiddenLayerSize
# Initializes the W1 matrix
W1 = torch.zeros((contextConcatInputLayerSize, hiddenLa... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_variables(self):\n self.create_weight_variable(self.input_size + [self.hidden_size[0]], name=\"W1\")\n\n self.create_bias_variable((1, self.hidden_size[0]), name=\"b1\")\n\n for i in range(self.n_hidden-1):\n self.create_weight_variable([self.hidden_size[i], self.hidden_s... | [
"0.60445243",
"0.5994819",
"0.59890795",
"0.5960874",
"0.59296095",
"0.59175307",
"0.59017277",
"0.58717036",
"0.5868812",
"0.5800571",
"0.56979",
"0.56773806",
"0.5673088",
"0.56546074",
"0.56416607",
"0.5627796",
"0.5624328",
"0.55904293",
"0.55746734",
"0.5568759",
"0.5557... | 0.73267615 | 0 |
Function that retrieves the size of the hidden layer | def get_hidden_layer_size(self):
return self.hidden_layer_size | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def layer_size(self, layer_id): # -> int:\n ...",
"def hidden_size(self):\n return self._internal.get_hidden_size()",
"def get_final_emb_size(self):\n size = self.n_layers * 1 * 2 * self.hidden_size\n return size",
"def get_size(self):\n return self._surf.get_size()",
"de... | [
"0.7838551",
"0.7757203",
"0.765589",
"0.7248255",
"0.72339445",
"0.72339445",
"0.71614826",
"0.71602",
"0.71266425",
"0.7090172",
"0.70477694",
"0.70440054",
"0.6969208",
"0.69381",
"0.69150704",
"0.687971",
"0.68564427",
"0.6826497",
"0.6812965",
"0.6804558",
"0.67893684",
... | 0.88835496 | 0 |
Model forward pass. x: current input at t. contextState: previous output at (t-1). Returns the sequence of hidden states. | def forward(self, x, contextState):
#concatenate input and context state
#x = x.t()
xAndContext = torch.cat((x, contextState), 1)
#calculate next context state (hidden output for current t) with tanh(xAndContext * W1)
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def forward(self, prev_state, obs_t):\r\n # Use your network to compute qvalues for given state\r\n #print(state_t.shape)\r\n h = self.conv(obs_t)\r\n\r\n h = h.view(h.size(0), -1)\r\n\r\n new_state = h_new, c_new = self.lstm(h, prev_state)\r\n advantage = self.adv(h_new)\... | [
"0.70666903",
"0.6960144",
"0.6944148",
"0.6924527",
"0.68692386",
"0.68370396",
"0.68172926",
"0.6813111",
"0.68120724",
"0.68052375",
"0.67952406",
"0.6781737",
"0.67794245",
"0.6764326",
"0.6733078",
"0.6679583",
"0.66273844",
"0.6616958",
"0.65847284",
"0.657578",
"0.6555... | 0.7525371 | 0 |
Check if two Elongation objects are equivalent. | def __eq__(self, other):
return isinstance(other, Elongation)\
and len(self.xs) == len(other.xs)\
and all(self.xs == other.xs) and all(self.ys == other.ys)\
and self.gauge_length == other.gauge_length\
and self.sample_width == other.sample_width\
and s... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def areEquivalent(*args):\n return _libsbml.Unit_areEquivalent(*args)",
"def equivalent(self, other):\n return id(self) == id(other)",
"def almost_equals(self, other):\n if self.__class__ is other.__class__ and len(self) == len(other):\n for a, b in zip(self, other):\n ... | [
"0.7179371",
"0.71698356",
"0.70256376",
"0.69242305",
"0.6891672",
"0.6889412",
"0.6868289",
"0.6853487",
"0.6802602",
"0.6787559",
"0.6784947",
"0.6781069",
"0.6712332",
"0.6677166",
"0.6673854",
"0.6671227",
"0.66428155",
"0.66405296",
"0.66279215",
"0.66225433",
"0.660195... | 0.7408706 | 0 |
Generate a smoothed version of the Elongation. | def smoothed(self, box_pts=True):
elong = self.copy()
elong.ys = smooth_curve(self.ys, box_pts)
return elong | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _smooth(self):\n self.te = self._spline(self.rho_in, self.te_in, self.rho)\n self.ne = self._spline(self.rho_in, self.ne_in, self.rho)\n self.ti = self._spline(self.rho_in, self.ti_in, self.rho)\n self.vt = self._spline(self.rho_in, self.vt_in, self.rho)\n for i in range(self... | [
"0.6047224",
"0.5958558",
"0.5911852",
"0.58532757",
"0.58519554",
"0.5808192",
"0.5806572",
"0.5766009",
"0.57573235",
"0.565053",
"0.56344795",
"0.5537342",
"0.55034465",
"0.5393578",
"0.5387983",
"0.538715",
"0.53834933",
"0.5319106",
"0.5317269",
"0.5313368",
"0.53031206"... | 0.6461423 | 0 |
Crop the Elongation by index. | def cropped_index(self, start_i=None, end_i=None, shifted=True):
xs = self.xs[start_i:end_i]
ys = self.ys[start_i:end_i]
if shifted:
xs = xs - xs[0]
return self.__class__(xs, ys, self.gauge_length, self.sample_width, self.sample_thickness, self.name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def crop(self, N):\n self.data = self.data[:,:N]",
"def crop(self):\n return np.array([f.crop() for f in self])",
"def crop(self, timerange):\n\n begin = self.bisect(timerange.begin())\n end = self.bisect(timerange.end(), begin)\n return self.slice(begin, end)",
"def conver... | [
"0.61602694",
"0.5879224",
"0.5870638",
"0.581509",
"0.56682354",
"0.56376",
"0.56352115",
"0.5629972",
"0.5623761",
"0.55894375",
"0.5587319",
"0.5582777",
"0.55521005",
"0.5534629",
"0.5528097",
"0.55064076",
"0.5488465",
"0.54711306",
"0.5463051",
"0.54613936",
"0.5452928"... | 0.60998905 | 1 |
Determine the strain index of break. Break is defined herein as the last peak in the stress/strain curve. | def break_index(self, **kwargs):
return self.peak_indices(**kwargs)[0][-1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getBreakIndices(self):\n for i in self.raw.index[:-1]:\n if self.raw['stress'][i+1] > self.raw['stress'][i] and \\\n self.raw['stress'][i+2] < self.raw['stress'][i+1]:\n brkIdx1 = i+1 # brkIdx1: start of the first unloading\n break\n if... | [
"0.63903123",
"0.5647532",
"0.5634",
"0.54937726",
"0.544508",
"0.5438141",
"0.54027975",
"0.53570503",
"0.5290587",
"0.5280061",
"0.5275725",
"0.5270513",
"0.5268599",
"0.52369666",
"0.52302957",
"0.52163225",
"0.5215555",
"0.5207446",
"0.52039963",
"0.51773316",
"0.5164221"... | 0.6041352 | 1 |
Write Elongation object to a csv file. | def write_csv(elongation, file_name):
e = elongation
with open(file_name, 'w') as f:
f.write(f"""\
Break Load, {e.break_load()}
Break Strength, {e.break_strength()}
Break Elongation, {e.break_elongation()}
Yield Load, {e.yield_load()}
Yield Strength, {e.yield_strength()}
Yield Elongation, {e.yield_elon... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_csv_file(self):\r\n # Create a new csv-file\r\n with open(self.fname, 'w') as f:\r\n writer = csv.writer(f, dialect='excel')\r\n writer.writerow(['set_time',\r\n 'read_time_P_ac',\r\n 'read_time_P_bat',\r\n ... | [
"0.71679187",
"0.7032536",
"0.70268303",
"0.69770074",
"0.6973022",
"0.693288",
"0.6858325",
"0.68417126",
"0.6832732",
"0.68016165",
"0.67204237",
"0.6718283",
"0.671795",
"0.67077386",
"0.6678489",
"0.66233027",
"0.6594838",
"0.65919703",
"0.65797895",
"0.6558252",
"0.65327... | 0.7691217 | 0 |
Read an iterable of elongation files. | def read_elongations(file_names):
return list(itertools.chain(*(read_elongation(f) for f in file_names))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __iter__(self):\r\n example = []\r\n for line in open(self.fullpath):\r\n if line != '\\n':\r\n example.append(line.rstrip()) # remove newline\r\n else:\r\n yield example\r\n example = []",
"def read_files(self):\n for f ... | [
"0.6163643",
"0.61128104",
"0.609269",
"0.6008233",
"0.59725094",
"0.59578764",
"0.59578764",
"0.59162337",
"0.5894706",
"0.5822738",
"0.58118016",
"0.57776636",
"0.5761199",
"0.5705911",
"0.5697656",
"0.5660844",
"0.56368",
"0.5615509",
"0.56117857",
"0.5606222",
"0.5591424"... | 0.64478576 | 0 |
Downloads all files from the SugarSync account to the provided output folder | def download_files(self, output, replace=False):
try:
# Create output directory
# self._output_path = os.path.join(output,
# "sugardl_{}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S")))
# os.makedirs(self._output_path)... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download(urls, dest_folder):\n pass",
"def download_output_files(self):\n bucket_list = self.bucket.list(\"output/part\")\n for bucket_entry in bucket_list:\n key_string = str(bucket_entry.key)\n # check if file exists locally, if not: download it\n if not os.p... | [
"0.6863336",
"0.6838265",
"0.6813811",
"0.6791622",
"0.6458129",
"0.64210194",
"0.63175696",
"0.6252998",
"0.62363803",
"0.621832",
"0.6204603",
"0.6164557",
"0.6153396",
"0.61488926",
"0.6148461",
"0.6134311",
"0.6120213",
"0.60974497",
"0.6057297",
"0.5996962",
"0.5962937",... | 0.74477714 | 0 |
Retrieves user information, including sync folders | def _get_user_info(self):
if not self._refresh_token:
raise ValueError("Refresh Token not set")
# Add access token to the headers
add_headers = dict(self._default_headers)
add_headers['Authorization'] = self._access_token
resp = requests.get(BASE_URL + "user/{}".fo... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_user_info(self) -> str:\n return self._searcher.get_user_info()",
"def get_users_info(): \n \n data = user_obj.get_users_info()\n return data",
"def user_info(self):\n response = self.query('user_info')\n return response",
"def getUserInfo(self, user):\n return pwd.ge... | [
"0.69573396",
"0.68997866",
"0.6759884",
"0.6710544",
"0.6675518",
"0.661304",
"0.6520475",
"0.6491115",
"0.6431651",
"0.63984233",
"0.6313943",
"0.63038987",
"0.6303114",
"0.63017505",
"0.6268096",
"0.62518907",
"0.62275803",
"0.61987466",
"0.61969614",
"0.6171311",
"0.61620... | 0.7239684 | 0 |
Retrieves metadata on all sync folders | def _get_sync_folders(self):
if not self._user_sync_folders_url:
raise ValueError("User sync folders URL not retrieved")
if not self._refresh_token:
raise ValueError("Refresh Token not set")
# Add access token to the headers
add_headers = dict(self._default_hea... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_root_metadata(self):\n r = self._do_request(\n 'get',\n http_server_utils.join_url_components(\n [self._api_drive_endpoint_prefix, 'root']),\n params={'select': 'id,name,fileSystemInfo'})\n return r.json()",
"def syncfolder():",
"def getFol... | [
"0.648162",
"0.60808307",
"0.59680235",
"0.58834165",
"0.58828735",
"0.58178836",
"0.58129156",
"0.57245374",
"0.5677313",
"0.5626248",
"0.5588174",
"0.5587416",
"0.55705136",
"0.55277115",
"0.55202806",
"0.5497931",
"0.54850954",
"0.54689896",
"0.5424635",
"0.5419897",
"0.54... | 0.7083151 | 0 |
If we're unable to establish a connection to the Elasticsearch server, CannotLoadConfiguration (which the circulation manager can understand) is raised instead of an Elasticsearch-specific exception. | def test_elasticsearch_error_in_constructor_becomes_cannotloadconfiguration(self):
# Unlike other tests in this module, this one runs even if no
# ElasticSearch server is running, since it's testing what
# happens if there's a problem communicating with that server.
class Mock(ExternalS... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def check_connection(self, hass: HomeAssistantType):\n from elasticsearch import (\n AuthenticationException,\n AuthorizationException,\n ConnectionError,\n ElasticsearchException,\n SSLError,\n )\n\n client = None\n is_suppor... | [
"0.6263491",
"0.62350416",
"0.6013181",
"0.5960673",
"0.59529793",
"0.5821141",
"0.5678517",
"0.5538262",
"0.5535928",
"0.5493257",
"0.54761046",
"0.5462948",
"0.53717524",
"0.5338984",
"0.5287106",
"0.52525455",
"0.5251487",
"0.5240034",
"0.5202334",
"0.5163459",
"0.51596135... | 0.699837 | 0 |
The name of the search index is the prefix (defined in ExternalSearchTest.setup) plus a version number associated with this version of the core code. | def test_works_index_name(self):
assert "test_index-v4" == self.search.works_index_name(self._db) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def db_index_name(self):\r\n return 'index_{}'.format(self.db_field_name)",
"def index_prefix(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"index_prefix\")",
"def index_prefix(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"index_prefix\")",
"def build_index():\n pas... | [
"0.58981884",
"0.5859757",
"0.5859757",
"0.5726856",
"0.56515306",
"0.5513793",
"0.5505178",
"0.54348093",
"0.53886336",
"0.5384905",
"0.5370095",
"0.5368973",
"0.53638005",
"0.5342818",
"0.5323321",
"0.53231466",
"0.53118646",
"0.53068525",
"0.53068525",
"0.52520263",
"0.521... | 0.6855528 | 0 |
When all the filters are applied to `start`, the result is `finish`. | def filters_to(start, finish):
for find, replace in filters:
start = find.sub(replace, start)
assert start == finish | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def after_all(self) -> None:\r\n for a_filter in self.filters[::-1]:\r\n a_filter.after_all()",
"def analyze(self, start, end):\n return",
"def FilterDone(self, last_bits):\n return last_bits",
"def __call__(self, start):\r\n return self._iterate(start)",
"def catch_up(se... | [
"0.60245126",
"0.58253586",
"0.5557765",
"0.546555",
"0.5420422",
"0.5409369",
"0.53233445",
"0.5279904",
"0.5211791",
"0.51451194",
"0.51429945",
"0.5127559",
"0.50891775",
"0.50891775",
"0.50891775",
"0.50891775",
"0.5051931",
"0.5035896",
"0.49861154",
"0.4938378",
"0.4924... | 0.74531156 | 0 |
Iterate over a WorkList until it ends, and return all of the pages. | def pages(worklist):
pagination = SortKeyPagination(size=2)
facets = Facets(
self._default_library, None, None, order=Facets.ORDER_TITLE
)
pages = []
while pagination:
pages.append(worklist.works(
self._db, f... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_pages(self, url_list):\n page_helper = self.get_page\n pool = ThreadPool(self.max_threads)\n results = pool.map(page_helper, url_list)\n pool.close()\n pool.join()\n return results",
"def pages(self):\n # The page list comes in three sections. Given radiu... | [
"0.6512649",
"0.6426794",
"0.6418355",
"0.63479626",
"0.62923247",
"0.6280523",
"0.6178995",
"0.6132314",
"0.61132336",
"0.6108371",
"0.6048124",
"0.6044336",
"0.60390985",
"0.6030068",
"0.5951634",
"0.59403145",
"0.59379506",
"0.58951074",
"0.5890618",
"0.5873708",
"0.587297... | 0.7885278 | 0 |
Verify that when the books created during test setup are ordered by the given `sort_field`, they show up in the given `order`. Also verify that when the search is ordered descending, the same books show up in the opposite order. This proves that `sort_field` isn't being ignored, creating a test that only succeeds by cha... | def assert_order(sort_field, order, **filter_kwargs):
expect = self._expect_results
facets = Facets(
self._default_library, Facets.COLLECTION_FULL,
Facets.AVAILABLE_ALL, order=sort_field, order_ascending=True
)
expect(order, None, Filter(fa... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_sort(self):\n sort_field = MoveSearchForm.sort\n for value, label in sort_field.kwargs['choices']:\n response = self.do_search(id=u'1', sort=value)\n self.assert_(\n response.tmpl_context.results,\n \"\"\"Sort by {0} doesn't crash\"\"\".for... | [
"0.69119376",
"0.62695354",
"0.59014153",
"0.5880185",
"0.5848647",
"0.5769646",
"0.5743111",
"0.5740987",
"0.56924033",
"0.56718487",
"0.56502676",
"0.5648147",
"0.5643026",
"0.56352484",
"0.56259537",
"0.55134785",
"0.55029243",
"0.5502388",
"0.5476563",
"0.54709595",
"0.54... | 0.7052679 | 0 |
Simulate the creation of an ElasticsearchDSL `Search` object from an ElasticsearchDSL `Query` object. | def query(self, query):
return MockSearch(
self, query, self.nested_filter_calls, self.order,
self._script_fields
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _search(self, query):\n return self._request(query)",
"def search_query(\n self,\n index, # type: str\n query, # type: SearchQuery\n *options, # type: SearchOptions\n **kwargs\n ) -> SearchResult:\n\n query = SearchQueryBuilder.create_search_query_object... | [
"0.7029542",
"0.6688671",
"0.6600027",
"0.6517039",
"0.6485709",
"0.64265573",
"0.64017105",
"0.6352035",
"0.6290121",
"0.61653656",
"0.614457",
"0.61306244",
"0.60751146",
"0.6055303",
"0.60288244",
"0.6017776",
"0.6013076",
"0.59807044",
"0.59282154",
"0.591098",
"0.5907803... | 0.75526977 | 0 |