| query (string · lengths 9–9.05k) | document (string · lengths 10–222k) | metadata (dict) | negatives (list · length 30) | negative_scores (list · length 30) | document_score (string · lengths 4–10) | document_rank (string · 2 classes) |
|---|---|---|---|---|---|---|
Publishes an application by uploading the manifest to the given marketplace | def _publish(client, manifest_path, marketplace, skip, overrides):
try:
manifest_json = check_app_manifest(manifest_path, overrides, marketplace)
app_url = "{}://{}".format(manifest_json["schemes"][0], manifest_json["host"])
app_ip = urlparse(app_url).hostname
if not skip:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def submit(ctx, manifest_path, marketplace, skip, parameters):\n if parameters is not None:\n try:\n parameters = _parse_parameters(parameters)\n except:\n logger.error(\n \"Manifest parameter overrides should be in the form 'key1=\\\"value1\\\" \"\n ... | [
"0.68291336",
"0.6059394",
"0.6052995",
"0.6010262",
"0.5933436",
"0.5847316",
"0.5775076",
"0.57516134",
"0.5748247",
"0.5703469",
"0.56963223",
"0.56790406",
"0.5647197",
"0.56322503",
"0.5598916",
"0.5595371",
"0.5584658",
"0.5561256",
"0.54689205",
"0.5453668",
"0.5450249... | 0.7366084 | 0 |
Queries the marketplace for published apps | def get_search_results(config, client, page):
resp = client.get_published_apps(config.username, page)
resp_json = resp.json()
search_results = resp_json["results"]
if search_results is None or len(search_results) == 0:
logger.info(
click.style("You haven't published any apps to the m... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_apps(provider, query):\n\n workdir = os.path.dirname(os.path.realpath(__file__))\n with open(os.path.join(workdir, '..', 'config.yml')) as f:\n config = yaml.load(f)\n ex = Explorer()\n logging.info('Read bucket: %s', config['SCOOP_BUCKET'])\n apps = ex.get_apps(os.path.expandvars(con... | [
"0.6542337",
"0.65027314",
"0.64569217",
"0.6388464",
"0.618664",
"0.6088148",
"0.6048241",
"0.60335594",
"0.60146016",
"0.6012545",
"0.5858998",
"0.5831379",
"0.57853997",
"0.5776328",
"0.5764713",
"0.57061666",
"0.5654142",
"0.56492996",
"0.56406236",
"0.56354034",
"0.56090... | 0.66877913 | 0 |
Replace "AUTO" in the host and quickbuy with the ZeroTier IP. The server subsequently replaces, in the displayed quickbuy, instances of the manifest host value with a mkt.21.co address. | def replace_auto(manifest_dict, marketplace):
manifest_dict = copy.deepcopy(manifest_dict)
def get_formatted_zerotier_address(marketplace):
host = get_zerotier_address(marketplace)
if "." not in host:
return "[{}]".format(host)
else:
return host
if 'AUTO' in ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _post_task_update_advertise_address():\n default_network_interface = None\n\n with open(KUBE_APISERVER_CONFIG) as f:\n lines = f.read()\n m = re.search(REGEXPR_ADVERTISE_ADDRESS, lines)\n if m:\n default_network_interface = m.group(1)\n LOG.debug(' ... | [
"0.60582787",
"0.6001809",
"0.57579035",
"0.5625545",
"0.5491891",
"0.5420629",
"0.5379404",
"0.5277771",
"0.52422714",
"0.52219576",
"0.52120817",
"0.52100813",
"0.52078915",
"0.5189471",
"0.5156395",
"0.51266915",
"0.5107505",
"0.5092903",
"0.5082044",
"0.5071935",
"0.50623... | 0.7036063 | 0 |
Validates the manifest file. Ensures that the required fields in the manifest are present and valid | def validate_manifest(manifest_json):
manifest_json = copy.deepcopy(manifest_json)
for field in ["schemes", "host", "basePath", "info"]:
if field not in manifest_json:
raise exceptions.ValidationError(
click.style("Field '{}' is missing from the manifest file.", fg="red").for... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_manifest(parser, options):\n if not options.manifest:\n return\n\n template = \"When specifying --manifest, {0} is also required\"\n\n if not options.manifest_id:\n parser.error(template.format(\"--manifest-id\"))\n \n if not options.manifest_service:\n parser.error(template.format(\"--m... | [
"0.73052067",
"0.6979145",
"0.69215554",
"0.68080264",
"0.6786265",
"0.6757262",
"0.6450811",
"0.6440165",
"0.640244",
"0.6351523",
"0.63133943",
"0.62944144",
"0.62856257",
"0.62499046",
"0.62444276",
"0.62436587",
"0.6221382",
"0.61910504",
"0.6152198",
"0.60909945",
"0.606... | 0.741697 | 0 |
Gets the zerotier IP address from the given marketplace name | def get_zerotier_address(marketplace):
logger.info("You might need to enter your superuser password.")
address = zerotier.get_address(marketplace)
if not address:
join_cmd = click.style("21 join", bold=True, reset=False)
no_zt_network = click.style(
"You are not part of the {}. U... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_vip_address(self, vip_name):\n networks = self.nailgun_client.get_networks(self.cluster_id)\n vip = networks.get('vips').get(vip_name, {}).get('ipaddr', None)\n asserts.assert_is_not_none(\n vip, \"Failed to get the IP of {} server\".format(vip_name))\n\n logger.debug... | [
"0.64503324",
"0.63069993",
"0.6197815",
"0.618933",
"0.61700016",
"0.609886",
"0.5992339",
"0.5982672",
"0.5972976",
"0.5954624",
"0.5945781",
"0.5938082",
"0.5936992",
"0.5928978",
"0.59271926",
"0.59166557",
"0.58961654",
"0.5854242",
"0.581978",
"0.58141637",
"0.5809872",... | 0.6974309 | 0 |
Set the display mode to wireframe only | def setDisplayMode(self, mode):
return "Wireframe" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def toggle_wireframe(self):\n self.view['wireframe'] = not self.view['wireframe']\n self.update_flags()",
"def wireframe_only(self):\n return self._wireframe_only",
"def setSurfaceShadingMode(mode='flat'):\n sdict = {'flat':'FLAT','smooth':'SMOOTH'}\n dislin.shdmod(sdict[mode], 'SURF... | [
"0.8165864",
"0.7190839",
"0.67805",
"0.6758721",
"0.65666986",
"0.6441041",
"0.6434427",
"0.6336882",
"0.6314271",
"0.6215526",
"0.61872584",
"0.6129271",
"0.60960007",
"0.6085161",
"0.5989606",
"0.5981274",
"0.59671307",
"0.5965656",
"0.5920257",
"0.58588403",
"0.5820351",
... | 0.7538678 | 1 |
Register an asset required by a dashboard module. Some modules require special scripts or stylesheets, like the | def register_module_asset(self, asset):
self._module_assets.append(asset) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_xmodule_assets():\r\n sh('xmodule_assets common/static/xmodule')",
"def assets():\n pass",
"def assets():",
"def script_info_assets(app, static_dir, testcss):\n InvenioAssets(app)\n\n blueprint = Blueprint(__name__, \"test_bp\", static_folder=static_dir)\n\n class Ext(object):\n ... | [
"0.6323401",
"0.6234134",
"0.5986547",
"0.57688546",
"0.57355887",
"0.57000977",
"0.5596467",
"0.5571452",
"0.55544966",
"0.55511576",
"0.5550094",
"0.54957074",
"0.548881",
"0.54596376",
"0.5429053",
"0.537195",
"0.5304316",
"0.52905613",
"0.52882594",
"0.5239327",
"0.523754... | 0.70824754 | 0 |
Prepare this dashboard instance to run. | def _prepare(self):
# Set configuration defaults and save to the project document
self.config.setdefault('PAGINATION', True)
self.config.setdefault('PER_PAGE', 25)
# Create and configure the Flask application
self.app = self._create_app(self.config)
# Add assets and ro... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n # Wipe the db\n self.wipe_db()\n\n # Set some global things\n try:\n dashboard_configuration = DashboardConfiguration(type=\"default\")\n dashboard_configuration.save()\n except IntegrityError:\n dashboard_configuration = Das... | [
"0.62227356",
"0.61323386",
"0.6124156",
"0.6124156",
"0.6124156",
"0.603257",
"0.6021469",
"0.6008526",
"0.59598756",
"0.5919939",
"0.5891619",
"0.58891463",
"0.5887354",
"0.5885531",
"0.58851296",
"0.58174354",
"0.57714",
"0.57629365",
"0.57341665",
"0.57147014",
"0.5703350... | 0.6481295 | 0 |
Override this method for custom job titles. This method generates job titles. By default, the title is a pretty (but verbose) form of the job state point, based on the project schema. | def job_title(self, job):
def _format_num(num):
if isinstance(num, bool):
return str(num)
elif isinstance(num, Real):
return str(round(num, 2))
return str(num)
try:
s = []
for keys in sorted(self._schema_variabl... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_job_title(self, job_name):\n return ''",
"def _make_title(self):\n ret = self.properties['reason'].capitalize()\n ret += ' has been reported near ' + self.properties['address'].split(',')[0]\n time = datetime.strptime(self.properties['when'], '%Y-%m-%dT%H:%M:%S')\n time... | [
"0.6774112",
"0.6684482",
"0.6545176",
"0.65105885",
"0.64988434",
"0.6343814",
"0.6343415",
"0.6338762",
"0.62752765",
"0.6266623",
"0.6256214",
"0.6256214",
"0.6229947",
"0.6229947",
"0.6226944",
"0.61802864",
"0.61802864",
"0.6152143",
"0.6079925",
"0.60740346",
"0.6057321... | 0.756392 | 0 |
Override this method for custom job subtitles. This method generates job subtitles. By default, the subtitle is a minimal unique substring of the job id. | def job_subtitle(self, job):
return str(job)[:max(8, self._project_min_len_unique_id())] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def subtitle(self, txt):\n num = len(txt)\n ticks = \"-\" * num\n print(txt)\n print(ticks)",
"def getSubtitleURL(self):\n\n # If it is a movie, we use this methodology -\n try:\n IndexingParameters = [\"subtitleUrls\", 0, \"url\"]\n TitleParamters ... | [
"0.6298904",
"0.5891134",
"0.57354397",
"0.57253325",
"0.57177824",
"0.5635711",
"0.5478623",
"0.5465625",
"0.5414711",
"0.54018885",
"0.53634965",
"0.53630906",
"0.535014",
"0.5312166",
"0.5256253",
"0.5206248",
"0.5183688",
"0.5179891",
"0.517678",
"0.516628",
"0.51632917",... | 0.74129283 | 0 |
Override this method for custom job sorting. This method returns a key that can be compared to sort jobs. By | def job_sorter(self, job):
key = natsort.natsort_keygen(key=self.job_title, alg=natsort.REAL)
return key(job) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def job_priority_key(self, job):\n raise NotImplemented",
"def sort_key(self):\n ...",
"def get_sort_key(self) -> str:\n return self.name",
"def job_priority_key(self, job):\n camp, user = job.camp, job.user\n end = camp.time_left / user.shares # lower value -> higher prio... | [
"0.75236887",
"0.74009395",
"0.71635604",
"0.7031687",
"0.64361453",
"0.6359381",
"0.62978643",
"0.6156635",
"0.61132455",
"0.6011168",
"0.59698707",
"0.59516037",
"0.5946783",
"0.592785",
"0.5922575",
"0.59023565",
"0.58991647",
"0.58991647",
"0.5893522",
"0.5864389",
"0.584... | 0.78755414 | 0 |
Registers routes with the Flask application. This method configures context processors and templates, and sets up routes for a basic Dashboard instance. Additionally, routes declared by modules are registered by this method. | def _register_routes(self):
dashboard = self
@dashboard.app.after_request
def prevent_caching(response):
if 'Cache-Control' not in response.headers:
response.headers['Cache-Control'] = 'no-store'
return response
@dashboard.app.context_processor
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_routes(self):\n# from server.flask import views as flask_views\n# flask_views_custom_methods = filter(lambda x: x.startswith(\"view_\"), dir(flask_views))\n# for custom_method in flask_views_custom_methods:\n# # Retrieve data needed to add the URL rule to the Flask app\n# ... | [
"0.72350115",
"0.6712779",
"0.67111504",
"0.66817683",
"0.6653695",
"0.6587917",
"0.6505443",
"0.64713275",
"0.6450273",
"0.63990045",
"0.63630635",
"0.63256997",
"0.63007975",
"0.6283785",
"0.6241679",
"0.6237398",
"0.62150884",
"0.61836624",
"0.6177383",
"0.6159926",
"0.612... | 0.7511353 | 0 |
Clear project and dashboard server caches. The dashboard relies on caching for performance. If the data space is altered, this method may need to be called before the dashboard reflects those changes. | def update_cache(self):
# Try to update signac project cache. Requires signac 0.9.2 or later.
with warnings.catch_warnings():
warnings.simplefilter(action='ignore', category=FutureWarning)
try:
self.project.update_cache()
except Exception:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear_cache():\n # TODO\n pass",
"def clear_cache(self):\n\n for dataset in self._datasets:\n dataset.clear_cache()",
"def clear_cache(self):\n pass",
"def _clear_cache(self):\n keys = [\"nodes\", \"availability\", \"capacity\", \"cost\"]\n for key in ... | [
"0.67237765",
"0.67170703",
"0.6651066",
"0.65608287",
"0.6541997",
"0.64387864",
"0.642453",
"0.63758636",
"0.6363097",
"0.6251716",
"0.62509453",
"0.62315863",
"0.6219807",
"0.6218337",
"0.6209813",
"0.62015516",
"0.61923134",
"0.61808413",
"0.6152699",
"0.61314887",
"0.611... | 0.72654325 | 0 |
Runs the command line interface. Call this function to use signac-dashboard from its command line interface. | def main(self):
def _run(args):
kwargs = vars(args)
if kwargs.get('host', None) is not None:
self.config['HOST'] = kwargs.pop('host')
if kwargs.get('port', None) is not None:
self.config['PORT'] = kwargs.pop('port')
self.config['PR... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cli():\n config, auth, execute_now = read_command_line_arguments()\n main(config, auth, execute_now)",
"def cli():\n pass",
"def main(args):\n cli = CLI()\n # Check arguments\n cli.parse_arguments(args)",
"def main_cli():\n pass",
"def main():\n CLI_APP.run()",
"def cli():... | [
"0.73368174",
"0.6704575",
"0.66263586",
"0.6620892",
"0.65879875",
"0.65856606",
"0.6576689",
"0.6504645",
"0.64861155",
"0.6461405",
"0.6461405",
"0.6461405",
"0.6461405",
"0.6461405",
"0.6461405",
"0.6461405",
"0.6461405",
"0.6461405",
"0.6461405",
"0.6461405",
"0.6461405"... | 0.70329654 | 1 |
Test the popxl simple addition example | def test_documentation_popxl_addition(self):
filename = "simple_addition.py"
self.run_python(filename, file_dir=working_dir, working_dir=working_dir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_add_numbers(self):\n self.assertEqual(add(3, 8), 11)",
"def test_add_numbers(self):\n self.assertEqual(add(3, 8), 11)",
"def test_add_numbers(self):\n self.assertEqual(add(3, 8), 11)",
"def test_and_numbers(self):\n self.assertEqual(add(3,8), 11)",
"def test_add_two_num... | [
"0.65502137",
"0.65502137",
"0.65502137",
"0.64431727",
"0.63226366",
"0.6308587",
"0.628632",
"0.6280519",
"0.6253776",
"0.6226606",
"0.62078625",
"0.61861867",
"0.61754084",
"0.6170412",
"0.61692727",
"0.615465",
"0.6137472",
"0.61353207",
"0.6135082",
"0.6111764",
"0.60554... | 0.70790774 | 0 |
Test the popxl basic subgraph example | def test_documentation_popxl_basic_subgraph(self):
filename = "basic_graph.py"
self.run_python(filename, file_dir=working_dir, working_dir=working_dir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_documentation_popxl_create_multi_subgraph(self):\n filename = \"create_multi_graphs_from_same_func.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)",
"def sub_graph_merging(self):",
"def show_subgraph(dfs_codes, nsupport, mapper):\n\tglobal __subgraph_count... | [
"0.75176567",
"0.62960446",
"0.60611916",
"0.6042744",
"0.6038377",
"0.5965643",
"0.5938931",
"0.58985436",
"0.587293",
"0.5843227",
"0.58157665",
"0.5796282",
"0.5735608",
"0.5665891",
"0.5604234",
"0.5579183",
"0.5560783",
"0.5559308",
"0.5536412",
"0.5530756",
"0.5525885",... | 0.8505168 | 0 |
Test the popxl replication example | def test_documentation_popxl_replication(self):
filename = "replication.py"
self.run_python(filename, file_dir=working_dir, working_dir=working_dir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_documentation_popxl_mnist_replication_train(self):\n filename = \"mnist_rts.py --replication-factor 2\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)",
"def test_replicate_pg_to_pg(self):\n # TODO - Real and more complex e2e tests will be added here\n ... | [
"0.6849515",
"0.5858241",
"0.567002",
"0.5621691",
"0.5603418",
"0.5590965",
"0.55826133",
"0.55337054",
"0.55278075",
"0.5499861",
"0.5477127",
"0.539578",
"0.53619903",
"0.5334775",
"0.5324242",
"0.53177917",
"0.53116417",
"0.53056175",
"0.52885664",
"0.5232968",
"0.5212479... | 0.8200137 | 0 |
Test the popxl create multiple subgraph example | def test_documentation_popxl_create_multi_subgraph(self):
filename = "create_multi_graphs_from_same_func.py"
self.run_python(filename, file_dir=working_dir, working_dir=working_dir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_documentation_popxl_basic_subgraph(self):\n filename = \"basic_graph.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)",
"def sub_graph_merging(self):",
"def populate_graph(self):",
"def test_documentation_popxl_repeat_2(self):\n filename = \"repeat... | [
"0.79100144",
"0.65580076",
"0.64052117",
"0.63540345",
"0.6348667",
"0.61608034",
"0.6133355",
"0.5854858",
"0.5819954",
"0.5817296",
"0.5729944",
"0.57231635",
"0.5651493",
"0.5646747",
"0.5616531",
"0.5610424",
"0.56057763",
"0.5573422",
"0.55470526",
"0.5529936",
"0.55223... | 0.8254092 | 0 |
Test the code loading example | def test_documentation_popxl_code_loading(self):
filename = "code_loading.py"
self.run_python(filename, file_dir=working_dir, working_dir=working_dir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_example(decorated_example):\n import visual_coding_2p_analysis",
"def test_examples():\n import airconics\n # pytest runs test files in ./__pycache__: need to go up two levels\n example_dir = os.path.abspath(\n os.path.join(__file__, '..', '..', 'examples', 'core'))\n example_scrip... | [
"0.7195194",
"0.7059802",
"0.6890149",
"0.6869557",
"0.6869557",
"0.6869557",
"0.6869557",
"0.67875314",
"0.6767249",
"0.6749942",
"0.67483723",
"0.67235684",
"0.6716485",
"0.6689319",
"0.66765344",
"0.66539055",
"0.66304076",
"0.6627514",
"0.65707266",
"0.6563701",
"0.654868... | 0.74833703 | 0 |
Test the nested code loading example | def test_documentation_popxl_nested_code_loading(self):
filename = "code_loading_nested.py"
self.run_python(filename, file_dir=working_dir, working_dir=working_dir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_example(decorated_example):\n import visual_coding_2p_analysis",
"def inner_test():\n pass",
"def inner_test():\n pass",
"def test_documentation_popxl_code_loading(self):\n filename = \"code_loading.py\"\n self.run_python(filename, file_dir=working_dir, working... | [
"0.6656562",
"0.6570933",
"0.6570933",
"0.6489614",
"0.6259519",
"0.6246904",
"0.61843383",
"0.61584216",
"0.61110884",
"0.61107814",
"0.6097114",
"0.6087612",
"0.60835487",
"0.60752654",
"0.60712975",
"0.6059714",
"0.60356414",
"0.60127175",
"0.60122466",
"0.601197",
"0.5992... | 0.83801514 | 0 |
Test the nested Session contexts example | def test_documentation_popxl_nested_session_contexts(self):
filename = "nested_session_contexts.py"
self.run_python(filename, file_dir=working_dir, working_dir=working_dir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_set_session():",
"def test_resource(data_manager):\n sessions = set([])\n with data_manager.dal():\n context1 = current_context._get_current_object()\n session = context1.sqlalchemy\n assert isinstance(session, orm.Session)\n sessions.add(session)\n\n with data_m... | [
"0.67018175",
"0.6668321",
"0.6427891",
"0.63258743",
"0.62819177",
"0.6264992",
"0.6244206",
"0.6197762",
"0.6128129",
"0.60963386",
"0.59784377",
"0.59195083",
"0.5882602",
"0.58783644",
"0.5873574",
"0.58591706",
"0.5849194",
"0.584282",
"0.5837098",
"0.58088905",
"0.58034... | 0.8149533 | 0 |
Test the popxl call_with_info example | def test_documentation_popxl_call_with_info(self):
filename = "call_with_info.py"
self.run_python(filename, file_dir=working_dir, working_dir=working_dir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hxlinfo():\n run_script(hxlinfo_main)",
"def test_get_info(self):\n pass",
"def test_application_info(self, mocked_serial, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n... | [
"0.6383911",
"0.5714342",
"0.55663514",
"0.5375583",
"0.532167",
"0.52700174",
"0.5258182",
"0.5240253",
"0.51907164",
"0.5187251",
"0.51472026",
"0.50939924",
"0.5084021",
"0.50806963",
"0.507654",
"0.5028689",
"0.50142485",
"0.49954024",
"0.4964053",
"0.49308",
"0.49162632"... | 0.82054126 | 0 |
Test the popxl basic repeat example | def test_documentation_popxl_repeat_0(self):
filename = "repeat_graph_0.py"
self.run_python(filename, file_dir=working_dir, working_dir=working_dir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_documentation_popxl_repeat_1(self):\n filename = \"repeat_graph_1.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)",
"def test_documentation_popxl_repeat_2(self):\n filename = \"repeat_graph_2.py\"\n self.run_python(filename, file_dir=working_di... | [
"0.6505946",
"0.6401242",
"0.58164656",
"0.54440105",
"0.53839743",
"0.5373807",
"0.53707576",
"0.53479195",
"0.5337674",
"0.5326383",
"0.5316677",
"0.53121865",
"0.53121865",
"0.5300285",
"0.5297226",
"0.52901715",
"0.5276409",
"0.5268856",
"0.5196304",
"0.5196304",
"0.51730... | 0.6540377 | 0 |
Test the popxl getting / setting tensor data example | def test_documentation_popxl_get_set_tensors(self):
filename = "tensor_get_write.py"
self.run_python(filename, file_dir=working_dir, working_dir=working_dir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_documentation_popxl_addition_variable(self):\n filename = \"tensor_addition.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)",
"def test_predictor():",
"def test_loadData():\n \n sys = LVsystem.Ecosystem()\n \n sys.loadSetup('2Prey1Predator')\n ... | [
"0.6889487",
"0.65411484",
"0.62041",
"0.61115885",
"0.60792667",
"0.60625935",
"0.5973008",
"0.5798675",
"0.579797",
"0.57839084",
"0.5775764",
"0.5711267",
"0.57093495",
"0.5704983",
"0.5672286",
"0.5643146",
"0.5622259",
"0.5604334",
"0.5597279",
"0.5581926",
"0.558076",
... | 0.7280651 | 0 |
Test the popxl autodiff op | def test_documentation_popxl_autodiff(self):
filename = "autodiff.py"
self.run_python(filename, file_dir=working_dir, working_dir=working_dir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_data_norange(self):\n ex = self.ex\n m = self.m\n n = self.n\n\n nreps = random.randint(1, 10)\n lensumrange = random.randint(1, 10)\n\n ex.nreps = nreps\n ex.sumrange = [\"j\", range(lensumrange)]\n ex.vary[\"X\"][\"with\"].add(\"rep\")\n ex.... | [
"0.49731633",
"0.49007177",
"0.4868027",
"0.48571473",
"0.48517647",
"0.48516244",
"0.4770387",
"0.47574338",
"0.474696",
"0.4732291",
"0.47281486",
"0.47268423",
"0.47249767",
"0.47073162",
"0.46844417",
"0.46804345",
"0.46610695",
"0.4651014",
"0.4639932",
"0.46340024",
"0.... | 0.6394983 | 0 |
Test the popxl in sequence context manager | def test_documentation_popxl_in_sequence(self):
filename = "in_sequence.py"
self.run_python(filename, file_dir=working_dir, working_dir=working_dir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_pop_methods(self):\n\n batch = Batch(Mock())\n\n # mock BatchRequests\n mock_obj = Mock()\n mock_ref = Mock()\n batch._objects_batch = mock_obj\n batch._reference_batch = mock_ref\n\n mock_obj.pop.assert_not_called()\n mock_ref.pop.assert_not_called(... | [
"0.58948493",
"0.5882581",
"0.5770228",
"0.54232585",
"0.54092556",
"0.54075843",
"0.5373329",
"0.5303253",
"0.53030944",
"0.5287886",
"0.52534574",
"0.5221736",
"0.52018183",
"0.5189823",
"0.5175281",
"0.5172834",
"0.5172073",
"0.51521003",
"0.51421815",
"0.5127545",
"0.5124... | 0.65936136 | 0 |
Test the popxl remote variable | def test_documentation_popxl_remote_var(self):
filename = "remote_variable.py"
self.run_python(filename, file_dir=working_dir, working_dir=working_dir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_documentation_popxl_remote_rts_var(self):\n filename = \"remote_rts_var.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)",
"def test_documentation_popxl_rts_var(self):\n filename = \"rts_var.py\"\n self.run_python(filename, file_dir=working_dir,... | [
"0.62325144",
"0.53577113",
"0.53224456",
"0.5209967",
"0.51961607",
"0.5094487",
"0.50844604",
"0.5058393",
"0.5020116",
"0.49631917",
"0.49456343",
"0.49408296",
"0.4912187",
"0.48899758",
"0.48812777",
"0.48232034",
"0.47519946",
"0.46955076",
"0.46878415",
"0.46760944",
"... | 0.70022154 | 0 |
Test the popxl remote rts variable | def test_documentation_popxl_remote_rts_var(self):
filename = "remote_rts_var.py"
self.run_python(filename, file_dir=working_dir, working_dir=working_dir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_documentation_popxl_remote_var(self):\n filename = \"remote_variable.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)",
"def test_documentation_popxl_rts_var(self):\n filename = \"rts_var.py\"\n self.run_python(filename, file_dir=working_dir, wo... | [
"0.6584408",
"0.6459491",
"0.5550423",
"0.54029787",
"0.53768945",
"0.5342253",
"0.528288",
"0.52809453",
"0.5237307",
"0.51818335",
"0.5181425",
"0.50585806",
"0.5056922",
"0.50432426",
"0.503633",
"0.50314456",
"0.50298536",
"0.50265086",
"0.5013454",
"0.500096",
"0.4989955... | 0.6953812 | 0 |
Test the popxl rts variable | def test_documentation_popxl_rts_var(self):
filename = "rts_var.py"
self.run_python(filename, file_dir=working_dir, working_dir=working_dir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test(self):\n # 0x13 is nop\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % self.target.ram)\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % (self.target.ram + 4))\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % (self.target.ram + 8))\n self.gdb.p(\"$pc=0x%x\" % self.target.ram)... | [
"0.5512495",
"0.54203445",
"0.5234334",
"0.5186235",
"0.513492",
"0.5132137",
"0.5124492",
"0.5119325",
"0.5085518",
"0.50540435",
"0.50286376",
"0.5028228",
"0.50170285",
"0.50151104",
"0.50103855",
"0.4989811",
"0.49670818",
"0.49645856",
"0.49455714",
"0.493474",
"0.493474... | 0.6135992 | 0 |
Test the popxl mnist with replication example | def test_documentation_popxl_mnist_replication_train(self):
filename = "mnist_rts.py --replication-factor 2"
self.run_python(filename, file_dir=working_dir, working_dir=working_dir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n\n os.system(\"rm -rf images; mkdir images\")\n\n if (len(sys.argv) > 1):\n N = int(sys.argv[1])\n else:\n N = 10\n\n x_test = np.load(\"../../../../data/mnist/mnist_test_images.npy\")\n\n for i in range(N):\n r,c = random.randint(6,12), random.randint(6,12)\n ... | [
"0.664523",
"0.6479431",
"0.6476196",
"0.64748347",
"0.64239633",
"0.64103997",
"0.6363367",
"0.6277",
"0.62729543",
"0.61764646",
"0.60306984",
"0.596759",
"0.5963632",
"0.5930057",
"0.58732194",
"0.58112574",
"0.58030224",
"0.5776231",
"0.57753116",
"0.56806064",
"0.5629193... | 0.7531117 | 0 |
Test the popxl mnist with RTS example | def test_documentation_popxl_mnist_rts_train(self):
filename = "mnist_rts.py --replication-factor 2 --rts"
self.run_python(filename, file_dir=working_dir, working_dir=working_dir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_documentation_popxl_mnist_rts_train_test(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts --test\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)",
"def mnist_testing(shuffled = True):\n mndata = MNIST(MNIST_PATH)\n test_ims, test_labels = m... | [
"0.69393843",
"0.6572477",
"0.6475336",
"0.6470662",
"0.63731503",
"0.6351866",
"0.62574834",
"0.62303245",
"0.6214079",
"0.61291355",
"0.6029285",
"0.6016668",
"0.601599",
"0.6014156",
"0.59936774",
"0.5969918",
"0.595324",
"0.5940546",
"0.59249955",
"0.5917552",
"0.58708",
... | 0.6889731 | 1 |
Test the popxl mnist with RTS example | def test_documentation_popxl_mnist_rts_train_test(self):
filename = "mnist_rts.py --replication-factor 2 --rts --test"
self.run_python(filename, file_dir=working_dir, working_dir=working_dir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_documentation_popxl_mnist_rts_train(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)",
"def mnist_testing(shuffled = True):\n mndata = MNIST(MNIST_PATH)\n test_ims, test_labels = mndata.load_t... | [
"0.6889731",
"0.6572477",
"0.6475336",
"0.6470662",
"0.63731503",
"0.6351866",
"0.62574834",
"0.62303245",
"0.6214079",
"0.61291355",
"0.6029285",
"0.6016668",
"0.601599",
"0.6014156",
"0.59936774",
"0.5969918",
"0.595324",
"0.5940546",
"0.59249955",
"0.5917552",
"0.58708",
... | 0.69393843 | 0 |
Sets the errors of this MigrateListingResponse. | def errors(self, errors):
self._errors = errors | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def errors(self, errors):\n \n self._errors = errors",
"def validation_errors(self, validation_errors):\n self._validation_errors = validation_errors",
"def errors(self) -> pulumi.Output[Sequence['outputs.BatchAIErrorResponse']]:\n return pulumi.get(self, \"errors\")",
"def errors... | [
"0.6449945",
"0.586166",
"0.5679023",
"0.56335896",
"0.5626413",
"0.56153715",
"0.56153715",
"0.55020136",
"0.54732645",
"0.5438516",
"0.5403028",
"0.53804886",
"0.5344446",
"0.53443104",
"0.532449",
"0.5315556",
"0.5290784",
"0.52629125",
"0.52527165",
"0.52467006",
"0.52452... | 0.63594306 | 1 |
Sets the inventory_item_group_key of this MigrateListingResponse. | def inventory_item_group_key(self, inventory_item_group_key):
self._inventory_item_group_key = inventory_item_group_key | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def with_group_key(self, group_key):\n self.group_key = group_key\n return self",
"def add_inventory_group(self, key):\n host_dict = {'hosts': [], 'vars': {}}\n self.inventory[key] = host_dict\n return",
"def group_id(self, group_id):\n\n self._group_id = group_id",
... | [
"0.61608106",
"0.5747907",
"0.5501611",
"0.5501611",
"0.5501611",
"0.5501611",
"0.5501611",
"0.5501611",
"0.54646176",
"0.5375903",
"0.53397626",
"0.53107274",
"0.53079605",
"0.53079605",
"0.53079605",
"0.5276046",
"0.5275524",
"0.5234625",
"0.51415646",
"0.5023584",
"0.50226... | 0.8215857 | 0 |
Sets the inventory_items of this MigrateListingResponse. | def inventory_items(self, inventory_items):
self._inventory_items = inventory_items | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def inventory(self, inventory):\n\n self._inventory = inventory",
"def inventory_id(self, inventory_id):\n\n self._inventory_id = inventory_id",
"def items(self, items: List[InlineResponse200Items]):\n if items is None:\n raise ValueError(\"Invalid value for `items`, must not be... | [
"0.64316744",
"0.5630393",
"0.56029606",
"0.53314924",
"0.52890193",
"0.52213633",
"0.5127491",
"0.5053964",
"0.50323474",
"0.50142586",
"0.49879345",
"0.4986131",
"0.48904306",
"0.48881936",
"0.48744634",
"0.4842486",
"0.48069787",
"0.466679",
"0.4665811",
"0.46414807",
"0.4... | 0.78284794 | 0 |
Sets the listing_id of this MigrateListingResponse. | def listing_id(self, listing_id):
self._listing_id = listing_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_listing(request, listing_id):\n listing = get_object_or_404(Listing, pk=listing_id)\n\n listing.delete()\n messages.success(\n request,\n 'Your listing has been removed from the database.')\n\n return redirect(reverse('addlisting'))",
"def update(self, amz_listing):\n ... | [
"0.54151386",
"0.50424653",
"0.49489492",
"0.49403444",
"0.454123",
"0.4485277",
"0.4483447",
"0.44773117",
"0.44772324",
"0.44687784",
"0.4437293",
"0.44051337",
"0.4390934",
"0.4387617",
"0.43608093",
"0.43608093",
"0.4329542",
"0.430929",
"0.4308864",
"0.4278942",
"0.42528... | 0.8304697 | 0 |
Sets the marketplace_id of this MigrateListingResponse. | def marketplace_id(self, marketplace_id):
self._marketplace_id = marketplace_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_AWSMarketplaceId(self, value):\n super(ListOrdersInputSet, self)._set_input('AWSMarketplaceId', value)",
"def registration_marketplace_id(self, registration_marketplace_id):\n\n self._registration_marketplace_id = registration_marketplace_id",
"def listing_id(self, listing_id):\n\n ... | [
"0.68738204",
"0.60930204",
"0.5767067",
"0.5177802",
"0.5172527",
"0.49900728",
"0.49834523",
"0.49834523",
"0.49834523",
"0.49834523",
"0.49759004",
"0.49217004",
"0.4853357",
"0.48184267",
"0.47949788",
"0.47403908",
"0.4719058",
"0.46994156",
"0.46824563",
"0.4669242",
"0... | 0.7607272 | 0 |
Sets the status_code of this MigrateListingResponse. | def status_code(self, status_code):
self._status_code = status_code | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def status_code(self, status_code):\n allowed_values = [1, 100, 101, 102, 103, 104, 105] # noqa: E501\n if self.local_vars_configuration.client_side_validation and status_code not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `status_code` ({0})... | [
"0.62368333",
"0.61369103",
"0.59862417",
"0.5981169",
"0.5981169",
"0.5981169",
"0.5980721",
"0.5924389",
"0.5924389",
"0.5924389",
"0.5924389",
"0.5924389",
"0.5924389",
"0.5924389",
"0.5924389",
"0.5924389",
"0.5924389",
"0.5924389",
"0.5924389",
"0.5924389",
"0.5924389",
... | 0.6872091 | 0 |
Sets the warnings of this MigrateListingResponse. | def warnings(self, warnings):
self._warnings = warnings | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def warnings(self):\n return self.__warnings",
"def allow_warnings(self, allow_warnings):\n self._allow_warnings = allow_warnings",
"def setwarnings(self, on):\n # diese Funktion macht eigentlich nichts, ist aber wegen der Kombatibilitaet vorhanden\n print(f\"setwarnings: {on}\")",
... | [
"0.6353443",
"0.6246493",
"0.61963874",
"0.6170872",
"0.61554074",
"0.60583675",
"0.6048729",
"0.6048729",
"0.5980356",
"0.5967424",
"0.5896825",
"0.58848745",
"0.5873911",
"0.5829192",
"0.5811515",
"0.56476164",
"0.56447476",
"0.56372",
"0.562612",
"0.5593209",
"0.5593209",
... | 0.74362904 | 0 |
Return 'WHERE' clause that implements kwds_filter constraints. | def _build_where_clause(**kwds_filter):
clause = []
params = []
items = kwds_filter.items()
items = sorted(items, key=lambda x: x[0]) # Ordered by key.
for key, val in items:
if _is_nsiterable(val):
clause.append(key + ' IN (%s)' % (', '.join('?' * le... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _build_where_clause(**kwds_filter):\n clause = []\n params = []\n items = kwds_filter.items()\n items = sorted(items, key=lambda x: x[0]) # Ordered by key.\n for key, val in items:\n if nonstringiter(val):\n clause.append(key + ' IN (%s)' % (', '.jo... | [
"0.70142585",
"0.68189335",
"0.66024506",
"0.6276264",
"0.6107378",
"0.6103338",
"0.6057521",
"0.5986232",
"0.59493124",
"0.5914506",
"0.5914506",
"0.5914506",
"0.58369356",
"0.5758533",
"0.569798",
"0.56843215",
"0.56451374",
"0.5600097",
"0.55459034",
"0.5538299",
"0.553082... | 0.7027632 | 0 |
Normalize value for use as SQLite column name. | def _normalize_column(column):
if not isinstance(column, str):
msg = "expected column of type 'str', got {0!r} instead"
raise TypeError(msg.format(column.__class__.__name__))
column = column.strip()
column = column.replace('"', '""') # Escape quotes.
if column ==... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def normalize(self, value):\n return str(value)",
"def _normalize_expanded_field(value):\n\n value = value.strip()\n value = re.sub(r'\\s{2,}', ' ', value)\n value = re.sub(r'/{2,}', '/', value)\n value = re.sub(r'\\\\{2,}', '\\\\\\\\', value)\n value = re.sub(r'-{2,}', '-', value)\n val... | [
"0.67484534",
"0.6414139",
"0.64000803",
"0.6394291",
"0.62145936",
"0.62083966",
"0.62030125",
"0.6104114",
"0.6069231",
"0.604914",
"0.59973955",
"0.5969188",
"0.59618396",
"0.59541255",
"0.5941385",
"0.5940318",
"0.58399045",
"0.5810996",
"0.5807061",
"0.5782344",
"0.57602... | 0.6757992 | 1 |
Alternate constructor to load an existing collection of records into a temporary SQLite database. Loads data (an iterable of lists, tuples, or dicts) into a temporary table | def from_records(cls, data, columns=None):
temptable = TemporarySqliteTable(data, columns)
return cls(temptable.connection, temptable.name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def from_records(cls, data, columns=None):\n connection, table = _load_temp_sqlite_table(columns, data)\n return cls(connection, table)",
"def load_data(cursor, table, *args, **kwds):\n try:\n records, = args\n columns = None\n except ValueError:\n columns, records = args... | [
"0.6956785",
"0.6560394",
"0.5986285",
"0.597718",
"0.5919446",
"0.5904428",
"0.58205336",
"0.57884705",
"0.5786599",
"0.57413965",
"0.5737391",
"0.57340336",
"0.56946224",
"0.5691723",
"0.56570345",
"0.56243944",
"0.56065536",
"0.5595917",
"0.55688184",
"0.5563457",
"0.55514... | 0.7079159 | 0 |
Parse the auditbeat log file to generate the audit event model and, optionally, write to the result file | def parse(self, output=True):
if not self.type == LogType.audit:
log.error("LogParser doesn't support nonetype yet.")
return
stashes = list()
with open(self.path_log, 'r') as f:
for line in f.readlines():
event: Dict = json.loads(line)
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_file(self):\n with open(self.file_name, 'r', errors='ignore') as log_file:\n for line in log_file:\n self.process_line(line)",
"def __parse(self):\n lines = self.file.readlines()\n name_idx = 2\n name_idx_found = False\n pathre = re.compile(r... | [
"0.61394227",
"0.6064906",
"0.59986395",
"0.5834477",
"0.5793847",
"0.57524127",
"0.5733288",
"0.5686557",
"0.5646877",
"0.55704165",
"0.55531",
"0.5498819",
"0.5413356",
"0.53873146",
"0.5380587",
"0.5363391",
"0.5344947",
"0.53289014",
"0.53170085",
"0.5265918",
"0.5260259"... | 0.63416183 | 0 |
Initialise clusters by alternating the bins to which the vectors are assigned. | def alternating_bins_initialisation(self, pixel_data, a=None, b=None):
if not a or not b:
a = 0
b = len(pixel_data)
clusters = defaultdict(list)
for i in range(a, b): # selecting sevens as data set
clusters[i % self.K].append(pixel_data[i])
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __initCluster(self):\n data_size, cluster_center = self.data_size, self.cluster_center\n self.cluster_temp = np.zeros(data_size, dtype=int)\n self.cluster_upper_bound = np.full(len(cluster_center), float('inf'), dtype=float)\n for center in cluster_center:\n self.cluster_... | [
"0.73569727",
"0.6890101",
"0.66579676",
"0.65458935",
"0.6461489",
"0.6289846",
"0.62089014",
"0.6200618",
"0.61897707",
"0.60647523",
"0.60470694",
"0.603594",
"0.6009594",
"0.6008557",
"0.596944",
"0.59262115",
"0.5919822",
"0.58949685",
"0.58171034",
"0.58091116",
"0.5795... | 0.700094 | 1 |
Get the codebook vectors. | def get_cb_vectors(self):
return self.cb_vectors | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_vectors(self):\n return self.vecs[:]",
"def get_vectors(self, corpus_size, vectors_size, vectors_type):\n vectors = np.zeros((corpus_size, vectors_size))\n for i in range(0, corpus_size):\n prefix = vectors_type + '_' + str(i)\n vectors[i] = self.model_dbow.docv... | [
"0.6734293",
"0.6306046",
"0.6234584",
"0.6101457",
"0.6060832",
"0.5943614",
"0.59248465",
"0.58844465",
"0.58299667",
"0.5818226",
"0.5769358",
"0.5711672",
"0.5681364",
"0.5677561",
"0.5665824",
"0.5648895",
"0.5648038",
"0.5617257",
"0.5584856",
"0.5566153",
"0.55533874",... | 0.6465732 | 1 |
Extracts features from the final codebook vectors using the L2 norm. The way it works is that we pass in the data as an argument and the function produces len(data) feature vectors such that f(x_i) = [a_1 ... a_K] and a_j = ||x_i - c_j||, where c_j is the j-th codebook vector. | def extract_features(self, data):
# TODO: Should feature extraction be done on the testing data? In the lecture notes
# TODO: it is not done with the training data, but with the test data.
# TODO: Maybe we should use the validate data when we do cross-validation.
features = np.zeros([l... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_liwc_features(train_data, test_data):\n print(\"getting liwc features\")\n train_liwc_matrix = []\n test_liwc_matrix = []\n for phrase in train_data:\n liwc_scores = word_category_counter.score_text(phrase)\n feature_vector = []\n for key in liwc_categories:\n if... | [
"0.60243773",
"0.59016556",
"0.58867896",
"0.5756956",
"0.573142",
"0.56395006",
"0.56027967",
"0.55864096",
"0.55687845",
"0.5554088",
"0.55537987",
"0.54962337",
"0.54528916",
"0.54197",
"0.53998107",
"0.5392125",
"0.53568494",
"0.5354972",
"0.5350437",
"0.5302239",
"0.5282... | 0.6925255 | 0 |
Sets the node_b of this NetflowFilters. | def node_b(self, node_b):
self._node_b = node_b | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_node(self, b):\n return b == self.__node_b",
"def setB(self, b):\n\t\tself.b = int(b)",
"def b(self, b):\n\n self._b = b",
"def add_bilink(self, nodeport_a, nodeport_b, bilink):",
"def set_bias_for_node(node: Node, value: np.ndarray):\n bias = get_bias_for_node(node)\n if bias is... | [
"0.61645985",
"0.6144432",
"0.60478383",
"0.5791615",
"0.576705",
"0.5688763",
"0.5581127",
"0.5452997",
"0.5451288",
"0.53915596",
"0.53915596",
"0.5351376",
"0.5198766",
"0.5175632",
"0.5073717",
"0.5065744",
"0.50484663",
"0.50223225",
"0.50185555",
"0.50114703",
"0.500081... | 0.8324408 | 0 |
Sets the qos_type of this NetflowFilters. | def qos_type(self, qos_type):
self._qos_type = qos_type | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def qos(self, qos: int):\n if qos is not None and qos > 2: # noqa: E501\n raise ValueError(\"Invalid value for `qos`, must be a value less than or equal to `2`\") # noqa: E501\n if qos is not None and qos < 0: # noqa: E501\n raise ValueError(\"Invalid value for `qos`, must be... | [
"0.69267035",
"0.665194",
"0.6260876",
"0.6227411",
"0.61854005",
"0.6163835",
"0.61467403",
"0.6086317",
"0.6053018",
"0.60085255",
"0.5876559",
"0.5848173",
"0.56665426",
"0.56485814",
"0.5635002",
"0.559807",
"0.53704786",
"0.52488047",
"0.5220447",
"0.5210421",
"0.5194985... | 0.8767736 | 0 |
Sets the device_interfaces of this NetflowFilters. | def device_interfaces(self, device_interfaces):
self._device_interfaces = device_interfaces | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def network_interfaces(self, network_interfaces):\n\n self._network_interfaces = network_interfaces",
"def netflow_devices(self, netflow_devices):\n\n self._netflow_devices = netflow_devices",
"def ifaces(self, ifaces):\n \n self._ifaces = ifaces",
"def update_interfaces_config(se... | [
"0.68550044",
"0.63056403",
"0.6121295",
"0.6114787",
"0.60211456",
"0.59780586",
"0.5934186",
"0.58451796",
"0.58451796",
"0.57590055",
"0.5745199",
"0.56535774",
"0.54265004",
"0.5393894",
"0.5360232",
"0.53313124",
"0.53304845",
"0.53277284",
"0.5235097",
"0.51627266",
"0.... | 0.81078243 | 0 |
Sets the ports of this NetflowFilters. | def ports(self, ports):
self._ports = ports | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def modify_ports(self, ports, **kwargs):\n pass",
"def modify_rstp_ports(self, ports, **kwargs):\n pass",
"def https_ports(self, https_ports):\n\n self._https_ports = https_ports",
"def http_ports(self, http_ports):\n\n self._http_ports = http_ports",
"def make_external_ports(se... | [
"0.75998574",
"0.6880327",
"0.6804348",
"0.6704648",
"0.65429854",
"0.6481354",
"0.6428148",
"0.61421347",
"0.60838145",
"0.60838145",
"0.60838145",
"0.6080848",
"0.6070129",
"0.60178405",
"0.59989095",
"0.5934444",
"0.59169525",
"0.5844672",
"0.58260345",
"0.5825795",
"0.580... | 0.8199667 | 0 |
Sets the ip_version of this NetflowFilters. | def ip_version(self, ip_version):
self._ip_version = ip_version | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vip(self, vip):\n\n self._vip = vip",
"def protocol_version(self, protocol_version):\n\n self._protocol_version = protocol_version",
"def setVersion(self, version) :\n if version is not None :\n try :\n self.version = [int(p) for p in version.split(\".\")]\n ... | [
"0.63388056",
"0.58770186",
"0.5694638",
"0.5656582",
"0.5656582",
"0.56357116",
"0.55922115",
"0.55871147",
"0.5562405",
"0.55404204",
"0.5522308",
"0.55216855",
"0.5502596",
"0.5502596",
"0.5502596",
"0.5491824",
"0.54864305",
"0.54864305",
"0.54864305",
"0.54864305",
"0.54... | 0.8065355 | 0 |
Sets the netflow_devices of this NetflowFilters. | def netflow_devices(self, netflow_devices):
self._netflow_devices = netflow_devices | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def devices(self, devices):\n\n self._devices = devices",
"def devices(self, devices):\n\n self._devices = devices",
"def set_devices(args):\n global devices\n if args is not None:\n devices = [torch.device(i) for i in ast.literal_eval('[' + args + ']')]\n torch.cuda.set_devic... | [
"0.6428995",
"0.6428995",
"0.5585992",
"0.54547",
"0.5365216",
"0.5246433",
"0.51710474",
"0.50898916",
"0.50661755",
"0.5038719",
"0.5004575",
"0.49756554",
"0.4974928",
"0.49667272",
"0.4923213",
"0.48840016",
"0.48089606",
"0.48036066",
"0.47994307",
"0.4792203",
"0.479017... | 0.86378056 | 0 |
Sets the top of this NetflowFilters. | def top(self, top):
self._top = top | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bb_top(self, bb_top: float):\n\n self._bb_top = bb_top",
"def top(self):\n # Sets our Z value to one.\n self.setZValue(1)\n # Set every colliding items Z value to 0\n for sibling in self.collidingItems():\n sibling.setZValue(0)",
"def always_top(self, value: bo... | [
"0.67293346",
"0.66441184",
"0.66229576",
"0.6613329",
"0.6542814",
"0.65164036",
"0.6361207",
"0.6278201",
"0.62754864",
"0.620966",
"0.60119075",
"0.60119075",
"0.60119075",
"0.59842026",
"0.59842026",
"0.5849825",
"0.58208567",
"0.5782378",
"0.5741379",
"0.5739408",
"0.572... | 0.73888093 | 0 |
Sets the app_type of this NetflowFilters. | def app_type(self, app_type):
self._app_type = app_type | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _app_type(self):\n return self._event['app_type']",
"def set_type(self, type):\n self.type = type",
"def set_type(self, type):\n self.type = type",
"def setFilter(self, type: int, filter: int) -> None:\n ...",
"def set_type(self, type):\n self._type = type",
"def se... | [
"0.6110402",
"0.6007707",
"0.6007707",
"0.5881547",
"0.5826859",
"0.5705285",
"0.5672125",
"0.5645346",
"0.5516108",
"0.5502036",
"0.5485557",
"0.54566836",
"0.5454073",
"0.5433174",
"0.54322046",
"0.54322046",
"0.54322046",
"0.54322046",
"0.54322046",
"0.54322046",
"0.543220... | 0.8083322 | 0 |
Sets the nbar_application_names of this NetflowFilters. | def nbar_application_names(self, nbar_application_names):
self._nbar_application_names = nbar_application_names | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setNameFilters(self, filters):\n if self._completer:\n self._completer.model().setNameFilters(filters)",
"def set_name(self, application_name):\r\n self._name = application_name",
"def app_names(self):\n return self.get_app_names()",
"def app_name(self, value):\n self._... | [
"0.5579578",
"0.5503502",
"0.51913893",
"0.5184525",
"0.51769996",
"0.5006621",
"0.4993549",
"0.48435128",
"0.48244205",
"0.4778834",
"0.4738988",
"0.47321886",
"0.47280967",
"0.47159183",
"0.4684673",
"0.46767935",
"0.46593088",
"0.46463352",
"0.4644338",
"0.46052152",
"0.46... | 0.86759675 | 0 |
Sets the node_a of this NetflowFilters. | def node_a(self, node_a):
self._node_a = node_a | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_node(self, node):\n self.__node = node",
"def from_node(self, a):\n return a == self.__node_a",
"def nodes(self, nodes_array):\n self.nodes_set = nodes_array",
"def set_node(self, name, state):\n self.source_net.nodes[name] = state",
"def __call__(self, node_A):\n ... | [
"0.58251303",
"0.5789575",
"0.5745441",
"0.5615295",
"0.557767",
"0.5573292",
"0.5538027",
"0.55000454",
"0.5496195",
"0.54542196",
"0.5382919",
"0.5307776",
"0.51498705",
"0.5089265",
"0.507674",
"0.5060496",
"0.5041549",
"0.50394356",
"0.4960041",
"0.49577066",
"0.4915388",... | 0.79782516 | 0 |
Sets the conversation of this NetflowFilters. | def conversation(self, conversation):
self._conversation = conversation | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_conversation(self, conversation):\r\n self.conversation = conversation",
"def set_gift_conversation(self, conversation_string):\r\n self.gift_conversation = conversation_string",
"def update(self, conversation):\n self.content_type = \"application/json\"\n self.method = \"PA... | [
"0.7668855",
"0.6350055",
"0.52989596",
"0.5078657",
"0.50487155",
"0.50432694",
"0.49181578",
"0.49066126",
"0.4869592",
"0.48333606",
"0.4763982",
"0.47448006",
"0.47074327",
"0.46813306",
"0.46577984",
"0.45800743",
"0.4569655",
"0.45474747",
"0.45351785",
"0.44976678",
"0... | 0.72746265 | 1 |
Sets the if_names of this NetflowFilters. | def if_names(self, if_names):
self._if_names = if_names | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ifaces_init(*ifnames):\n for ifname in ifnames:\n _set_eth_admin_state(ifname, schema.InterfaceState.ABSENT)",
"def setNameFilters(self, filters):\n if self._completer:\n self._completer.model().setNameFilters(filters)",
"def setFilters(self, filters):\n self.__filters = ... | [
"0.5829484",
"0.5813386",
"0.5766594",
"0.5461771",
"0.5420651",
"0.53666186",
"0.5335555",
"0.5328612",
"0.5256119",
"0.5160971",
"0.5098839",
"0.50947213",
"0.5075196",
"0.5055912",
"0.5040678",
"0.4927203",
"0.49013257",
"0.48951134",
"0.48855725",
"0.48845008",
"0.4844863... | 0.8094472 | 0 |
Sets the direction of this NetflowFilters. | def direction(self, direction):
self._direction = direction | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_direction(self, direction: str) -> None:\n self.wink.set_fan_direction(direction)",
"def set_direction(self, new_dir):\n self.__direction = new_dir",
"def setDirection(self,stepDir = 2):\n pass",
"def setdirection(self, *args, **kwargs):\n return _coordsys.coordsys_setdire... | [
"0.7139598",
"0.7040138",
"0.7006834",
"0.69987583",
"0.6970682",
"0.68825686",
"0.6802282",
"0.6730239",
"0.63998103",
"0.6399511",
"0.6314273",
"0.6307516",
"0.63062197",
"0.63062197",
"0.62925655",
"0.62565696",
"0.6163797",
"0.6163797",
"0.60774386",
"0.60496444",
"0.6030... | 0.7080141 | 1 |
Add parents to the database | def add_parent(session, df):
try:
for _, row in df.iterrows():
parent = Parent()
parent.name = row['parent_name']
parent.family = row['family']
session.add(parent)
except Exception as ex:
session.rollback()
raise ex
else:
sessio... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_append_children_category(self):\n category = Category(catname='olympic games')\n category1 = Category(catname='Tennis')\n category.parents.append(category1)\n category.save()\n assert category.parents",
"def add_parent(sender, instance, **kwargs):\n if not kwargs['c... | [
"0.6654415",
"0.65198356",
"0.6462206",
"0.64579284",
"0.6353594",
"0.62729967",
"0.6186695",
"0.61660165",
"0.6154463",
"0.61290205",
"0.61266243",
"0.6095059",
"0.6068027",
"0.6065559",
"0.6063551",
"0.6041484",
"0.6020467",
"0.60039234",
"0.60022867",
"0.59916437",
"0.5974... | 0.6953736 | 0 |
Convenience redirect to find the root outcome group for a particular context. Will redirect to the appropriate outcome group's URL. | def redirect_to_root_outcome_group_for_context_global(request_ctx, **request_kwargs):
path = '/v1/global/root_outcome_group'
url = request_ctx.base_api_url + path.format()
response = client.get(request_ctx, url, **request_kwargs)
return response | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def redirect_to_root_outcome_group_for_context_accounts(request_ctx, account_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/root_outcome_group'\n url = request_ctx.base_api_url + path.format(account_id=account_id)\n response = client.get(request_ctx, url, **request_kwargs)\n\n return resp... | [
"0.7103075",
"0.67741513",
"0.56865895",
"0.51907563",
"0.5116719",
"0.5102657",
"0.5087065",
"0.5047118",
"0.4916807",
"0.49087209",
"0.48846796",
"0.47762623",
"0.47364914",
"0.4677289",
"0.46656385",
"0.46637428",
"0.46563548",
"0.46534562",
"0.45972314",
"0.4521781",
"0.4... | 0.76904535 | 0 |
Convenience redirect to find the root outcome group for a particular context. Will redirect to the appropriate outcome group's URL. | def redirect_to_root_outcome_group_for_context_accounts(request_ctx, account_id, **request_kwargs):
path = '/v1/accounts/{account_id}/root_outcome_group'
url = request_ctx.base_api_url + path.format(account_id=account_id)
response = client.get(request_ctx, url, **request_kwargs)
return response | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def redirect_to_root_outcome_group_for_context_global(request_ctx, **request_kwargs):\n\n path = '/v1/global/root_outcome_group'\n url = request_ctx.base_api_url + path.format()\n response = client.get(request_ctx, url, **request_kwargs)\n\n return response",
"def redirect_to_root_outcome_group_for_c... | [
"0.76904535",
"0.67741513",
"0.56865895",
"0.51907563",
"0.5116719",
"0.5102657",
"0.5087065",
"0.5047118",
"0.4916807",
"0.49087209",
"0.48846796",
"0.47762623",
"0.47364914",
"0.4677289",
"0.46656385",
"0.46637428",
"0.46563548",
"0.46534562",
"0.45972314",
"0.4521781",
"0.... | 0.7103075 | 1 |
Modify an existing outcome group. Fields not provided are left as is; unrecognized fields are ignored. When changing the parent outcome group, the new parent group must belong to the same context as this outcome group, and must not be a descendant of this outcome group (i.e. no cycles allowed). | def update_outcome_group_global(request_ctx, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):
path = '/v1/global/outcome_groups/{id}'
payload = {
'title' : title,
'description' : description,
'vendor_guid' : vendor_guid,
'paren... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_outcome_group_accounts(request_ctx, account_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 've... | [
"0.61605686",
"0.61406356",
"0.5958074",
"0.59414357",
"0.5797429",
"0.5668177",
"0.5624319",
"0.5621426",
"0.561763",
"0.5572149",
"0.5539592",
"0.55051327",
"0.54824334",
"0.5461233",
"0.54360193",
"0.5433018",
"0.53988296",
"0.5368346",
"0.53458875",
"0.52537507",
"0.52454... | 0.67553115 | 0 |
Modify an existing outcome group. Fields not provided are left as is; unrecognized fields are ignored. When changing the parent outcome group, the new parent group must belong to the same context as this outcome group, and must not be a descendant of this outcome group (i.e. no cycles allowed). | def update_outcome_group_accounts(request_ctx, account_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):
path = '/v1/accounts/{account_id}/outcome_groups/{id}'
payload = {
'title' : title,
'description' : description,
'vendor_guid' ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_outcome_group_global(request_ctx, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n ... | [
"0.67553115",
"0.61406356",
"0.5958074",
"0.59414357",
"0.5797429",
"0.5668177",
"0.5624319",
"0.5621426",
"0.561763",
"0.5572149",
"0.5539592",
"0.55051327",
"0.54824334",
"0.5461233",
"0.54360193",
"0.5433018",
"0.53988296",
"0.5368346",
"0.53458875",
"0.52537507",
"0.52454... | 0.61605686 | 1 |
Deleting an outcome group deletes descendant outcome groups and outcome links. The linked outcomes themselves are only deleted if all links to the outcome were deleted. Aligned outcomes cannot be deleted; as such, if all remaining links to an aligned outcome are included in this group's descendants, the group deletion ... | def delete_outcome_group_global(request_ctx, id, **request_kwargs):
path = '/v1/global/outcome_groups/{id}'
url = request_ctx.base_api_url + path.format(id=id)
response = client.delete(request_ctx, url, **request_kwargs)
return response | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_outcome_group_accounts(request_ctx, account_id, id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response",
... | [
"0.66492325",
"0.6541948",
"0.6461525",
"0.6348538",
"0.633461",
"0.6283196",
"0.6217388",
"0.6203314",
"0.6168106",
"0.61588556",
"0.6150082",
"0.61439496",
"0.6109929",
"0.61078966",
"0.6060277",
"0.60184395",
"0.6001307",
"0.6001307",
"0.59507966",
"0.5928808",
"0.5904541"... | 0.662239 | 1 |
Deleting an outcome group deletes descendant outcome groups and outcome links. The linked outcomes themselves are only deleted if all links to the outcome were deleted. Aligned outcomes cannot be deleted; as such, if all remaining links to an aligned outcome are included in this group's descendants, the group deletion ... | def delete_outcome_group_accounts(request_ctx, account_id, id, **request_kwargs):
path = '/v1/accounts/{account_id}/outcome_groups/{id}'
url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)
response = client.delete(request_ctx, url, **request_kwargs)
return response | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_outcome_group_global(request_ctx, id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response",
"def test_080_group_delete(self):\n\n testf... | [
"0.662239",
"0.6541948",
"0.6461525",
"0.6348538",
"0.633461",
"0.6283196",
"0.6217388",
"0.6203314",
"0.6168106",
"0.61588556",
"0.6150082",
"0.61439496",
"0.6109929",
"0.61078966",
"0.6060277",
"0.60184395",
"0.6001307",
"0.6001307",
"0.59507966",
"0.5928808",
"0.5904541",
... | 0.66492325 | 0 |
Link an outcome into the outcome group. The outcome to link can either be specified by a PUT to the link URL for a specific outcome (the outcome_id in the PUT URLs) or by supplying the information for a new outcome (title, description, ratings, mastery_points) in a POST to the collection. If linking an existing outcome... | def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):
path = '/v1/global/outcome_groups/{id}/outcomes'
payload = {
'outcome_id' : outcome_... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_link_outcome_global_outcome_id(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {... | [
"0.717459",
"0.7163847",
"0.7061997",
"0.6714336",
"0.66986096",
"0.5106874",
"0.4798102",
"0.47242922",
"0.45309213",
"0.44986874",
"0.44323424",
"0.44262272",
"0.44031692",
"0.4358714",
"0.43203336",
"0.43203336",
"0.43050796",
"0.43030095",
"0.4294811",
"0.42858493",
"0.42... | 0.7362988 | 0 |
Link an outcome into the outcome group. The outcome to link can either be specified by a PUT to the link URL for a specific outcome (the outcome_id in the PUT URLs) or by supplying the information for a new outcome (title, description, ratings, mastery_points) in a POST to the collection. If linking an existing outcome... | def create_link_outcome_global_outcome_id(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):
path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'
payload = {
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' :... | [
"0.7362988",
"0.7163847",
"0.7061997",
"0.6714336",
"0.66986096",
"0.5106874",
"0.4798102",
"0.47242922",
"0.45309213",
"0.44986874",
"0.44323424",
"0.44262272",
"0.44031692",
"0.4358714",
"0.43203336",
"0.43203336",
"0.43050796",
"0.43030095",
"0.4294811",
"0.42858493",
"0.4... | 0.717459 | 1 |
Unlinking an outcome only deletes the outcome itself if this was the last link to the outcome in any group in any context. Aligned outcomes cannot be deleted; as such, if this is the last link to an aligned outcome, the unlinking will fail. | def unlink_outcome_global(request_ctx, id, outcome_id, **request_kwargs):
path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'
url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)
response = client.delete(request_ctx, url, **request_kwargs)
return response | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unlink(self, link_id):",
"def unlink_outcome_accounts(request_ctx, account_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n res... | [
"0.6474747",
"0.63120115",
"0.6281923",
"0.61563367",
"0.61477256",
"0.6030046",
"0.58153516",
"0.56244844",
"0.5609927",
"0.5545071",
"0.55448717",
"0.54989296",
"0.5490254",
"0.5487666",
"0.5484662",
"0.54513216",
"0.5432963",
"0.54169786",
"0.53966224",
"0.5357432",
"0.535... | 0.6547148 | 0 |
Creates a new empty subgroup under the outcome group with the given title and description. | def create_subgroup_global(request_ctx, id, title, description=None, vendor_guid=None, **request_kwargs):
path = '/v1/global/outcome_groups/{id}/subgroups'
payload = {
'title' : title,
'description' : description,
'vendor_guid' : vendor_guid,
}
url = request_ctx.base_api_url + p... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )",
"def with_group(title: str) -> Generator[None, None, None]:\n if os.environ.get(\"GITHUB_ACTIONS\", \"false\") != \"true\":\n console.print(\"#\" * 10 + \" [bright_blue]\" + title + \"[/] \" + \"#\" * 10)\n yield\n ... | [
"0.5995512",
"0.57896537",
"0.5699493",
"0.55475307",
"0.55475307",
"0.55366933",
"0.55120105",
"0.5418014",
"0.5416043",
"0.53805554",
"0.53521186",
"0.53274035",
"0.5322022",
"0.53077036",
"0.529829",
"0.5248793",
"0.5231281",
"0.5218727",
"0.5214925",
"0.5195291",
"0.51951... | 0.6064545 | 0 |
Creates a new subgroup of the outcome group with the same title and description as the source group, then creates links in that new subgroup to the same outcomes that are linked in the source group. Recurses on the subgroups of the source group, importing them each in turn into the new subgroup. Allows you to copy orga... | def import_outcome_group_accounts(request_ctx, account_id, id, source_outcome_group_id, **request_kwargs):
path = '/v1/accounts/{account_id}/outcome_groups/{id}/import'
payload = {
'source_outcome_group_id' : source_outcome_group_id,
}
url = request_ctx.base_api_url + path.format(account_id=acc... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def import_outcome_group_courses(request_ctx, course_id, id, source_outcome_group_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/import'\n payload = {\n 'source_outcome_group_id' : source_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(course_... | [
"0.5898711",
"0.5813369",
"0.57389224",
"0.5706337",
"0.56055593",
"0.55976254",
"0.5595738",
"0.55490917",
"0.5544522",
"0.5541461",
"0.5518073",
"0.53831655",
"0.53829265",
"0.53379554",
"0.531845",
"0.5302578",
"0.52945095",
"0.52933514",
"0.5258613",
"0.5246323",
"0.52400... | 0.5874123 | 1 |
Creates a new subgroup of the outcome group with the same title and description as the source group, then creates links in that new subgroup to the same outcomes that are linked in the source group. Recurses on the subgroups of the source group, importing them each in turn into the new subgroup. Allows you to copy orga... | def import_outcome_group_courses(request_ctx, course_id, id, source_outcome_group_id, **request_kwargs):
path = '/v1/courses/{course_id}/outcome_groups/{id}/import'
payload = {
'source_outcome_group_id' : source_outcome_group_id,
}
url = request_ctx.base_api_url + path.format(course_id=course_i... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def import_outcome_group_accounts(request_ctx, account_id, id, source_outcome_group_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/import'\n payload = {\n 'source_outcome_group_id' : source_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(acc... | [
"0.5874123",
"0.5813369",
"0.57389224",
"0.5706337",
"0.56055593",
"0.55976254",
"0.5595738",
"0.55490917",
"0.5544522",
"0.5541461",
"0.5518073",
"0.53831655",
"0.53829265",
"0.53379554",
"0.531845",
"0.5302578",
"0.52945095",
"0.52933514",
"0.5258613",
"0.5246323",
"0.52400... | 0.5898711 | 0 |
Parse challenge from a challenge response, cache it, and return it. | def _update_challenge(request: PipelineRequest, challenger: "PipelineResponse") -> HttpChallenge:
challenge = HttpChallenge(
request.http_request.url,
challenger.http_response.headers.get("WWW-Authenticate"),
response_headers=challenger.http_response.headers,
)
ChallengeCache.set_ch... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_challenge(cls, response):\n links = _parse_header_links(response)\n try:\n authzr_uri = links['up']['url']\n except KeyError:\n raise errors.ClientError('\"up\" link missing')\n return (\n response.json()\n .addCallback(\n ... | [
"0.69334584",
"0.57358587",
"0.56640327",
"0.56572354",
"0.5530632",
"0.54902357",
"0.54403126",
"0.54084736",
"0.54046005",
"0.54018176",
"0.5199438",
"0.5182611",
"0.517387",
"0.5165005",
"0.512281",
"0.5000999",
"0.49986807",
"0.4956543",
"0.4911077",
"0.49107736",
"0.4908... | 0.6099478 | 1 |
Check whether the reference folder is in place and all attributes are ready. | def check_reference_ready():
# check to see if there is a manifest file in the default reference path
manifest_file = os.path.join(settings.DEFAULT_REFERENCE_PATH, 'manifest.json')
if not os.path.isfile(manifest_file):
_log("manifest.json file cannot be found in the reference folder; simulation wil... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _checkIntegrity(self):\n return (\n os.path.isfile(os.path.join(self._root, 'processed/train.pkl'))\n and os.path.isfile(os.path.join(self._root, 'processed/test.pkl')))",
"def copy_file_check(self):\n pass",
"def _before_reference_check(self, maya_file, client_data=None... | [
"0.6192987",
"0.60495603",
"0.5934274",
"0.59128857",
"0.58630824",
"0.5849084",
"0.5817839",
"0.5815572",
"0.5815572",
"0.57971984",
"0.57678586",
"0.5761644",
"0.57452965",
"0.5725083",
"0.5716483",
"0.5684965",
"0.5675776",
"0.5654527",
"0.5647706",
"0.5647706",
"0.5647706... | 0.7016824 | 0 |
Callback to be called whenever the system state has changed. Checks whether or not the step has to be advanced. | def updateState(self):
if ('cutting' in self.step_ops) and (self.cut_state.user_cutting):
self.step_ops['cutting'] = True
if ('cooking' in self.step_ops) and (self.cut_state.user_cooking):
self.step_ops['cooking'] = True
# TODO: add the rest of the operati... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _voltage_changed(self):\n if self.checkValueBool:\n self.check_status()",
"def has_state_changed(self) -> bool:\r\n ...",
"def _on_step(self) -> bool:\n # print(\"locals \", self.locals)\n # # what timestep you think\n # print(\"timestep \",CustomCallback.step)... | [
"0.6339768",
"0.6194187",
"0.6136971",
"0.5964047",
"0.5904102",
"0.5903155",
"0.5893415",
"0.5889973",
"0.58527935",
"0.58454347",
"0.5844172",
"0.58329093",
"0.57805914",
"0.57780147",
"0.5772509",
"0.5751888",
"0.573898",
"0.57344913",
"0.5711796",
"0.5696087",
"0.56890595... | 0.6468291 | 0 |
Constructor for thread that will request the RSS of a particular podcast series, parse the series details and episode information, and save the information with `storer` | def __init__(self, storer, series, i):
super(EpisodeWorker, self).__init__()
self.storer = storer
self.series = series # All series
self.i = i | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self):\n while self.i < len(self.series):\n # Grab line + RSS\n s = self.series[self.i]\n rss = self.request_rss(s.feedUrl)\n\n # Compose Episodes\n ep_dicts = []\n for entry in rss['entries']:\n ep_dicts.append(Episode(s, entry).__dict__)\n\n # Build result dic... | [
"0.666735",
"0.582353",
"0.5599041",
"0.5515614",
"0.5502834",
"0.54765725",
"0.54175603",
"0.5364501",
"0.533091",
"0.53269607",
"0.5278374",
"0.5247649",
"0.5233488",
"0.5222864",
"0.5213202",
"0.5204529",
"0.51684767",
"0.516591",
"0.51639456",
"0.5141017",
"0.513316",
"... | 0.6067558 | 1 |
Uses information in `line` to request and return the RSS feed | def request_rss(self, url):
return feedparser.parse(url) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_rss(url):",
"def get_news(url):\r\n \r\n # parse RSS feed into list of dictionaries\r\n feed = feedparser.parse(url)\r\n\r\n # no RSS feed articles for url\r\n if len(feed['entries']) == 0:\r\n return []\r\n \r\n # get first ten articles from the RSS feed\r\n news = []\r\n ... | [
"0.665676",
"0.63081646",
"0.6112597",
"0.60895586",
"0.60594904",
"0.60477144",
"0.60260314",
"0.598809",
"0.5984063",
"0.59758997",
"0.5936038",
"0.5913608",
"0.58760685",
"0.5829383",
"0.58091927",
"0.5775324",
"0.5773286",
"0.5708184",
"0.56792194",
"0.56642944",
"0.56638... | 0.696534 | 0 |
Variable assignment can include assigning array elements. | def assign_variable(executor, variable, value):
variable = variable.replace(" ", "")
# TODO Should move parsing of this to ParsedStatementLet.
# TODO Need to handle N-dimensional array element assignment.
i = variable.find("(")
if i != -1:
# Array reference
j = variable.find(")", i+1... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_50_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 2]of real;\n\t\tbegin f()[1]:=x[1]:=1; with y:real;y:real; do begin end end\"\"\"\n\t\texp... | [
"0.6699032",
"0.6667143",
"0.66010433",
"0.64327574",
"0.6394654",
"0.632295",
"0.62098926",
"0.61762494",
"0.6125874",
"0.6119721",
"0.60467184",
"0.6016822",
"0.5979976",
"0.5958759",
"0.58768624",
"0.57980937",
"0.5793912",
"0.5786944",
"0.57581115",
"0.5734274",
"0.573222... | 0.7048861 | 0 |
An IF statement works by skipping to the next line if the THEN clause is false; otherwise it continues to execute the clauses after the THEN. | def stmt_if(executor, stmt):
e = Expression()
result = e.eval(stmt._tokens, symbols=executor._symbols)
if not result:
executor.goto_next_line() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def end_ifeq(self):\n self.indent_left()\n self.write_line(\"endif\")",
"def test_28_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then return; else return 0; end\n\t\tend\... | [
"0.6381344",
"0.6276441",
"0.6248517",
"0.6244961",
"0.62422997",
"0.61599344",
"0.6136429",
"0.6136429",
"0.5794875",
"0.5792966",
"0.575992",
"0.57597315",
"0.5758323",
"0.5756217",
"0.573461",
"0.56686",
"0.5654149",
"0.5635589",
"0.5634545",
"0.563404",
"0.5616345",
"0.... | 0.741904 | 0 |
Calculate tips over the past X amount of time and write JSON output. | def aggregate_tips():
# The SQL query to perform
now = time.time()
print("Computing tip stats...", end="", flush=True)
labels = ["30_days", "7_days", "24_hours", "1_hour"]
windows = [30*86400.0, 7*86400.0, 1*86400.0, 3600.0]
result = {}
result["unix_time"] = now
result["human_time_utc"]... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_tip(meal_base, tip_rate):",
"def time_taken(json_cutlist, laser):\r\n\tcutlist = json.loads(json_cutlist)\r\n\ttime = 0\r\n\tcoordinate_array = [0, 0]\r\n\tfor a in cutlist:\r\n\t\tif a[0] == \"jump\" or a[0] == \"mark\":\r\n\t\t\tcoordinate_array = [float(a[1]) - coordinate_array[0], float(a[2]) -... | [
"0.5688149",
"0.55947673",
"0.5528684",
"0.55030835",
"0.5432563",
"0.53793204",
"0.5325283",
"0.53094494",
"0.52589875",
"0.51901346",
"0.51684177",
"0.5168089",
"0.5163765",
"0.5146831",
"0.511869",
"0.5105191",
"0.50967735",
"0.5090349",
"0.5079822",
"0.5079822",
"0.507982... | 0.6560097 | 0 |
Publish files to somewhere on the internet. | def publish_files():
print("Publishing files to the internet...", end="", flush=True)
import subprocess
try:
subprocess.run("./upload.sh", timeout=120.0)
print("done.\n")
except:
print("failed.\n") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def publish(self, filename):\n # 1) Encrypt file\n # 2) Publish to remote cloud server\n # 3) Wait for the result\n # 4) Store results in files located inside RAM folder",
"def publish():\n if sys.argv[-1] == 'publish':\n os.system('python setup.py sdist')\n os.system... | [
"0.74543023",
"0.6922994",
"0.6866991",
"0.676951",
"0.642343",
"0.6293285",
"0.6157238",
"0.6132221",
"0.60680485",
"0.59941494",
"0.5980355",
"0.5970285",
"0.59354484",
"0.5896326",
"0.58699507",
"0.5835118",
"0.5805512",
"0.58000696",
"0.57821625",
"0.5779796",
"0.57461995... | 0.77630585 | 0 |
Start the Microblaze Processor. The processor instance will start automatically after instantiation. | def start(self):
self.microblaze.run()
self.microblaze.write(MAILBOX_OFFSET + MAILBOX_PY2IOP_CMD_OFFSET, 0)
self.load_switch_config(self.iop_switch_config) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def startup(self):\n if self.initialize_mp:\n self.initialize_multiprocessing()\n self.startup_run()\n self.startup_finish()",
"def start(params) -> None:\n check_root()\n start_microservice(params)\n load_kernel_module(params)\n start_streamer(params)",
"def platfor... | [
"0.6218762",
"0.60426116",
"0.5927747",
"0.5888535",
"0.5852811",
"0.58245885",
"0.5803731",
"0.57714486",
"0.57650065",
"0.5744394",
"0.5729824",
"0.5724727",
"0.5718365",
"0.5661167",
"0.5647819",
"0.5586553",
"0.5540475",
"0.553082",
"0.551599",
"0.5512633",
"0.5512412",
... | 0.6880082 | 0 |
Put the Microblaze processor into reset. This method will set the processor status to "STOPPED". | def stop(self):
self.microblaze.reset() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset(self):\n # The camera will give no response to this command\n self._serial_io('\\x55\\x99\\x66\\x11', None)\n while True:\n try:\n self.system_state = 0x11\n if self.system_state == 0x11:\n break\n except CygnetEx... | [
"0.62783825",
"0.6214647",
"0.62101734",
"0.62012935",
"0.6188777",
"0.6144242",
"0.6094757",
"0.60869604",
"0.6043643",
"0.6032759",
"0.6026657",
"0.6011027",
"0.600109",
"0.59982294",
"0.5969956",
"0.59564924",
"0.5952704",
"0.59244686",
"0.5921813",
"0.5914156",
"0.5907323... | 0.70397276 | 0 |
Load the Microblaze processor's switch configuration. This method will update switch config. Each pin requires 8 bits for configuration. | def load_switch_config(self, config=None):
if config is None:
config = ARDUINO_SWCFG_DIOALL
elif not len(config) == 4*ARDUINO_SWITCHCONFIG_NUMREGS:
raise TypeError('Invalid switch config {}.'.format(config))
# Build switch config word
self.iop_switch_config = con... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def configure_switch(self, number: str, config: SwitchConfig, platform_config: dict) -> \"SwitchPlatformInterface\":\n raise NotImplementedError",
"def configure_switch(self, config):\n raise NotImplementedError",
"def __init__(self, mb_info, switch_config):\n self.microblaze = Arduino(mb_... | [
"0.62282544",
"0.5851503",
"0.5757967",
"0.5553404",
"0.5420665",
"0.5405235",
"0.5385367",
"0.53103375",
"0.5305637",
"0.52973616",
"0.5270049",
"0.52667636",
"0.5264424",
"0.52271485",
"0.5182084",
"0.51805335",
"0.51612717",
"0.515244",
"0.5107933",
"0.50897145",
"0.507896... | 0.67974967 | 0 |
Returns the status of the Microblaze processor. Returns: str: The processor status ("IDLE", "RUNNING", or "STOPPED"). | def status(self):
return self.microblaze.state | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_status(self):\n return self.read_register(259, 0, 3)",
"async def get_status(self) -> str:\n return await self.hw_device.status()",
"def get_status(self):\n if self.vm.get_cloud_status() != \"ACTIVE\":\n return \"stopped\"\n #wait for the vm to be ready and SSH-ab... | [
"0.66442674",
"0.65349734",
"0.6357901",
"0.61985266",
"0.61985266",
"0.6195143",
"0.61590946",
"0.6113441",
"0.6109538",
"0.6081476",
"0.6031187",
"0.6031187",
"0.6031187",
"0.6031187",
"0.6031187",
"0.6031187",
"0.6031187",
"0.6031187",
"0.6031187",
"0.6031187",
"0.6031187"... | 0.692374 | 0 |
Check whether the command mailbox is idle. Returns: bool: True if the command in the mailbox is idle. | def is_cmd_mailbox_idle(self):
mb_cmd_word = self.microblaze.read(MAILBOX_OFFSET +
MAILBOX_PY2IOP_CMD_OFFSET)
return (mb_cmd_word & 0x1) == 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def is_idle(self) -> bool:\n return (\n await self.send_command_and_read_reply(Protocol1Command(command=\"F\")) == \"Y\"\n )",
"def is_idle(self) -> bool:\n\n return self.get_runningstate == self.cmd.C815_IDLE_STATE",
"def is_idle(self) -> bool:",
"def is_idle(self) -> b... | [
"0.7600817",
"0.7516991",
"0.7223411",
"0.7089875",
"0.6917131",
"0.66209525",
"0.6557727",
"0.6422757",
"0.6312891",
"0.6253275",
"0.62224126",
"0.6158467",
"0.6102417",
"0.60107124",
"0.5923812",
"0.58288354",
"0.58169097",
"0.5816199",
"0.5791001",
"0.57627046",
"0.5757289... | 0.8497326 | 0 |
Computes the Hamming distance for sequences in seqs_mat indicated by pairs of indices. | def nb_vector_hamming_distance(indices, seqs_mat, seqs_L, check_lengths=True):
return _nb_vector_hamming_distance(indices, seqs_mat, seqs_L, check_lengths) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hamming_distance(s1, s2):\n assert(len(s1) == len(s2))\n return np.sum([1 if c1 != c2 else 0 for c1, c2 in zip(s1, s2)])",
"def hamming_distance(s1, s2):\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal lenght.\")\n return sum(ch1 != ch2 for ch1, ch2 in zip(... | [
"0.63828456",
"0.6093228",
"0.6056598",
"0.59507614",
"0.5795671",
"0.578761",
"0.57800555",
"0.574706",
"0.5681204",
"0.56678975",
"0.56452966",
"0.56424135",
"0.563103",
"0.558626",
"0.55659384",
"0.5539205",
"0.5504324",
"0.5493676",
"0.5407647",
"0.5401868",
"0.53662705",... | 0.6753979 | 0 |
Computes the Levenshtein edit distance between two sequences, with the AA substitution distances provided in distance_matrix. The default distance matrix has a 1 for mismatches and 0 for matches. | def nb_editdistance(seq_vec1, seq_vec2, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):
q_L = seq_vec1.shape[0]
s_L = seq_vec2.shape[0]
if q_L == s_L:
"""No gaps: substitution distance
This will make it differ from a strict edit-distance since
the optimal edit-dista... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def word_embedding_levenshtein(seq1, seq2, embeddings, average_distance, r=0.9, normalise=False):\n\tx1 = 1 + len(seq1)\n\tx2 = 1 + len(seq2)\n\n\talpha = r / ((1 - r) * average_distance)\n\n\t# Initialisation of the matrix\n\td = [] # Using Numpy structures for this is probably not more efficient\n\td.append(list... | [
"0.6711283",
"0.6683752",
"0.6674949",
"0.6592512",
"0.6581954",
"0.64532",
"0.64532",
"0.6445244",
"0.6432772",
"0.64265794",
"0.63696915",
"0.6358784",
"0.62909234",
"0.62251955",
"0.6190301",
"0.61244994",
"0.6103599",
"0.6082008",
"0.60701114",
"0.60523444",
"0.60067487",... | 0.6989441 | 0 |
Compute "tcrdist" distance between two TCR CDR3 sequences. Using default weight, gap penalty, ntrim and ctrim is equivalent to the original distance published in Dash et al, (2017). By setting ntrim and ctrim to 0 and adjusting the dist_weight, it is also possible to compute the CDR1/2 loop distances which can be combi... | def nb_tcrdist(seq_vec1, seq_vec2, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):
q_L = seq_vec1.shape[0]
s_L = seq_vec2.shape[0]
if q_L == s_L:
"""No gaps: substitution distance"""
tmp_dist = 0
for i in range(ntrim, q_L - ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n for ind_i in nb.prange(indices.shape[0]):\n ... | [
"0.53767455",
"0.4826406",
"0.47805288",
"0.4753317",
"0.47484082",
"0.46879807",
"0.46756828",
"0.46334925",
"0.45374662",
"0.4479596",
"0.44660226",
"0.44558287",
"0.44372138",
"0.44101122",
"0.43605304",
"0.4340066",
"0.43351212",
"0.4289836",
"0.4284168",
"0.42829153",
"0... | 0.6212199 | 0 |
Computes the tcrdist distance for sequences in seqs_mat indicated by pairs of indices. | def nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):
return _nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix, dist_weight, gap_penalty, ntrim, ctrim, fixed_gappos) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n for ind_i in nb.prange(indices.shape[0]):\n ... | [
"0.6867072",
"0.6173414",
"0.61204684",
"0.59654254",
"0.5629023",
"0.5607086",
"0.55902237",
"0.55837727",
"0.55610365",
"0.5435831",
"0.54259413",
"0.5407338",
"0.5377603",
"0.5373048",
"0.5354197",
"0.5333928",
"0.5265103",
"0.52603585",
"0.5210351",
"0.5201209",
"0.518305... | 0.6814871 | 1 |
Computes the Levenshtein edit distance for sequences in seqs_mat indicated by pairs of indices. | def nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):
#print(indices.shape)
#print(seqs_mat.shape)
#print(seqs_L.shape)
return _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix, gap_penalty) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n mx_L = nb.int_(np.max(seqs_L))\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n \n \"\"\"As long as ldmat is big enough to accomo... | [
"0.7585912",
"0.65193325",
"0.6018155",
"0.5973261",
"0.5960617",
"0.5927266",
"0.5902716",
"0.5879615",
"0.5879615",
"0.5809641",
"0.57759446",
"0.5760104",
"0.57229745",
"0.5714925",
"0.5702276",
"0.5691489",
"0.5681923",
"0.5659949",
"0.5631824",
"0.56016165",
"0.55863863"... | 0.7149528 | 1 |
Store the names and grades of school students. | def __init__(self):
self.students = {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_student(self, name: str, grade: int) -> None:\n school_grade = self.students.setdefault(grade, [])\n school_grade.append(name)\n school_grade.sort()",
"def __init__(self):\n self.students = []\n self.grades = {}\n self.isSorted = True",
"def add_student():\n\n\... | [
"0.70357305",
"0.61918634",
"0.6180321",
"0.61655444",
"0.61187875",
"0.6078942",
"0.6064046",
"0.60455054",
"0.60356414",
"0.60095865",
"0.59675676",
"0.5928267",
"0.5887183",
"0.5879371",
"0.5865206",
"0.58481586",
"0.58402777",
"0.5822257",
"0.5813019",
"0.5812019",
"0.580... | 0.6250706 | 1 |
Add a student to a grade in the roster. | def add_student(self, name: str, grade: int) -> None:
school_grade = self.students.setdefault(grade, [])
school_grade.append(name)
school_grade.sort() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_grade(self, student, grade):\n try:\n self.grades[student.id].append(grade)\n except KeyError:\n raise ValueError('Student not in Grade Book.')",
"def addGrade(self, student, grade):\n try:\n self.grades[student.getIDNumber()].append(grade)\n e... | [
"0.812951",
"0.7889351",
"0.7747675",
"0.75414544",
"0.7533531",
"0.74755746",
"0.74255633",
"0.7281956",
"0.7157385",
"0.70832974",
"0.70647204",
"0.7050774",
"0.7042668",
"0.70276797",
"0.68648064",
"0.67618066",
"0.6695062",
"0.661738",
"0.6594988",
"0.65615714",
"0.653173... | 0.7927675 | 1 |
Compute initial values for position and velocity in the GCRS system, for later use in orbit integration, from tables in the prediction files. A Lagrange polynomial is used to interpolate in the tables. | def calculate_initial_values(eph, rundate):
data = sorted(eph["positions"].items())
pos_itrs = np.zeros((len(data), 3))
mjd1, mjd2 = zip(*[t for t, d in data])
rotation_mat = rotation.trs2gcrs(time.Time(val=mjd1, val2=mjd2, fmt="mjd", scale="utc"))
tbl = time.Time(val=mjd1, val2=mjd2, fmt="mjd", sca... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def initialize(self):\n self.positions = self._generate_initial_positions()\n self.scores = np.array(self.compute_scores(self.positions))\n\n self._pso_data.best_positions = self.positions\n self._pso_data.best_scores = self.scores\n\n magic_constant = 2 # feel free to change FI... | [
"0.6120319",
"0.6116722",
"0.5932199",
"0.5788035",
"0.57826406",
"0.57638705",
"0.5757198",
"0.5753652",
"0.57429856",
"0.57152313",
"0.57009196",
"0.5668376",
"0.5657558",
"0.5629421",
"0.5624367",
"0.5592595",
"0.5591798",
"0.5582761",
"0.5570598",
"0.55646664",
"0.5564146... | 0.6997615 | 0 |
Do the initialization and setup for building a postage stamp. In the base class, we check for and parse the appropriate size and position values in config (aka base['stamp'] or base['image']). Values given in base['stamp'] take precedence if these are given in both places (which would be confusing, so probably shouldn't... | def setup(self, config, base, xsize, ysize, ignore, logger):
# .. Do any custom setup you need to do.
# Probably want to call the base class setup function to do the normal determination
# of the size and position values.
# Extra processing of 'bandpass' argument
# Most needed t... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setup(self, config, base, file_num, logger):\n # This is a copy of the base class code\n seed = galsim.config.SetupConfigRNG(base, logger=logger)\n logger.debug('file %d: seed = %d',file_num,seed)\n\n if 'det_num' not in config:\n config['det_num'] = { 'type': 'Sequence',... | [
"0.5815659",
"0.5406954",
"0.53990644",
"0.53855544",
"0.5383662",
"0.5338992",
"0.5320965",
"0.5303831",
"0.5302118",
"0.5297782",
"0.52322584",
"0.51910156",
"0.51902056",
"0.5185881",
"0.5184995",
"0.5149911",
"0.5142809",
"0.51350987",
"0.51125443",
"0.5085775",
"0.508431... | 0.64838976 | 0 |
Before drawing the profile, see whether this object can be trivially skipped. The base method checks whether the object is completely off the main image (in which case the intersection bounds will be undefined); in that case, don't bother drawing the postage stamp for this object. Parameters: prof: The profile to draw. image: The image onto which to... | def updateSkip(self, prof, image, method, offset, config, base, logger):
# NOTE: There are currently unresolved issues with the image size checking of chromatic
# objects. For now, we ignore any possible speed increases and skip the check.
# if isinstance(prof, galsim.ChromaticObject):
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw(self, prof, image, method, offset, config, base, logger, **kwargs):\n # ... draw prof onto the given image (making a new Image if necessary)\n if prof is None:\n return image\n else:\n logger = galsim.config.LoggerWrapper(logger)\n # Setup the kwargs t... | [
"0.6533734",
"0.51599497",
"0.47828445",
"0.47232333",
"0.45972005",
"0.45731962",
"0.45694324",
"0.45572576",
"0.45465982",
"0.45340365",
"0.44940332",
"0.44605252",
"0.44413173",
"0.44082165",
"0.4281743",
"0.4263862",
"0.42636248",
"0.42611814",
"0.42450166",
"0.42342076",
... | 0.67846763 | 0 |
Draw the profile on the postage stamp image. This is a slightly modified version of `stamp.DrawBasic()` which allows drawing of chromatic objects. Parameters: prof: The profile to draw. image: The image onto which to draw the profile (which may be None). method: The method to use in drawImage. offset: The offset to apply when drawing.... | def draw(self, prof, image, method, offset, config, base, logger, **kwargs):
# ... draw prof onto the given image (making a new Image if necessary)
if prof is None:
return image
else:
logger = galsim.config.LoggerWrapper(logger)
# Setup the kwargs to pass to d... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def updateSkip(self, prof, image, method, offset, config, base, logger):\n\n # NOTE: There are currently unresolved issues with the image size checking of chromatic\n # objects. For now, we ignore any possible speed increases and skip the check.\n # if isinstance(prof, galsim.ChromaticObject):... | [
"0.56249416",
"0.49046072",
"0.4773243",
"0.4728293",
"0.47255272",
"0.46967715",
"0.460233",
"0.45648476",
"0.45566934",
"0.45022318",
"0.44824857",
"0.44786713",
"0.44598132",
"0.4454975",
"0.44341892",
"0.4383001",
"0.43778774",
"0.43481213",
"0.43244997",
"0.43213403",
"0... | 0.70576626 | 0 |
Take a draft_dict that was already validated by draft_dict_validator, then further sanitize, validate, and transform it. Ultimately return this "further validated" draft dict. It will have a slightly different set of keys, the values of which can be used to directly create a Draft object. | def further_validated_draft_dict(
draft_dict: Dict[str, Any], user_profile: UserProfile
) -> Dict[str, Any]:
content = normalize_body(draft_dict["content"])
timestamp = draft_dict.get("timestamp", time.time())
timestamp = round(timestamp, 6)
if timestamp < 0:
# While it's not exactly an in... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _draft_from_response(data):\n return Draft(\n uuid=UUID(data['uuid']),\n bundle_uuid=UUID(data['bundle_uuid']),\n name=data['name'],\n updated_at=dateutil.parser.parse(data['staged_draft']['updated_at']),\n files={\n path: DraftFile(path=path, **file)\n ... | [
"0.56550765",
"0.56206477",
"0.5450413",
"0.52529806",
"0.52475804",
"0.5194902",
"0.51102144",
"0.51091665",
"0.50821674",
"0.5033902",
"0.5002",
"0.49568045",
"0.4842201",
"0.48251075",
"0.48187992",
"0.48105076",
"0.48069793",
"0.47712836",
"0.47657195",
"0.47256124",
"0.4... | 0.7376656 | 0 |
Create drafts in bulk for a given user based on the draft dicts. Since the only place this method is currently being used (apart from tests) is the create_draft view, we assume that the draft_dicts are syntactically valid (i.e. they satisfy the draft_dict_validator). | def do_create_drafts(draft_dicts: List[Dict[str, Any]], user_profile: UserProfile) -> List[Draft]:
draft_objects = []
for draft_dict in draft_dicts:
valid_draft_dict = further_validated_draft_dict(draft_dict, user_profile)
draft_objects.append(
Draft(
user_profile=use... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_draft(auth, subject, body, addresses, user_id, cc_addresses=[], attachments_list=None):\r\n data = {}\r\n data['Subject'] = subject\r\n data['Body'] = {}\r\n data['Body']['ContentType'] = 'HTML'\r\n data['Body']['Content'] = body\r\n data['ToRecipients'] = [{'EmailAddress': {'Address':... | [
"0.56473714",
"0.55276287",
"0.5469983",
"0.5152256",
"0.5134393",
"0.5032918",
"0.4999463",
"0.49490035",
"0.49176887",
"0.49063164",
"0.48992178",
"0.48911917",
"0.48814285",
"0.48810473",
"0.48643064",
"0.48627475",
"0.48573655",
"0.48487023",
"0.48398086",
"0.48395732",
"... | 0.7956114 | 0 |
Edit/update a single draft for a given user. Since the only place this method is being used (apart from tests) is the edit_draft view, we assume that the draft_dict is syntactically valid (i.e. it satisfies the draft_dict_validator). | def do_edit_draft(draft_id: int, draft_dict: Dict[str, Any], user_profile: UserProfile) -> None:
try:
draft_object = Draft.objects.get(id=draft_id, user_profile=user_profile)
except Draft.DoesNotExist:
raise ResourceNotFoundError(_("Draft does not exist"))
valid_draft_dict = further_validate... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def edit_draft(self):\r\n EmptyPromise(\r\n lambda: self.q(css='.create-draft').present,\r\n 'Wait for edit draft link to be present'\r\n ).fulfill()\r\n\r\n self.q(css='.create-draft').first.click()\r\n\r\n EmptyPromise(\r\n lambda: self.q(css='.editing... | [
"0.63595945",
"0.6280873",
"0.623346",
"0.62005955",
"0.618698",
"0.61712956",
"0.616757",
"0.6136594",
"0.6136562",
"0.61018544",
"0.60939956",
"0.60843796",
"0.5883826",
"0.58486557",
"0.5807267",
"0.57433337",
"0.5733624",
"0.5710894",
"0.56908655",
"0.56681085",
"0.563653... | 0.7761259 | 0 |