query: stringlengths (9 to 9.05k)
document: stringlengths (10 to 222k)
metadata: dict
negatives: listlengths (30 to 30)
negative_scores: listlengths (30 to 30)
document_score: stringlengths (4 to 10)
document_rank: stringclasses (2 values)
Iterate through all accesses by ``for i in scope.accesses``.
def __iter__(self) -> Iterator[Access]:
    for accesses in self._accesses.values():
        for access in accesses:
            yield access
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __iter__(self):\n while not self.accesses.empty():\n yield self.accesses.get()", "def recursive_accesses(self, depth, max_depth=-1, external=True):\n # pylint: disable=not-an-iterable\n if max_depth == -1:\n max_depth = float('inf')\n\n for access in self.fil...
[ "0.72342896", "0.6824232", "0.654937", "0.654937", "0.61959404", "0.56890184", "0.5652105", "0.56444854", "0.55087644", "0.54611427", "0.54452384", "0.5443628", "0.53988117", "0.537276", "0.5348753", "0.5241717", "0.52374184", "0.5205386", "0.51431155", "0.5113757", "0.507582...
0.78320724
0
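A minimal runnable sketch of the same pattern; the Scope container and its access strings below are made up for illustration, and itertools.chain.from_iterable is an equivalent spelling of the two nested loops:

from itertools import chain
from typing import Dict, Iterator, List


class Scope:
    """Hypothetical container mirroring the snippet's _accesses dict of lists."""

    def __init__(self) -> None:
        self._accesses: Dict[str, List[str]] = {}

    def record(self, name: str, access: str) -> None:
        self._accesses.setdefault(name, []).append(access)

    def __iter__(self) -> Iterator[str]:
        # Equivalent to the two nested loops in the snippet above.
        return chain.from_iterable(self._accesses.values())


scope = Scope()
scope.record("x", "read")
scope.record("x", "write")
scope.record("y", "read")
print(list(scope))  # ['read', 'write', 'read']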
A helper function to retrieve the simple name str from a CSTNode or str
def get_name_for(node: Union[str, cst.CSTNode]) -> Optional[str]:
    if isinstance(node, cst.Name):
        return node.value
    elif isinstance(node, str):
        return node
    elif isinstance(node, cst.Call):
        return _NameUtil.get_name_for(node.func)
    elif isinstance(node, c...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name(node):\n\n return fst(node)", "def get_name() -> str:", "def get_name(node):\n if isinstance(node, ast.Name):\n return node.id", "def nodeToShortName(node):\n\n pass", "def get_name():", "def nodeToLongName(node):\n\n pass", "def node_getname( fdt, node_number_or...
[ "0.7268304", "0.7049384", "0.67698145", "0.6742271", "0.67241335", "0.66804475", "0.66666675", "0.66269785", "0.65419465", "0.6449938", "0.64347196", "0.6416907", "0.6346482", "0.6314005", "0.6314005", "0.6314005", "0.6314005", "0.6242925", "0.6222061", "0.6213705", "0.619309...
0.7702267
0
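A hedged, self-contained sketch of the same dispatch using libcst; the Attribute branch is a guess at the truncated tail, and module-level recursion stands in for the snippet's _NameUtil helper:

from typing import Optional, Union

import libcst as cst


def get_name_for(node: Union[str, cst.CSTNode]) -> Optional[str]:
    if isinstance(node, cst.Name):
        return node.value
    if isinstance(node, str):
        return node
    if isinstance(node, cst.Call):
        return get_name_for(node.func)
    if isinstance(node, cst.Attribute):  # guessed continuation of the truncated tail
        return get_name_for(node.attr)
    return None


print(get_name_for(cst.parse_expression("foo.bar(42)")))  # bar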
Returns true if ``node`` is part of the assignment at ``assignment_node``. Normally this is just a simple identity check, except for imports where the assignment is attached to the entire import statement but we are interested in ``Name`` nodes inside the statement.
def _is_assignment(node: cst.CSTNode, assignment_node: cst.CSTNode) -> bool:
    if node is assignment_node:
        return True
    if isinstance(assignment_node, (cst.Import, cst.ImportFrom)):
        aliases = assignment_node.names
        if isinstance(aliases, cst.ImportStar):
            return False
        for ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isAssignment(self):\n return _libsbml.Rule_isAssignment(self)", "def is_assign_to_name(statement):\n return isinstance(statement, ast.Assign) and \\\n len(statement.targets) == 1 and \\\n isinstance(statement.targets[0], ast.Name)", "def is_assignment(*args):\n return _ida_he...
[ "0.62317604", "0.60697824", "0.5954752", "0.5715725", "0.55724084", "0.55121505", "0.5378449", "0.5343535", "0.53163487", "0.5311576", "0.53089356", "0.52855116", "0.5275552", "0.5273492", "0.52600914", "0.52340776", "0.520116", "0.51995283", "0.5169611", "0.5149941", "0.5117...
0.836117
0
Returns whether it successfully handled the string annotation
def _handle_string_annotation(
    self, node: Union[cst.SimpleString, cst.ConcatenatedString]
) -> bool:
    if (
        self.__in_type_hint_stack[-1] or self.__in_annotation_stack[-1]
    ) and not self.__in_ignored_subscript:
        value = node.evaluated_value
        if value:
            ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_action_str(string: str) -> bool:", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def simple(self) -> bool:\n return is_simple(self.string)", "def check(self, text):\n\n try:\n console.print(self.parser.parse(text)[\"result\"][1:]...
[ "0.65544593", "0.6309469", "0.6091547", "0.6053214", "0.6043687", "0.60382533", "0.6006412", "0.5999586", "0.59370255", "0.5861694", "0.5861107", "0.58280134", "0.58182687", "0.5812485", "0.57880664", "0.57711285", "0.5747984", "0.57046604", "0.56872684", "0.56801474", "0.567...
0.74615365
0
Test the reading of passwords.
def test_read(sqlite_db):
    site = "www.example.com"
    passwd = smm.read_passwd(site)
    assert passwd == "TheNewPassword"
    bad_request = smm.read_passwd("NotASite")
    assert not bad_request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_check_password():\n assert check_password('Longpassword') == False\n assert check_password('123456') == False\n assert check_password('short') == False\n assert check_password('C0rect') == False\n assert check_password('Correct8') == True", "def test_valid_password(self):\n pass_fi...
[ "0.729323", "0.72582704", "0.71649766", "0.6846707", "0.6810091", "0.67917", "0.67917", "0.6777287", "0.6760344", "0.673974", "0.67032176", "0.6686708", "0.6638984", "0.66241276", "0.6607445", "0.6604479", "0.65660244", "0.6564443", "0.65401316", "0.65349746", "0.65200394", ...
0.7469244
0
Test the removal of passwords.
def test_removal(sqlite_db):
    site = "www.example.com"
    response = smm.remove_passwd(site)
    assert response
    bad_response = smm.remove_passwd(site)
    assert not bad_response
    assert not smm.read_passwd(site)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_invalid_password(self):\n pass", "def test_check_password():\n assert check_password('Longpassword') == False\n assert check_password('123456') == False\n assert check_password('short') == False\n assert check_password('C0rect') == False\n assert check_password('Correct8') == True"...
[ "0.6817206", "0.666834", "0.6652047", "0.65106744", "0.64689183", "0.64474946", "0.64468163", "0.6384452", "0.63637775", "0.6348568", "0.63416636", "0.63272005", "0.63171124", "0.6305562", "0.6297313", "0.6289528", "0.627948", "0.6237605", "0.6184281", "0.61652255", "0.612686...
0.78059477
0
Inserts a new course object into the linked list.
def insert(self, course):
    new_node = Node(course)
    if self.head is None or self.head.data.number() >= new_node.data.number():
        new_node.next = self.head
        self.head = new_node
        self._size += 1
        return
    cur_node = self.head
    while cur_node.next an...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add():\n prev_courses = Course._file.read_db()\n course_name = input(\"Please, type course name >\")\n # check course for uniqueness/ instantiating blank class with one attribute\n c = Course(course_name)\n if c.is_course_exists():\n print(\"{} is already exists\"....
[ "0.7108956", "0.6971245", "0.6751544", "0.6639195", "0.6605063", "0.6451658", "0.6447333", "0.6298767", "0.62383056", "0.6146749", "0.6086189", "0.608164", "0.6031581", "0.60168093", "0.5997468", "0.5956669", "0.5928258", "0.5841721", "0.580762", "0.57517105", "0.5742702", ...
0.74338984
0
Returns True if the list is sorted by Course Number, False otherwise.
def is_sorted(self):
    cur_list = []
    cur_node = self.head
    while cur_node is not None:
        cur_list.append(cur_node.data.number())
        cur_node = cur_node.next
    if cur_list == sorted(cur_list):
        return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_sorted_list(list_):\n prev = -1\n for item in list_:\n if item < prev:\n return False\n prev = item\n return True", "def is_sorted(l: list):\n for idx, num in enumerate(l):\n if idx is 0:\n continue\n elif l[idx-1] <= num:\n continu...
[ "0.64845145", "0.63505226", "0.6258908", "0.6216176", "0.6167742", "0.610778", "0.6065763", "0.60476375", "0.60458887", "0.5988047", "0.5985882", "0.59754354", "0.59156555", "0.5914994", "0.58944833", "0.5874584", "0.5858344", "0.58307153", "0.57908547", "0.57533205", "0.5724...
0.6650408
0
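For contrast, a sketch that short-circuits at the first inversion instead of building and sorting an auxiliary list; the Course and Node stubs are made up to keep it runnable:

class Course:
    def __init__(self, number):
        self._number = number

    def number(self):
        return self._number


class Node:
    def __init__(self, data, next=None):
        self.data, self.next = data, next


def is_sorted(head) -> bool:
    # Walk adjacent pairs and bail out at the first out-of-order pair.
    cur = head
    while cur is not None and cur.next is not None:
        if cur.data.number() > cur.next.data.number():
            return False
        cur = cur.next
    return True


head = Node(Course(101), Node(Course(202), Node(Course(150))))
print(is_sorted(head))  # False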
Checks that the right number of sections exist. The prologue before the first section is 0, while subsequent ones are 1, 2, 3, etc. So if you have 3 sections in your code plus the prologue, you should pass in 3 and not 4 to verify that all of them exist.
def check_section_exists(section_number, report=None):
    if report is None:
        report = MAIN_REPORT
    if not report['source']['success']:
        return False
    found = int((len(report['source']['sections']) - 1) / 2)
    if section_number > found:
        report.attach('Syntax error', category='Syntax', too...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testSectionCount(self):\n\n self.sectionCount(3640)", "def check_pe_sections(self, pe):\n res = []\n for section in pe.sections:\n if b\"!This program cannot be run in DOS mode\" in section.get_data()[:400] or\\\n b\"This program must be run under Win32\" in...
[ "0.6215828", "0.61386", "0.578303", "0.5665832", "0.56310284", "0.56031877", "0.55833596", "0.5556951", "0.5545726", "0.5543077", "0.5448445", "0.54293984", "0.53988886", "0.53918827", "0.53827995", "0.53756946", "0.5323125", "0.52977574", "0.52821344", "0.5256953", "0.523906...
0.6235399
0
Returns a Xapian index document from the context. Introspecting the connection provides the relevant available fields.
def document(self, connection):
    doc = xappy.UnprocessedDocument()
    for iface in providedBy(self.context):
        for field in schema.getFields(iface).values():
            if not isinstance(field, (schema.Text, schema.ASCII)):
                continue
            value = field.query(self...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_index(self, uid: str) -> Index:\n return Index(self.config, uid).fetch_info()", "def as_search_document(self, index='_all'):\n raise NotImplementedError(\n \"{} does not implement 'get_search_document'.\".format(self.__class__.__name__)\n )", "def document_indexer(self):...
[ "0.6285873", "0.58124757", "0.57936424", "0.5732638", "0.56942385", "0.56683356", "0.5626097", "0.5600954", "0.55877274", "0.55677587", "0.5518595", "0.5501073", "0.54690313", "0.54614365", "0.54614365", "0.5457262", "0.5432552", "0.5432552", "0.5432552", "0.5432552", "0.5432...
0.60662013
1
Function to check if the provided URL is valid. Tries to scrape the page for the 403 error message; if found, returns False (meaning it's not a valid page), else returns True.
def check_url(url: str) -> bool:
    try:
        potential_error = driver.find_element_by_xpath("/html/body/div[5]/div/div/div[1]/div/div/div/section/div[2]/div").text
        if '403' in potential_error:
            return False  # 403 error message found: not a valid page
    except:
        pass
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def url_is_good(url):\n return website_re.match(url)\n # possible validation of reachability of website\n # http_response = requests.get(url)\n # return http_response < 400:", "def is_valid_url(url: str) -> bool:\n try:\n requests.get(url)\n except requests.exceptions.RequestException:\n...
[ "0.7925075", "0.7531995", "0.7530058", "0.7316157", "0.72739685", "0.72567844", "0.71402746", "0.7119127", "0.70845497", "0.70514876", "0.70416003", "0.70365214", "0.7012856", "0.7010799", "0.6940691", "0.69313127", "0.6929111", "0.68956256", "0.6881502", "0.6879656", "0.6854...
0.86800694
0
Tries to click the given button, using try/except for error catching.
def click_button(button_to_click):
    try:
        button_to_click.click()
    except:
        print("Button not found")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def atomacclick(objecttoclick):\n try:\n objecttoclick.Press()\n #print \"clicked on : %s\" %objecttoclick\n except Exception as er:\n print \"Not able to click on: %s\" %objecttoclick", "def clickonbutton(titleobj, buttontoclick):\n try:\n ldtp.click(titleobj,buttontoclick)\...
[ "0.6922278", "0.6865346", "0.6652217", "0.66007996", "0.65745544", "0.6573612", "0.649841", "0.648705", "0.6485279", "0.6460392", "0.63917434", "0.6329999", "0.6280454", "0.6223838", "0.6120003", "0.607597", "0.60168874", "0.60052997", "0.60030556", "0.5995394", "0.59888935",...
0.8195593
0
Scrapes the amenities from the web page given the XPath. Returns a list.
def find_amenities(xpath: str) -> list:
    amenities = driver.find_elements_by_xpath(xpath)
    return [amenitie.text for amenitie in amenities]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collect_amenities(self, url: str) -> None:\n page_source = self.get_page_source(url, \"gmnoprint\")\n soup = BeautifulSoup(page_source, \"html.parser\")\n\n # Get latitude and longitude data\n self.get_coordinates(soup)\n\n # Open amenities url and collect additional data\n ...
[ "0.6214858", "0.6088792", "0.59633666", "0.57775974", "0.5686356", "0.5672884", "0.56672996", "0.56397474", "0.56136924", "0.5509259", "0.5411635", "0.54108214", "0.53939843", "0.5365997", "0.5331163", "0.53094465", "0.530245", "0.52577066", "0.5229415", "0.51995695", "0.5191...
0.7940574
0
Returns the text of a given tag / class id within a Beautiful Soup object.
def find_data_in_soup(soup, tag: str, class_id: str) -> str:
    return soup.find(tag, class_=class_id).get_text()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def text_of(soup):\n return ''.join([str(x) for x in soup.findAll(text=True)])", "def getHTMLTag(self, html, tag):\n soup = BeautifulSoup(html, 'html.parser')\n content = soup.find(tag)\n return content", "def get_text_only(self, soup):\n val = soup.string\n # see if we ha...
[ "0.65025955", "0.64949864", "0.64455515", "0.6300595", "0.624028", "0.61896014", "0.61368173", "0.6121002", "0.60955775", "0.6071008", "0.6028691", "0.59635437", "0.5934924", "0.5929046", "0.5905635", "0.5897759", "0.5881083", "0.5818709", "0.581585", "0.580303", "0.57516354"...
0.8107418
0
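A quick usage sketch with BeautifulSoup; the HTML, tag, and class names are invented for illustration:

from bs4 import BeautifulSoup


def find_data_in_soup(soup, tag: str, class_id: str) -> str:
    return soup.find(tag, class_=class_id).get_text()


html = '<div><span class="price">$42</span></div>'  # invented markup
soup = BeautifulSoup(html, "html.parser")
print(find_data_in_soup(soup, "span", "price"))  # $42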
Create the large attribute dictionary.
def create_data_set(num_attributes):
    data_set = {}
    for index in range(num_attributes):
        size = random.randint(1, 10)  # nosec
        key = str(index).encode("utf-8")
        data_set[key] = get_random_bytes(size)
    return data_set
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_attributes(self):\n self.attr = {\n 'name': None,\n 'tags': [],\n 'openHours': None,\n 'type': None,\n 'parent': None,\n 'locationId': None,\n 'bannerAbbreviation': None,\n 'arcGisAbbreviation': None,\n ...
[ "0.6525995", "0.64443743", "0.6089407", "0.60877615", "0.60155773", "0.5991732", "0.58783555", "0.58489907", "0.58311176", "0.5822948", "0.5812633", "0.5803778", "0.57970864", "0.578953", "0.57819146", "0.5750089", "0.57281816", "0.57055736", "0.57011706", "0.5686207", "0.568...
0.65942544
0
Verify the attribute values after get_attr.
def verify_get_attr(self, indata, outdata):
    decoded = {}
    for key, val in outdata.items():
        if isinstance(val, bytes):
            decoded[key.decode()] = val
        else:
            decoded[key] = base64.b64decode(val)
    self.log.info("Verifying get_attr output:")
    ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_attributes(self):\n pass", "def test_get_attribute_data(self):\n pass", "def GetValidAttributeValues(self, attr):\n return None", "def verify_get_attr(self, indata, outdata):\n decoded = {}\n for key, val in outdata.items():\n if isinstance(val, byte...
[ "0.71446437", "0.70372206", "0.696676", "0.6889947", "0.68315357", "0.6809947", "0.67375857", "0.6713587", "0.66863", "0.6556088", "0.65475464", "0.65393233", "0.6483959", "0.6472178", "0.64524734", "0.64095545", "0.6385835", "0.63662297", "0.63074434", "0.6280292", "0.626760...
0.7044167
1
Check the command result; raise a failure when an error is encountered.
def check_result(self, result):
    self.log.info("--check_result, result= %s", result)
    if result[0]['exit_status'] != 0:
        self.fail("##Error detected from check_result")
    else:
        self.log.info("--check_result passed")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error_check(command):\r\n\r\n # TODO\r", "def __try_command(cmd, description):\n try:\n out = subprocess.check_output(cmd, stderr=subprocess.STDOUT);\n return (True, out.decode(\"utf-8\")) # success\n except subprocess.CalledProcessError as e:\n print(\"Error while {:s}, return ...
[ "0.7622423", "0.7359788", "0.7271346", "0.72338516", "0.7085878", "0.70216614", "0.70130956", "0.6983405", "0.69500047", "0.6859855", "0.6827278", "0.68127596", "0.6743249", "0.67426986", "0.6713755", "0.67079455", "0.6696678", "0.66868126", "0.6676383", "0.66693264", "0.6666...
0.7674272
0
Upgrade hosts via repository or RPMs
def upgrade(self, servers, clients):
    if ".repo" in self.upgrade_repo:
        repo_2 = self.upgrade_repo
        repo_1 = self.downgrade_repo
        self.updowngrade_via_repo(servers, clients, repo_1, repo_2)
    else:
        all_hosts = servers + clients
        self.updowngrade_via_r...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upgrade_and_downgrade(self, fault_on_pool_upgrade=False):\n # (1)Setup\n self.log.info(\"(1)==Setup and show rpm, dmg and daos versions on all hosts.\")\n hosts_client = self.hostlist_clients\n hosts_server = self.hostlist_servers\n all_hosts = include_local_host(hosts_server...
[ "0.65535074", "0.6516614", "0.6511681", "0.64082664", "0.6343064", "0.6275903", "0.625844", "0.6231782", "0.6076686", "0.6070641", "0.6041249", "0.6015797", "0.6015797", "0.6012341", "0.59814286", "0.5954745", "0.5914903", "0.59111565", "0.58789015", "0.5869267", "0.5842611",...
0.7090284
0
Downgrade hosts via repository or RPMs
def downgrade(self, servers, clients):
    if ".repo" in self.upgrade_repo:
        repo_1 = self.upgrade_repo
        repo_2 = self.downgrade_repo
        self.updowngrade_via_repo(servers, clients, repo_1, repo_2)
    else:
        all_hosts = servers + clients
        self.updowngrade_via...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upgrade(self, servers, clients):\n if \".repo\" in self.upgrade_repo:\n repo_2 = self.upgrade_repo\n repo_1 = self.downgrade_repo\n self.updowngrade_via_repo(servers, clients, repo_1, repo_2)\n else:\n all_hosts = servers + clients\n self.upd...
[ "0.6603967", "0.6249081", "0.6235765", "0.6235765", "0.62223345", "0.62223345", "0.62223345", "0.61755437", "0.6065216", "0.6048611", "0.6013978", "0.5956582", "0.5954306", "0.5892654", "0.58716166", "0.5861164", "0.5860646", "0.58190066", "0.57984877", "0.57966673", "0.57800...
0.6965447
0
Display the daos and dmg versions, and check for errors.
def daos_ver_after_upgraded(self, host):
    cmds = [
        "daos version",
        "dmg version",
        "daos pool query {}".format(self.pool.identifier)]
    for cmd in cmds:
        self.log.info("==cmd= %s", cmd)
        result = pcmd(host, cmd, False)
        if 0 not in result ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_version():\n print(\"===============================================================\")\n print(f\"Productivity App v{__VERSION__}\")\n print(f\"Made by {__AUTHOR__} (with the advices of {__ADVISOR__})\")\n print(\"Source : https://github.com/Ilade-s/productivite-app-TkVer\")\n print(\"Serv...
[ "0.61408764", "0.60227036", "0.5977592", "0.59699047", "0.5859191", "0.5809201", "0.5670012", "0.56505746", "0.56443745", "0.5622813", "0.5611506", "0.5571046", "0.55378354", "0.5531627", "0.54901385", "0.54891825", "0.54883015", "0.54713386", "0.54710495", "0.5470672", "0.54...
0.6510692
0
Verify daos and libdaos interoperability between different versions of agent and server.
def verify_daos_libdaos(self, step, hosts_client, cmd, positive_test, agent_server_ver, exp_err=None):
    if positive_test:
        self.log.info("==(%s)Positive_test: %s, on %s", step, cmd, agent_server_ver)
    else:
        self.log.info("==(%s)Negative_test: %s, on %s", ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upgrade_shared_dependencies(self):\n result = self.run_cli_command(\"-s\", \"upgrade\", cwd=self._get_cwd())\n assert result.exit_code == 0\n\n agent_config: AgentConfig = cast(\n AgentConfig,\n load_item_config(PackageType.AGENT.value, Path(self.current_agent_co...
[ "0.6237617", "0.61974806", "0.5944649", "0.58433425", "0.58433425", "0.58433425", "0.58433425", "0.58433425", "0.58433425", "0.58433425", "0.58433425", "0.58433425", "0.58433425", "0.58433425", "0.58433425", "0.58433425", "0.58433425", "0.58433425", "0.58433425", "0.56415945", ...
0.6391706
0
Check if the RPMs have the fault-injection function.
def has_fault_injection(self, hosts):
    status = True
    result = run_pcmd(hosts, "daos_debug_set_params -v 67174515")
    self.log.info("--check_result, result= %s", result)
    if result[0]['stdout'] == []:
        self.log.info("#Host client rpms did not have fault-injection")
        stat...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _isfault(self):\n return self.dp.state()==PyTango.DevState.FAULT", "def check_page_faults(con, host, warning, critical,perf_data):\n warning = warning or 10\n critical = critical or 30\n data=get_server_status(con)\n\n try:\n page_faults=float(data['extra_info']['page_faults']) \n ...
[ "0.5820415", "0.5700631", "0.5693373", "0.5601475", "0.5494928", "0.5375606", "0.5362572", "0.5336421", "0.5294676", "0.52919555", "0.52540994", "0.5233826", "0.5226672", "0.52215457", "0.52044386", "0.5198198", "0.5196629", "0.5195433", "0.5191071", "0.51856494", "0.51808774...
0.66173726
0
Enable and disable fault injection.
def enable_disable_fault_injection(self, hosts, enable=True):
    if enable:
        result = run_pcmd(hosts, "daos_debug_set_params -v 67174515")
    else:
        result = run_pcmd(hosts, "daos_debug_set_params -v 67108864")
    self.check_result(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fault_debug(value: bool = False) -> None:", "def firewallOff():\n pass", "def panic_on_fault_enabled(self):\n # The panic_on_fault mechanism might not even be included in the build\n # (in which case the panic_on_fault variables won't exist), so be defensive.\n try:\n ena...
[ "0.5727856", "0.55507016", "0.5469473", "0.5432097", "0.54003435", "0.53948396", "0.52492994", "0.51916826", "0.51761824", "0.51750493", "0.51594543", "0.5149204", "0.5145031", "0.51321125", "0.51234275", "0.5111981", "0.5108552", "0.50997925", "0.50989044", "0.50777584", "0....
0.6993913
0
Verify pool upgrade status.
def verify_pool_upgrade_status(self, pool_id, expected_status):
    prop_value = self.get_dmg_command().pool_get_prop(
        pool_id, "upgrade_status")['response'][0]['value']
    if prop_value != expected_status:
        self.fail("##prop_value != expected_status {}".format(expected_status))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_package_status(self):\n pass", "def verify_package_status(self):\n pass", "def verify_pool(self, pool):\n svc = self.pool_path % pool\n self.rest_get(svc, restclient.Status.OK)", "def test_version_check_update_available(self):\n output = self.run_command(\"selfup...
[ "0.6395156", "0.6395156", "0.6306147", "0.6207196", "0.62052345", "0.5972069", "0.5907427", "0.5898982", "0.5890571", "0.5863571", "0.58613586", "0.58581054", "0.583659", "0.5818412", "0.5748545", "0.5742679", "0.5731763", "0.5725801", "0.56647915", "0.5657188", "0.56565666",...
0.7941663
0
Execute dmg pool upgrade with fault injection.
def pool_upgrade_with_fault(self, hosts, pool_id):
    # Verify pool status before upgrade
    expected_status = "not started"
    self.verify_pool_upgrade_status(pool_id, expected_status)
    # Enable fault-injection
    self.enable_disable_fault_injection(hosts, enable=True)
    # Pool upgra...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upgrade_and_downgrade(self, fault_on_pool_upgrade=False):\n # (1)Setup\n self.log.info(\"(1)==Setup and show rpm, dmg and daos versions on all hosts.\")\n hosts_client = self.hostlist_clients\n hosts_server = self.hostlist_servers\n all_hosts = include_local_host(hosts_server...
[ "0.7017949", "0.62637675", "0.61918133", "0.5806434", "0.5690036", "0.56347877", "0.53812015", "0.5361644", "0.53614783", "0.535473", "0.5353267", "0.53437454", "0.53437454", "0.53120893", "0.5283165", "0.5260421", "0.5251918", "0.5241946", "0.51609534", "0.51303685", "0.5122...
0.73163605
0
Interoperability of different versions of DAOS agent and server.
def diff_versions_agent_server(self):
    # (1)Setup
    self.log.info("==(1)Setup, create pool and container.")
    hosts_client = self.hostlist_clients
    hosts_server = self.hostlist_servers
    all_hosts = include_local_host(hosts_server | hosts_client)
    self.upgrade_repo = self.params.g...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def server_agent():", "def test_upgrade_shared_dependencies(self):\n result = self.run_cli_command(\"-s\", \"upgrade\", cwd=self._get_cwd())\n assert result.exit_code == 0\n\n agent_config: AgentConfig = cast(\n AgentConfig,\n load_item_config(PackageType.AGENT.value, P...
[ "0.68752897", "0.55359006", "0.5426499", "0.5346168", "0.53395516", "0.52728474", "0.52551156", "0.52248836", "0.5213356", "0.5169228", "0.5133192", "0.51267165", "0.51057464", "0.5069011", "0.50680554", "0.50431585", "0.50287765", "0.5025044", "0.49971265", "0.49971265", "0....
0.64597714
1
Get the mode of a categorical and cast to onehot.
def one_hot_categorical_mode(logits):
    dist = tfd.Categorical(logits)
    return tf.cast(tf.one_hot(dist.mode(), dist.event_size), tf.float32)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_categorical(x, n_col=None):\n if not n_col:\n n_col = np.amax(x) + 1\n one_hot = np.zeros((x.shape[0], n_col))\n one_hot[np.arange(x.shape[0]), x] = 1\n return one_hot", "def to_categorical(target: np.ndarray, n_classes: int = None) -> np.ndarray:\n\tn_classes = n_classes if n_classes i...
[ "0.7203805", "0.7131759", "0.69734716", "0.69353926", "0.6869692", "0.68215334", "0.6793382", "0.6789208", "0.67846286", "0.67513657", "0.67081213", "0.66956395", "0.6684978", "0.6679585", "0.6672036", "0.6661204", "0.6650561", "0.66481876", "0.66481876", "0.66481876", "0.664...
0.77782106
0
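The same operation in plain NumPy for readers without TensorFlow Probability: the mode of a categorical is the argmax of the logits, and indexing an identity matrix turns that index into a one-hot float vector (a sketch, not the snippet's tfd-based code):

import numpy as np


def one_hot_categorical_mode_np(logits: np.ndarray) -> np.ndarray:
    # Mode of a categorical = argmax over the last axis; np.eye builds the one-hot.
    num_classes = logits.shape[-1]
    return np.eye(num_classes, dtype=np.float32)[np.argmax(logits, axis=-1)]


print(one_hot_categorical_mode_np(np.array([0.1, 2.0, -1.0])))  # [0. 1. 0.]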
Given sys.argv as a list of strings, this method returns the sublist right after the '--' element (if present; otherwise returns an empty list).
def _get_argv_after_doubledash(self):
    try:
        idx = sys.argv.index("--")
        return sys.argv[idx+1:]  # the list after '--'
    except ValueError as e:  # '--' not in the list:
        return []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_argv_list(args):\n # parse the string format of arguments and return a list of arguments\n argv = args.split(' ')\n if len(argv) == 1 and argv[0] == '':\n return []\n return argv", "def pop_first_arg(argv):\n for arg in argv:\n if not arg.startswith('-'):\n argv.re...
[ "0.6772655", "0.62367713", "0.61961806", "0.61672586", "0.6163589", "0.6071635", "0.5893925", "0.58778197", "0.5852514", "0.5829911", "0.58101344", "0.5773826", "0.5649536", "0.5600912", "0.55947906", "0.55472255", "0.5522556", "0.551393", "0.5505596", "0.54859275", "0.542388...
0.74402696
0
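How the '--' split behaves in practice, as a self-contained sketch over a literal argv list rather than sys.argv:

def argv_after_doubledash(argv):
    try:
        idx = argv.index("--")
        return argv[idx + 1:]  # everything after '--'
    except ValueError:  # no '--' present
        return []


print(argv_after_doubledash(["prog.py", "-v", "--", "--foo", "bar"]))  # ['--foo', 'bar']
print(argv_after_doubledash(["prog.py", "-v"]))                        # []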
This method is expected to behave identically as in the superclass, except that the sys.argv list will be preprocessed using _get_argv_after_doubledash before. See the docstring of the class for usage examples and details.
def parse_args(self):
    return super().parse_args(args=self._get_argv_after_doubledash())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_from_argv(self, argv):\r\n self.progname = argv[0]\r\n super(Command, self).run_from_argv(argv)", "def set_argv(self, string):\n try:\n self.argv = string.split(' ')\n except AttributeError:\n if string:\n self.argv = string\n el...
[ "0.6951482", "0.677626", "0.6738199", "0.6713902", "0.6713902", "0.6705866", "0.6659652", "0.65899885", "0.6431687", "0.6429928", "0.6400785", "0.6399408", "0.6386593", "0.6369564", "0.6331361", "0.6318897", "0.63100475", "0.6309721", "0.62780434", "0.6223838", "0.6214132", ...
0.77736425
0
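Putting the two methods together: a minimal argparse subclass (the DoubleDashParser name is invented) that parses only what follows '--', useful when a host program owns the earlier arguments:

import argparse
import sys


class DoubleDashParser(argparse.ArgumentParser):
    """Parses only the arguments that appear after a literal '--'."""

    def _argv_after_doubledash(self):
        try:
            idx = sys.argv.index("--")
            return sys.argv[idx + 1:]
        except ValueError:
            return []

    def parse_args(self, args=None, namespace=None):
        # Ignore caller-supplied args and re-derive them from sys.argv.
        return super().parse_args(args=self._argv_after_doubledash(), namespace=namespace)


parser = DoubleDashParser()
parser.add_argument("--name", default="world")
print(parser.parse_args())  # `python host.py host-args -- --name demo` -> Namespace(name='demo')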
Sets velocity vector based on direction
def set_velocity(self):
    if self.direction == 'left':
        self.x_vel = -2
    else:
        self.x_vel = 2
    self.y_vel = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setVelocity(self,v):\n if v is None:\n self.v = Cartesian3DVector()\n else:\n if isinstance(v,Cartesian3DVector):\n self.v = Cartesian3DVector(v.x,v.y,v.z)\n else:\n raise CoordinateVector(\"Initializing a particle with the incorrect velocity vector type.\")", "def velocity...
[ "0.76582134", "0.7481519", "0.74589294", "0.7403367", "0.7303088", "0.7303088", "0.7276262", "0.72136307", "0.7208767", "0.72049433", "0.7171627", "0.7148152", "0.71405905", "0.70739", "0.70646393", "0.7031484", "0.69897455", "0.69644165", "0.6944296", "0.69320303", "0.691545...
0.8153879
0
Log the changed names
def _log_changed_names(changed_names: Iterable[Tuple[str, str]]) -> None:
    if not changed_names:
        return
    from .utils import logger
    logger.warning("New names:")
    for orig_name, new_name in changed_names:
        logger.warning("* %r -> %r", orig_name, new_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _name_changed ( self, name ):\n self.name_last = parse_name( name )[-1]\n self.inputs_changed()", "def test_log_update_name(self):\n log_count_init = LoggerHistory.objects.count()\n original_name = self.project.name\n self.project.name = '%s UPDATED' % self.project.name\n ...
[ "0.66539043", "0.6639276", "0.6399256", "0.6380432", "0.63485473", "0.62940335", "0.619992", "0.61700296", "0.61023694", "0.60812205", "0.6038383", "0.59753203", "0.59630597", "0.5961217", "0.59277236", "0.5906765", "0.5823073", "0.5801369", "0.57996845", "0.5778308", "0.5777...
0.8118659
0
findframe(startdir, camera, grating, filename). This routine will search for a filename. It will start in startdir, then it will descend into a directory named after the camera+grating. After that, it will search subdirectories of the form w\d+. If the file is still not found, it fails.
def findframe(startdir, camera, grating, filename):
    grating = re.sub("\/", "_", grating)
    gdir = camera + grating
    if os.path.isfile(os.path.join(startdir, filename)):
        return(startdir)
    else:
        # now the grating directory
        if os.path.isfile(os.path.join(startdir, gdir, filename)):
            ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findpaths(path):\n print('[INFO] Searching for .png images in ', path)\n frame_paths = []\n frame_to_path_dict = {}\n path_to_frame_dict = {}\n for root, dirs, files in os.walk(path, topdown=False):\n for name in files:\n if name.find('.png') != -1:\n frame_path = os.path.join(root, name)\n...
[ "0.5503912", "0.54061455", "0.54023004", "0.5358403", "0.5319264", "0.531263", "0.5276591", "0.5222761", "0.5186886", "0.5123499", "0.5107922", "0.50754553", "0.50358665", "0.50259393", "0.50139326", "0.5009165", "0.5004129", "0.50000185", "0.4983033", "0.49472937", "0.494713...
0.92224705
0
C function definition: int rtp_smooth(float *im, int nx, int ny, int nz, float dx, float dy, float dz, unsigned short *mask, float fwhm)
def smooth(self, mri_data):
    """DEBUG
    import matplotlib.pyplot as plt
    self = rtp_smooth
    """
    # image dimension
    nx, ny, nz = mri_data.img_data.shape
    if hasattr(mri_data.img_header, 'info'):
        dx, dy, dz = np.abs(mri_data.img_header.info['DELTA'])
        ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def smoothImage(img):\n # Pillow uses RGB and cv2 uses GBR, so have to convert before and after smoothing\n imgBGR = cv2.cvtColor(np.asarray(img), cv2.COLOR_BGR2RGB)\n # smoothImgBGR = cv2.fastNlMeansDenoisingColored(imgBGR, None, 10,10,7,21)\n smoothImgBGR = cv2.bilateralFilter(imgBGR, 9, 75, 75)\n ...
[ "0.6486485", "0.6467457", "0.6421818", "0.62798643", "0.62666625", "0.607366", "0.6062642", "0.6041981", "0.5998901", "0.59359425", "0.5898149", "0.5896014", "0.5868493", "0.5863498", "0.5743527", "0.57388455", "0.57168955", "0.57009465", "0.5594682", "0.55919045", "0.5564512...
0.7523696
0
Get the config value with the label.
def get(self, label):
    if label in self.config[self.env]:
        return self.config[self.env][label]
    else:
        logging.warning(f'Config Mgr->get(): label: {label} not configured')
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getValue(self, label, default=None):\n # Allow getValue using the label string\n if isinstance(label, basestring):\n label = str2Label(label)\n return self._labelDict.get(label, default)", "def get_config_value(self, name):\r\n if name in self.config_values:\r\n ...
[ "0.76343673", "0.7445326", "0.6866368", "0.6811643", "0.6778108", "0.6752221", "0.6746077", "0.6634147", "0.6633436", "0.66317177", "0.6629522", "0.66281676", "0.66143626", "0.6585026", "0.64996743", "0.64693654", "0.6405412", "0.63843614", "0.6352854", "0.6325311", "0.628574...
0.78867954
0
Return a fully-formatted connect string for psycopg2.connect using the config parameters.
def get_db_connect_string(self):
    template_string = "host={} dbname={} user={} password={}"
    return template_string.format(self.get("DB_HOST"),
                                  self.get("DB_NAME"),
                                  self.get("DB_USER"),
                                  self.g...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_connection_string(self):\n auth = ''\n if self.user:\n auth = self.user\n if self.password:\n auth = auth + ':' + self.password\n if auth:\n auth += '@'\n conn_string = 'postgresql://{auth}{host}/{dbname}'.format(\n auth=auth, h...
[ "0.714022", "0.71283305", "0.69476783", "0.6788406", "0.6728916", "0.67060965", "0.6638018", "0.6636255", "0.6568879", "0.6510854", "0.645065", "0.6360388", "0.62624675", "0.6222213", "0.61551327", "0.61442584", "0.6135438", "0.6134094", "0.6118196", "0.60870296", "0.6026554"...
0.7425035
0
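A hedged usage sketch: psycopg2.connect accepts exactly this kind of keyword DSN string. The credentials below are placeholders and a reachable PostgreSQL server is assumed:

import psycopg2  # assumes psycopg2 / psycopg2-binary is installed

conn_string = "host={} dbname={} user={} password={}".format(
    "localhost", "sparkifydb", "student", "student")  # placeholder credentials
conn = psycopg2.connect(conn_string)
cur = conn.cursor()
cur.execute("SELECT version();")
print(cur.fetchone())
conn.close()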
Returns a formatted connect string to connect to the "landing" DB (which is the preexisting "studentdb") in order to be able to drop and create the "sparkifydb".
def get_db_landing_connect_string(self):
    template_string = "host={} dbname={} user={} password={}"
    return template_string.format(self.get("DB_HOST"),
                                  self.get("DB_LANDING_NAME"),
                                  self.get("DB_LANDING_USER"),
                                  ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_db_connect_string(self):\n template_string = \"host={} dbname={} user={} password={}\"\n return template_string.format(self.get(\"DB_HOST\"),\n self.get(\"DB_NAME\"),\n self.get(\"DB_USER\"),\n ...
[ "0.7096417", "0.6597485", "0.65960795", "0.645154", "0.64452803", "0.61906487", "0.61793053", "0.61535686", "0.6109083", "0.6074845", "0.60448676", "0.60006577", "0.59533644", "0.5940615", "0.58933663", "0.58868223", "0.58568895", "0.58527315", "0.57882077", "0.57715565", "0....
0.76311576
0
Redirect to the 'get_absolute_url' of an Entry, according to the 'object_id' argument.
def entry_shortlink(request, object_id):
    entry = get_object_or_404(Entry, pk=object_id)
    return redirect(entry, permanent=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def relative_view_on_site(request, content_type_id, object_id):\n try:\n content_type = ContentType.objects.get(pk=content_type_id)\n if not content_type.model_class():\n raise http.Http404(\"Content type %s object has no associated model\" % content_type_id)\n obj = content_type...
[ "0.72540265", "0.65143955", "0.65035427", "0.6472303", "0.6195901", "0.6094221", "0.60339874", "0.60005844", "0.59937936", "0.59699005", "0.59617144", "0.5954032", "0.59317863", "0.59166944", "0.58625966", "0.5861316", "0.58577865", "0.58354175", "0.5827268", "0.5827268", "0....
0.7944688
0
Converts the type (cls) to a class
def get_class(cls):
    class Foo(object):
        def __init__(self):
            pass
    x = Foo()
    x.__class__ = cls
    return x.__class__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _class(self, class_):\r\n\r\n if class_:\r\n if hasattr(class_, '__mro__'):\r\n #this is a class\r\n return class_\r\n else:\r\n #this is an instance\r\n return type(class_)", "def _class(self, *args):\r\n\r\n if ...
[ "0.69712764", "0.69088846", "0.6750393", "0.66642344", "0.6608613", "0.6541871", "0.6487506", "0.6476722", "0.6397707", "0.6386939", "0.6368144", "0.6345723", "0.630231", "0.6275217", "0.62611365", "0.6149443", "0.6148302", "0.61340964", "0.61101025", "0.6106781", "0.6064178"...
0.71117383
0
Adds newclasses to the list of base classes. Creates a new class for the instance. The original class is in bases[0].
def add_base_classes(x, newclasses):
    bases = list(x.__class__.__bases__)
    if bases[0] is object:
        bases[0] = x.__class__
    if any(x in bases for x in newclasses):
        raise PermitError("Cannot insert duplicate classes.")
    bases = bases + newclasses
    x.__class__ = ty...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_class(self, new_class):\n index = self._counter\n self._counter += 1\n for element in new_class:\n self._class_names[element] = index\n node = self.part[index].append(element)\n self._place[element] = node", "def with_metaclass(meta, *bases):\r\n r...
[ "0.66618234", "0.64001256", "0.64001256", "0.6344961", "0.6301278", "0.62983423", "0.6222371", "0.6013382", "0.5826684", "0.5763189", "0.57119316", "0.57008797", "0.56934845", "0.56647617", "0.5627335", "0.5617749", "0.5607377", "0.5586276", "0.55615836", "0.5545617", "0.5525...
0.82117367
0
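The mechanics these snippets rely on, in isolation: an instance's behavior can change at runtime by rebuilding its __class__ with type(). The Greeter/Loud classes below are made up for illustration:

class Greeter:
    def greet(self):
        return "hello"


class Loud:
    def greet(self):
        return super().greet().upper()


g = Greeter()
# Rebuild the instance's class with Loud mixed in ahead of the original base,
# the same __class__ reassignment trick used by get_class/add_base_classes above.
g.__class__ = type("LoudGreeter", (Loud, Greeter), {})
print(g.greet())  # HELLO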
Removes a base class. If there are no more added base classes, returns the original class slotted at bases[0].
def remove_base_class(x, cls):
    bases = list(x.__class__.__bases__)
    original_class = bases[0]
    other_classes = bases[1:]
    if cls in other_classes:
        other_classes.remove(cls)
    else:
        raise PermitError("Class {0} not in list of base classes {1}".format(cls, bases)...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mro(cls): # pragma: no cover\n if hasattr(cls, 'mro'):\n return cls.__mro__\n\n def _mro(cls):\n m = [cls]\n for base in cls.__bases__:\n m += _mro(base)\n return m\n\n mro_list = _mro(cls)\n\n # If a class appears multiple times (due to multiple inheritance)...
[ "0.6278506", "0.5917506", "0.5868634", "0.5854781", "0.58289456", "0.5770308", "0.57575125", "0.5735956", "0.5680516", "0.5679301", "0.56645495", "0.56414235", "0.55493724", "0.55452394", "0.553261", "0.55212045", "0.54923844", "0.5323664", "0.5301268", "0.5242089", "0.513890...
0.74186534
0
Saves samples collected from 3Axis Accelerometers. Samples should be received in the form (ID X Y Z Time /r/n) with a space to separate the data. Invalid data is rejected and any samples lost this way are recorded and displayed to the user once the function finishes. All samples are collected from the port defined by the s...
def collect_samples(serialPort, NO_SENSORS, NO_SAMPLES, log):
    run = '1'
    badSamples = 0
    count = 1
    log_temp = []
    temp = [0] * 20
    NO_FIELDS = (NO_SENSORS * 3) + 1
    while (run == '1'):
        # If the input buffer is not empty read the data out into rawData using \n as a delimiter.
        if ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def record_data(self, no_of_samples, interval):\r\n\r\n #tempory storage while function is completing\r\n temp_return_list = []\r\n\r\n #colec\r\n for i in range(0,no_of_samples):\r\n\r\n print(i)\r\n sensor_value = self.sen.get_sensor_value()\r\n\r\n te...
[ "0.6557734", "0.5812748", "0.5615559", "0.55976075", "0.5516802", "0.54913497", "0.54551655", "0.5406054", "0.5381206", "0.53806", "0.53675306", "0.5364671", "0.5356917", "0.53516746", "0.5319687", "0.5301007", "0.5295816", "0.525568", "0.52149343", "0.5212219", "0.5188131", ...
0.7342835
0
A function that takes a numpy array, given by the np_samples (numpy array) parameter, of samples collected from 3Axis Accelerometers in the form [ID,X,Y,Z,Time] and returns a numpy array of samples where each ID value has the same amount of samples. Each unique ID relates to an associated sensor and the total number of se...
def equalise_sample_numbers(np_samples, NO_SENSORS):
    length = []  # Holds the amount of samples associated with each ID
    # Finds the number of samples for each sensor by checking ID and saves the amount in list
    for i in range(1, NO_SENSORS+1):
        length.append((np_samples[:,0] == i).sum()...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _avg_sample(self):\n samples = [0] * self.num_samples\n for i in range(self.num_samples):\n samples[i] = self.sensor.measure_distance()\n time.sleep(self.sample_delay)\n if self.drop_extremes:\n samples.sort()\n samples = samples[1:-1]\n ...
[ "0.5047761", "0.49634433", "0.4951183", "0.49368143", "0.48976904", "0.48730645", "0.48015064", "0.47932002", "0.47760272", "0.47559175", "0.47461358", "0.4691884", "0.46875986", "0.46452206", "0.46400303", "0.46148774", "0.45951375", "0.45937127", "0.45784828", "0.45669794", ...
0.6461404
0
Reads the csv file from the given path(string) and returns it as a list
def read_csv(path):
    csv_data = []
    with open(path, 'r') as csv_file:
        csv_read = csv.reader(csv_file, dialect='excel')
        for row in csv_read:
            csv_data.append(row)
    return(csv_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_csv(csv_file_path):\n res = [] #list\n # f = open(csv_file_path) #read file\n with open(csv_file_path,\"r\") as f:", "def read_csv(path):\r\n output = []\r\n for row in csv.DictReader(open(path)):\r\n output.append(row)\r\n return output", "def read_csv(path):\n output = []...
[ "0.84735274", "0.84323573", "0.8389204", "0.83887297", "0.82988846", "0.8259558", "0.8183878", "0.8143425", "0.80833566", "0.7938903", "0.79075766", "0.7878327", "0.7857819", "0.78288424", "0.7688525", "0.76732665", "0.7578928", "0.7515018", "0.74723434", "0.74717313", "0.745...
0.8536996
0
Takes a numpy array of 3Axis Accelerometer data of the form [X1,Y1,Z1,Time,X2,Y2,Z2,Time.....XN,YN,ZN,Time] with any number of rows that relate to the number of samples for each sensor and N defined by the NO_SENSORS(int) parameter. A numpy array of dimension [n][(N*4)] should therefore be provided with np_data(numpy arr...
def save_as_csv(path, data, NO_SENSORS):
    HEADER1 = [
        ['Sensor 1'],
        ['X','Y','Z','Time/ms']
    ]
    HEADER2 = [
        ['Sensor 1',' ',' ',' ','Sensor 2'],
        ['X','Y','Z','Time/ms','X','Y','Z','Time/ms']
    ]
    HEADER3 = [
        ['Sensor 1',' ',' ',' ','Sensor 2',' ',' ',' ','Sensor 3'],
        ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def writeDataCSV(data,outpath,outfile,out_head=None,message='data'):\n if (out_head is not None):\n #nhead = out_head.count(',') + 1\n nhead = len(out_head.split(',')) # Split header at every comma\n if (data.shape[1] != nhead):\n print('Warning: No. of fields does not match numb...
[ "0.62333757", "0.60136515", "0.59167486", "0.58182275", "0.5800514", "0.57883203", "0.5723681", "0.5680198", "0.55766135", "0.5498357", "0.54279953", "0.5375188", "0.53628296", "0.5357926", "0.53508735", "0.52884793", "0.5267095", "0.5256821", "0.52335376", "0.5195675", "0.51...
0.763759
0
Plots 3Axis accelerometer data on separate graphs per sensor, each in a separate figure. The next figure will appear once the first figure is closed. Takes a numpy array of 3Axis Accelerometer data of the form [X1,Y1,Z1,Time,X2,Y2,Z2,Time.....XN,YN,ZN,Time] with any number of rows that relate to the number of samples fo...
def plot_multifig(data, NO_SENSORS, dataSelection):
    # Axis options
    yAxisLimits = [[0,1024],[-3,3]]
    # Plots a separate graph for each sensor
    for i in range(0, NO_SENSORS):
        plt.figure(i + 1)
        plt.title('Sensor ' + str(i + 1))
        plt.plot(data[:,(3 + (4 * i))], data[:,(0 + (4 ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_singlefig(data,NO_SENSORS,dataSelection):\n \n # Axis options\n yAxisLimits = [[0,1024],[-3,3]]\n \n # Plots graphs for each sensor on 1 figure\n plt.figure(1)\n for i in range(0,NO_SENSORS):\n # The figure is seperated into subplots using the parameter. 231 means 2 rows, 3 col...
[ "0.6957092", "0.6196753", "0.6050141", "0.6022858", "0.5895816", "0.5823652", "0.57994175", "0.57699615", "0.57496446", "0.572262", "0.5720663", "0.56938666", "0.5690304", "0.56666124", "0.56643283", "0.56529903", "0.5643261", "0.5638599", "0.56349975", "0.56215554", "0.56083...
0.7285975
0
Plots 3Axis accelerometer data on separate graphs per sensor but displays them all in one figure. Takes a numpy array of 3Axis Accelerometer data of the form [X1,Y1,Z1,Time,X2,Y2,Z2,Time.....XN,YN,ZN,Time] with any number of rows that relate to the number of samples for each sensor and N defined by the NO_SENSORS(int) ...
def plot_singlefig(data, NO_SENSORS, dataSelection):
    # Axis options
    yAxisLimits = [[0,1024],[-3,3]]
    # Plots graphs for each sensor on 1 figure
    plt.figure(1)
    for i in range(0, NO_SENSORS):
        # The figure is separated into subplots using the parameter. 231 means 2 rows, 3 columns, subplot...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_multifig(data,NO_SENSORS,dataSelection):\n \n # Axis options\n yAxisLimits = [[0,1024],[-3,3]]\n \n # Plots a seperate graph for each sensor\n for i in range(0,NO_SENSORS):\n plt.figure(i + 1)\n plt.title('Sensor ' + str(i + 1))\n plt.plot(data[:,(3 + (4 * i))],d...
[ "0.72387475", "0.60290945", "0.6005947", "0.5955035", "0.58564967", "0.58080417", "0.58020985", "0.57991344", "0.5739967", "0.5708146", "0.5702322", "0.5696365", "0.5665857", "0.56347615", "0.5624619", "0.56230587", "0.5581395", "0.5572585", "0.55696213", "0.5556687", "0.5547...
0.6846585
1
Returns a dictionary of RefSeq genes (by chromosome and strand with 'name' parameter as key) from UCSC genome browser (equivalent to RefSeq ID)
def fetchRefSeq(genome='hg18', lookupval='name'):
    cursor = gbdbConnect(gbdbname=genome)
    select = "SELECT * FROM refGene"
    cursor.execute(select)
    rows = cursor.fetchall()
    output = {}
    for chr in genomelib.chr_names:
        output[chr] = {}
        output[chr]['+'] = {}
        output[chr]['-'] = {}
    for r...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetchRefSeqByChrom(RefSeqBed=\"/fg/compbio-t/lgoff/magda/references/human/transcriptome/hg18/hg18_RefSeq.bed\"):\n res = {}\n iter = parseBed(RefSeqBed)\n for i in iter:\n res.setdefault(i.chr,{})\n res[i.chr].setdefault(i.strand,[])\n res[i.chr][i.strand].append(i)\n return re...
[ "0.7468378", "0.6801201", "0.66693723", "0.6430852", "0.6347451", "0.6187618", "0.61827475", "0.604143", "0.5910106", "0.58944297", "0.5880748", "0.5769445", "0.57518154", "0.5728297", "0.5714107", "0.5687991", "0.5635786", "0.5633332", "0.56206924", "0.55868036", "0.5560895"...
0.7134653
1
Returns a dictionary of RefSeq SplicedIntervals (by chromosome and strand) from UCSC table browser. Indexed lists are sorted prior to return for easy search Same as fetchRefSeqIntervals but indexed by chrom and strand
def fetchRefSeqIntervalsIndexed(genome='hg18', proteinCodingOnly=False, verbose=False):
    cursor = gbdbConnect(gbdbname=genome)
    select = "SELECT * FROM refGene"
    if verbose:
        sys.stderr.write("Fetching RefSeq Sequences...\n")
    cursor.execute(select)
    rows = cursor.fetchall()
    output = {}
    for chr in g...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetchRefSeqByChrom(RefSeqBed=\"/fg/compbio-t/lgoff/magda/references/human/transcriptome/hg18/hg18_RefSeq.bed\"):\n res = {}\n iter = parseBed(RefSeqBed)\n for i in iter:\n res.setdefault(i.chr,{})\n res[i.chr].setdefault(i.strand,[])\n res[i.chr][i.strand].append(i)\n return re...
[ "0.60520214", "0.5973717", "0.58647096", "0.5702146", "0.55897397", "0.54622334", "0.5423216", "0.5373043", "0.52435833", "0.5237961", "0.5187223", "0.5170602", "0.5147745", "0.5144643", "0.5126892", "0.50961936", "0.5087652", "0.50801325", "0.5040272", "0.5039929", "0.502408...
0.71107197
0
Checks to see if interval is within a host RefSeq gene (does not test strand!!). If no, returns False. If yes, returns a list of dictionaries for each host RefSeq gene. Keys are consistent with field names from UCSC table refGene.
def hostRefSeq(chr, start, end, strand):
    cursor = gbdbConnect()
    selSQL = "SELECT * from refGene WHERE chrom='%s' AND txStart<='%d' AND txEnd>='%d'" % (chr, int(start), int(end))
    cursor.execute(selSQL)
    rows = cursor.fetchall()
    results = []
    if cursor.rowcount == 0:
        return False
    else:
        for row ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_refrange(self):\n if np.all(np.isnan(self.par)):\n print( 'Run params() before')\n return\n if hasattr(self,'refranges'):\n return self.refranges\n ice_r = [r for r in xrange(len(self.ref)) if ~ np.isnan(self.par[1,r,10,0])]\n liq_r = [r for r in...
[ "0.5347442", "0.51855284", "0.5100473", "0.50937337", "0.5092169", "0.5061189", "0.5061189", "0.5053239", "0.50323635", "0.50276566", "0.5026319", "0.50240004", "0.49963844", "0.4995861", "0.49941018", "0.49801216", "0.4969835", "0.4949664", "0.49097306", "0.48739707", "0.485...
0.5767976
0
Checks to see if interval is entirely within a known wgRNA gene (including miRNA). Does consider strand!!! If no flanking host wgRNA, returns False. If yes, returns a list of dictionaries for each host wgRNA gene. Keys are consistent with field names from UCSC table wgRNA.
def testwgRNA(chr, start, end, strand):
    cursor = gbdbConnect()
    selSQL = "SELECT * from wgRna WHERE chrom='%s' AND strand='%s' AND chromStart<='%d' AND chromEnd>='%d'" % (chr, strand, int(start), int(end))
    cursor.execute(selSQL)
    rows = cursor.fetchall()
    results = []
    if cursor.rowcount == 0:
        return False
    ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_intervals(what):\n\n intervals = []\n result = []\n\n for interval in what:\n for seen_interval in intervals:\n if ((interval[1] > seen_interval[0]) and (interval[0] <= seen_interval[0])) or \\\n ((interval[0] < seen_interval[1]) and (interval[0] >= seen_interval[0]...
[ "0.55903125", "0.5488711", "0.54818463", "0.5360363", "0.5360247", "0.53407776", "0.5336744", "0.52925354", "0.52756387", "0.5243182", "0.5230628", "0.5215184", "0.51692766", "0.5168161", "0.515725", "0.515458", "0.51419836", "0.51233953", "0.5118567", "0.5113086", "0.5111238...
0.5631509
0
Returns all element names from ``MedicationKnowledgeDefinitional``, according to the specification, preserving the original sequence order.
def elements_sequence(cls):
    return [
        "id",
        "extension",
        "modifierExtension",
        "definition",
        "doseForm",
        "intendedRoute",
        "ingredient",
        "drugCharacteristic",
    ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"code\",...
[ "0.5559592", "0.55173665", "0.54835874", "0.54470605", "0.5411243", "0.53615123", "0.53325474", "0.53293043", "0.5318013", "0.5305079", "0.5292543", "0.5288954", "0.52888376", "0.5249178", "0.5247725", "0.52077264", "0.5173604", "0.51669055", "0.5159579", "0.515878", "0.51551...
0.55813897
0
Returns all element names from ``MedicationKnowledgeIndicationGuideline``, according to the specification, preserving the original sequence order.
def elements_sequence(cls):
    return ["id", "extension", "modifierExtension", "indication", "dosingGuideline"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def names(self):\n return list(item.name for item in self.mechanisms)", "def get_specification_kinds(specifications):\n specifications.setdefault(\"manual event models\", {\"tags\": [\"manual event models\"]})\n return [\"manual event models\"]", "def elements_sequence(cls):\n return [\n ...
[ "0.53641224", "0.5295433", "0.5286177", "0.52009326", "0.518853", "0.5163249", "0.5158666", "0.51429915", "0.5131586", "0.5112963", "0.51038444", "0.50617266", "0.50558203", "0.50551116", "0.5047262", "0.50398177", "0.5026788", "0.50196815", "0.49980664", "0.4971098", "0.4967...
0.5673163
0
Get the shortDOI for a DOI. Providing a cache dictionary will prevent multiple API requests for the same DOI.
def shorten(doi, cache={}, verbose=False):
    if doi in cache:
        return cache[doi]
    quoted_doi = urllib.request.quote(doi)
    url = 'http://shortdoi.org/{}?format=json'.format(quoted_doi)
    try:
        response = requests.get(url).json()
        short_doi = response['ShortDOI']
    except Exceptio...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_short_doi_url(doi):\n quoted_doi = urllib.request.quote(doi)\n url = 'http://shortdoi.org/{}?format=json'.format(quoted_doi)\n headers = {\n 'User-Agent': get_manubot_user_agent(),\n }\n try:\n response = requests.get(url, headers=headers).json()\n short_doi = response['...
[ "0.6388651", "0.5892598", "0.5868604", "0.5838415", "0.58303165", "0.56667435", "0.5500369", "0.5332493", "0.5332032", "0.5264312", "0.52386665", "0.5169312", "0.5138275", "0.5088207", "0.5036826", "0.50280404", "0.5017073", "0.49617666", "0.49116126", "0.4906788", "0.4874662...
0.76168287
0
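A trimmed, runnable version of the same call (network access required; the shortdoi.org URL and the ShortDOI response key come from the snippet itself):

import urllib.request

import requests


def shorten(doi: str) -> str:
    quoted_doi = urllib.request.quote(doi)
    url = 'http://shortdoi.org/{}?format=json'.format(quoted_doi)
    return requests.get(url).json()['ShortDOI']


print(shorten('10.1038/nphys1170'))  # prints the short form, e.g. '10/xxxx'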
Return a bibtexparser entry for a DOI
def get_bibtex_entry(doi, bibtext_cache={}, shortdoi_cache={}):
    bibtext = get_bibtext(doi, cache=bibtext_cache)
    if not bibtext:
        return None
    short_doi = shorten(doi, cache=shortdoi_cache)
    parser = BibTexParser()
    parser.ignore_nonstandard_types = False
    bibdb = bibtexparser.loa...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_tagged_doc(self, doi):\n\n return self.tagged_docs[list(map(lambda x: x.tags[0], self.tagged_docs)).index(doi)]", "def doi(self):\n return LiteratureReader(self.record).doi", "def getDOIFromCitation(citation):\n try:\n if \".org/\" in citation:\n DOI = citation.split...
[ "0.62905365", "0.62419164", "0.5902826", "0.5854164", "0.57459015", "0.55268157", "0.54975694", "0.54722095", "0.54414177", "0.5378782", "0.53721684", "0.5337533", "0.52963126", "0.5257952", "0.52536035", "0.52379805", "0.52209264", "0.5174101", "0.5173018", "0.5162918", "0.5...
0.74704564
0
Computes the forward cone from point p.
def forward_cone(self, p):
    return to_rec(zip(p, self.top))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def backward_cone(self, p):\n return to_rec(zip(self.bot, p))", "def conjgradient(x, p, gprev, gnew):\r\n gnew = np.array(gnew)[np.newaxis]\r\n gprev = np.array(gprev)[np.newaxis]\r\n gnew = gnew.T\r\n gprev = gprev.T\r\n beta = (gnew.T)@gnew/((gprev.T)@gprev)\r\n gnew = gnew.flatten()\r...
[ "0.624741", "0.62070286", "0.5786345", "0.57365304", "0.5715694", "0.56874", "0.5583418", "0.55606633", "0.5546253", "0.5532969", "0.55318826", "0.55309975", "0.54770863", "0.5470166", "0.54118145", "0.54030555", "0.53950745", "0.5390107", "0.5386848", "0.536962", "0.53636897...
0.715168
0
Computes the backward cone from point p.
def backward_cone(self, p):
    return to_rec(zip(self.bot, p))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conjgradient(x, p, gprev, gnew):\r\n gnew = np.array(gnew)[np.newaxis]\r\n gprev = np.array(gprev)[np.newaxis]\r\n gnew = gnew.T\r\n gprev = gprev.T\r\n beta = (gnew.T)@gnew/((gprev.T)@gprev)\r\n gnew = gnew.flatten()\r\n beta = beta.flatten()\r\n p = -gnew + beta*p\r\n return p", ...
[ "0.6597751", "0.633653", "0.6120033", "0.59766096", "0.5962403", "0.56558436", "0.5647515", "0.56286216", "0.5613398", "0.56100065", "0.56100065", "0.5609343", "0.55576277", "0.5546407", "0.5496917", "0.54839945", "0.5470489", "0.5455691", "0.54530066", "0.54523903", "0.54291...
0.7388151
0
Ensures label commands fail with too few arguments.
def testTooFewArgumentsFails(self):
    invocations_missing_args = (
        # Neither arguments nor subcommand.
        ['label'],
        # Not enough arguments for 'set'.
        ['label', 'set'],
        ['label', 'set', 'filename'],
        # Not enough arguments for 'get'.
        ['label', 'get'],
        # Not ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def invalid_args(event):\n\n s.sendReply(\n event,\n f'Please provide the proper arguments. Use \"@{s.BOT_NAME} help\" for help.',\n )", "def test_is_valid_label_key_invalid_input():\n # test length violations\n assert not is_valid_label_key(key=None) # Too short\n assert not is_val...
[ "0.6378849", "0.617358", "0.61700785", "0.6165488", "0.6146504", "0.61405295", "0.6113329", "0.60842806", "0.60842806", "0.60597503", "0.60424715", "0.6028631", "0.6018959", "0.60188454", "0.6001575", "0.59583277", "0.593172", "0.58977133", "0.58879304", "0.58873475", "0.5849...
0.7460278
0
Dynamically generates a SQL update query by eliminating the columns whose value is None
def sql_filtered_update(table, set_columns, where_columns, values): for index in range(len(set_columns) - 1, -1, -1): if values[index] is None: del set_columns[index] del values[index] set_columns = [col + ' = ?' for col in set_columns] columns_to_set = ', '.join(set_columns)...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_scrub_sql():\r\n # it seems incredibly hard to get SQLAlchemy to emit a fully-compiled SQL\r\n # string that including data values. i gave up after trying this method with\r\n # the \"dialect\" sqlalchemy.dialects.mysql.mysqldb.MySQLDialect()\r\n # https://sqlalchemy.readthedocs.org/en/latest/f...
[ "0.7133753", "0.7039731", "0.69529456", "0.6863801", "0.67174447", "0.6690802", "0.6452646", "0.64026815", "0.63402575", "0.62404656", "0.620921", "0.61646706", "0.60941637", "0.6006477", "0.5963547", "0.58923066", "0.58101135", "0.57359624", "0.57193387", "0.5708814", "0.569...
0.7526104
0
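The update query above is cut off mid-line; a sketch of the same None-filtering idea, with a signature adjusted so it does not mutate its inputs (the split between SET values and WHERE values is an assumption, since the original keeps them in one list):

```python
def sql_filtered_update(table, set_columns, set_values, where_columns, where_values):
    # Keep only the (column, value) pairs whose value is not None.
    pairs = [(c, v) for c, v in zip(set_columns, set_values) if v is not None]
    assignments = ", ".join(f"{c} = ?" for c, _ in pairs)
    conditions = " AND ".join(f"{c} = ?" for c in where_columns)
    query = f"UPDATE {table} SET {assignments} WHERE {conditions}"
    return query, [v for _, v in pairs] + list(where_values)
```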
Dynamically generates a SQL insert query by eliminating the columns whose value is None
def sql_filtered_insert(table, set_columns, values): for index in range(len(set_columns) - 1, -1, -1): if values[index] is None: del set_columns[index] del values[index] values_fields = ['?'] * len(set_columns) query_columns = ', '.join(set_columns) values_fields = ', '.j...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_insert_query(self, query, columns, table_name):\n cols = \"\"\n values = \"\"\n on_dupe_values = \"\"\n\n for column in columns:\n cols += \"`{}`, \".format(column)\n values += \"%({})s, \".format(column)\n on_dupe_values += \"{} = VALUES({}), ...
[ "0.71312165", "0.70737326", "0.6936334", "0.6610949", "0.6493307", "0.6297049", "0.62800425", "0.62800425", "0.6227936", "0.6121583", "0.60976523", "0.60733664", "0.60127187", "0.59995824", "0.59413385", "0.5925546", "0.5920737", "0.5906229", "0.586013", "0.58541316", "0.5853...
0.7548947
0
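Likewise for the truncated insert variant — a minimal sketch that filters None values while keeping columns and placeholders aligned:

```python
def sql_filtered_insert(table, columns, values):
    # Drop None-valued columns so the INSERT only names real data.
    pairs = [(c, v) for c, v in zip(columns, values) if v is not None]
    cols = ", ".join(c for c, _ in pairs)
    placeholders = ", ".join("?" for _ in pairs)
    query = f"INSERT INTO {table} ({cols}) VALUES ({placeholders})"
    return query, [v for _, v in pairs]
```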
Check if SQLite3 module is threadsafe
def is_sqlite3_threadsafe(): try: import sqlite3 as sql conn = sql.connect(':memory:') threadsafety = conn.execute('SELECT * FROM pragma_compile_options WHERE compile_options LIKE \'THREADSAFE=%\'').fetchone()[0] conn.close() if int(threadsafety.split("=")[1]) == 1: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parallel_safe(self):\n return True", "def parallel_safe(self):\n\n return True", "def check_db():\n try:\n conn = sqlite3.connect(DB_PATH)\n cursor = conn.cursor()\n cursor.execute(\"SELECT id FROM query LIMIT 1;\")\n conn.close()\n except:\n init_db()...
[ "0.62684345", "0.6167844", "0.56663895", "0.5534087", "0.547586", "0.5453264", "0.5426761", "0.5393733", "0.5359209", "0.53327715", "0.5332757", "0.5324714", "0.5302915", "0.52833873", "0.52824384", "0.52329636", "0.52141863", "0.5197171", "0.51928973", "0.5184741", "0.514778...
0.8585573
0
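Besides the PRAGMA compile_options query the record shows, the stdlib exposes the same information directly; a minimal check (note that on Python < 3.11 the attribute is hardcoded to 1 rather than read from the build):

```python
import sqlite3

# DB-API 2.0 thread-safety level of the underlying SQLite build:
# 0 = not safe, 1 = module-level, 3 = connections/cursors shareable.
def is_sqlite3_threadsafe():
    return sqlite3.threadsafety >= 1

print(sqlite3.threadsafety, is_sqlite3_threadsafe())
```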
Get token from Vault.
def get_token(): global vault_token global vault_token_time current_app.logger.info('************* GET TOKEN METHOD **************') return 'root' if validate_token(): vault_duration = None try: auth_type = current_app.config.get('VAULT_AUTH', 'TOKEN') current...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def token(request: Request):\n return get_token()", "def get_token():\n params = {'get_token': 'get_token'}\n return load_page(API, params=params, headers={'content-type': 'application/json'})['token']", "def get(uid: int, token_id: int):\n\n token = Token.get(uid, token_id).as_dto().to_p...
[ "0.7275925", "0.7074497", "0.7003901", "0.68835914", "0.6838572", "0.68307024", "0.676279", "0.67362076", "0.6676773", "0.666665", "0.66557", "0.6624486", "0.6624486", "0.65986884", "0.65986884", "0.65986884", "0.65887886", "0.65849286", "0.6560688", "0.6560688", "0.6544694",...
0.80251646
0
Authenticate with user and password, default auth path is LDAP
def authenticate_ldap(): return _userpwd_auth(current_app.config.get('VAULT_AUTH_PATH', 'ldap'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def authenticate(self, username=None, password=None, **kwargs):\n logging.debug(\"LDAP authenticatation: username=%s\" % username)\n is_active = True # User activity flag\n is_superuser = False # Superuser flag\n # Prepare template context\n context = {\n \"username\...
[ "0.71238625", "0.7024816", "0.68941444", "0.68333524", "0.682195", "0.6764249", "0.67410976", "0.66589737", "0.6557559", "0.65375555", "0.65371275", "0.6479368", "0.6479368", "0.64316696", "0.6416964", "0.63971233", "0.63791126", "0.63758403", "0.63447416", "0.63050395", "0.6...
0.8045247
0
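The LDAP record above only delegates to a hidden _userpwd_auth helper; a sketch of what that login looks like against Vault's HTTP API (URL and credentials here are placeholders):

```python
import requests

def vault_ldap_login(vault_url, username, password, auth_path="ldap"):
    # Vault's userpass-style backends share this login shape:
    # POST /v1/auth/<path>/login/<username> with the password in the body.
    resp = requests.post(
        f"{vault_url}/v1/auth/{auth_path}/login/{username}",
        json={"password": password},
        timeout=10,
    )
    resp.raise_for_status()
    return resp.json()["auth"]["client_token"]
```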
Generate GCP JWT for Vault authentication.
def _generate_gcp_jwt(): role = current_app.config.get('VAULT_AUTH_ROLE') account = current_app.config.get('VAULT_AUTH_ACCOUNT') if role and account: headers = {'Metadata-Flavor': 'Google'} url = 'http://metadata/computeMetadata/v1/instance/service-accounts/' + account + '/identity' ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_jwt_token(self):\n payload = jwt_payload_handler(self)\n token = jwt_encode_handler(payload)\n return token", "def _generate_jwt_token(self):\n import jwt\n from datetime import datetime, timedelta\n from django.conf import settings\n\n dt = datetime...
[ "0.6614784", "0.6614367", "0.65258294", "0.6461658", "0.64035386", "0.6360927", "0.63380235", "0.6322474", "0.62901527", "0.6256548", "0.6229225", "0.6217421", "0.61592877", "0.6156592", "0.614825", "0.6128425", "0.60756314", "0.60685194", "0.6028773", "0.60129017", "0.599216...
0.8443291
0
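The JWT fetch above is truncated after building the metadata URL; a runnable sketch of the same call (the audience value is a placeholder — Vault expects it to name the role):

```python
import requests

def fetch_gcp_identity_jwt(account="default", audience="vault/my-role"):
    # The GCE metadata server signs an identity JWT for the given audience.
    url = ("http://metadata/computeMetadata/v1/instance/"
           f"service-accounts/{account}/identity")
    resp = requests.get(
        url,
        params={"audience": audience, "format": "full"},
        headers={"Metadata-Flavor": "Google"},
        timeout=5,
    )
    resp.raise_for_status()
    return resp.text  # the raw signed JWT
```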
GCP JWT authentication function.
def authenticate_gcp(): role = current_app.config.get('VAULT_AUTH_ROLE') if role: url = '{}/v1/auth/{}/login'.format(current_app.config.get('VAULT_URL'), current_app.config.get('VAULT_AUTH_PATH', 'gcp')) if url.split('//')[0].lower() == 'https:': ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def login():\n print(request.get_json())\n user = request.get_json()['username']\n passwd = request.get_json()['passwd']\n user_check = storage.get_user(User, user)\n if not user:\n return jsonify(message='missing value'), 401\n if not user_check:\n return jsonify(message='error'), ...
[ "0.6536674", "0.6521387", "0.64874554", "0.6439283", "0.64367366", "0.64048326", "0.63683885", "0.63635916", "0.6344219", "0.6336104", "0.6336104", "0.6291958", "0.62848336", "0.62541", "0.62541", "0.62484086", "0.6247485", "0.62096065", "0.6151544", "0.60900754", "0.608544",...
0.65679944
0
Specified to add the potential partial solution to the info dict.
def expose_potential_partial_solution(self): self._is_partial_solution_exposed = True return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_check_partials_yes(self):\n cp_save = dm.options['include_check_partials']\n dm.options['include_check_partials'] = True\n\n cases = [self.brach_explicit_partials,\n self.balanced_field_partials_radau,\n self.min_time_climb_partials_gl]\n\n parti...
[ "0.5336766", "0.5204248", "0.50944495", "0.50701666", "0.503315", "0.5031378", "0.49841353", "0.49736056", "0.4957122", "0.493283", "0.49248284", "0.49117887", "0.49109685", "0.49100757", "0.4907853", "0.4907853", "0.49010634", "0.4893403", "0.48574725", "0.48571035", "0.4852...
0.6785565
0
Specified to add the full ground truth state to the info dict.
def add_ground_truth_state_to_info(self): self._is_ground_truth_state_exposed = True return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __add_current_state_to_state_dict(self):\n board_fen = self.board_fen()\n if board_fen not in self.states:\n self.states[self.board_fen()] = GameState(self.board_array())", "def update_truth(self, ground_truth):\n self.ground_truth = ground_truth", "def __init__(self):\n ...
[ "0.6266897", "0.61687815", "0.5671283", "0.54962367", "0.54451495", "0.5404208", "0.53883934", "0.53883934", "0.5335978", "0.5324146", "0.53026766", "0.52942204", "0.5273841", "0.52662015", "0.525729", "0.52440625", "0.5214837", "0.5212029", "0.5202931", "0.5199911", "0.51986...
0.8061191
0
Determines the number of valleys that were walked through given a sequence of characters representing an altitudinal traversal ("U" stands for "up", "D" stands for "down").
def counting_valleys0(s): # . Let v_steps track consecutiveness # . If positive to negative and if v_steps < 2, then # increment v_steps # . If negative to positive, then reset v_steps to 1 # and increment valleys # . THINK: You can easily implement a mountains passed # counter ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def countingValleys(n, s):\n\n elevation = 0\n valleys = 0\n\n for char in s:\n if char == 'U':\n elevation +=1\n elif char == 'D':\n if elevation == 0:\n valleys += 1\n elevation -= 1\n\n return valleys", "def count_ambig(curr_seq, valid_...
[ "0.787841", "0.6130476", "0.60904425", "0.6050508", "0.591375", "0.58611935", "0.5833444", "0.5750658", "0.56289816", "0.5620559", "0.5612861", "0.5600865", "0.5549459", "0.553956", "0.5531055", "0.5510508", "0.55068606", "0.55058426", "0.54959077", "0.5491007", "0.5478747", ...
0.719043
1
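The document for this record shows only the planning comments; a complete minimal implementation of the algorithm the query describes:

```python
def counting_valleys(s: str) -> int:
    # A valley ends the moment a "U" step brings us back to sea level.
    elevation = 0
    valleys = 0
    for step in s:
        if step == "U":
            elevation += 1
            if elevation == 0:
                valleys += 1
        else:  # "D"
            elevation -= 1
    return valleys

assert counting_valleys("UDDDUDUU") == 1
```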
Returns a list of all tables relevant for a bind.
def get_tables_for_bind(self, bind=None): return [table for table in list(self.Model.metadata.tables.values()) if table.info.get('bind_key') == bind]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_tables(conn):\n cur = conn.cursor()\n cur.execute(\"\"\"SELECT name FROM sqlite_master\n WHERE type='table' AND name NOT LIKE 'sqlite_%';\n \"\"\")\n tables = cur.fetchall()\n\n return tables", "def meta_db_tables(self) -> list:\r\n def _passer(**kwargs):\...
[ "0.7201292", "0.7104701", "0.6972192", "0.6968997", "0.69183815", "0.6867139", "0.6861783", "0.6858579", "0.6854755", "0.6749029", "0.6615288", "0.65660226", "0.6531382", "0.6527741", "0.65209895", "0.65070254", "0.65040284", "0.6495937", "0.64294606", "0.64141154", "0.640553...
0.8611967
0
Load the current board state and win/loss record for the given phone from the db
def readBoard(phone): cursor = cnx.cursor() query = ("SELECT board,win,loss,draw FROM connectfour WHERE phone=%s") cursor.execute(query, (phone,)) row = cursor.fetchone() if row is None: cursor.close() cursor = cnx.cursor() query = ("IN...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def saveBoard(phone,board,win,loss,draw):\r\n cursor = cnx.cursor()\r\n query = (\"UPDATE connectfour SET board=%s,win=%s,loss=%s,draw=%s,updated=CURRENT_TIMESTAMP WHERE phone=%s\")\r\n cursor.execute(query, (board.save(),win,loss,draw,phone))\r\n cursor.close()\r\n cnx.commit()"...
[ "0.6271187", "0.6174814", "0.588046", "0.5659565", "0.5567539", "0.55583787", "0.5470912", "0.53004444", "0.5197919", "0.5100611", "0.5088175", "0.5068241", "0.50355256", "0.5034419", "0.50063074", "0.49890283", "0.49616575", "0.4892639", "0.4892557", "0.48855442", "0.4875144...
0.75203294
0
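readBoard is cut off inside its first-contact branch; a self-contained sketch of the same read-or-create pattern, using stdlib sqlite3 in place of the MySQL connector the original appears to use:

```python
import sqlite3

def read_board(conn, phone):
    # Fetch the stored game row, creating a fresh record on first contact.
    row = conn.execute(
        "SELECT board, win, loss, draw FROM connectfour WHERE phone = ?",
        (phone,),
    ).fetchone()
    if row is None:
        conn.execute(
            "INSERT INTO connectfour (phone, board, win, loss, draw) "
            "VALUES (?, '', 0, 0, 0)",
            (phone,),
        )
        conn.commit()
        return "", 0, 0, 0
    return row
```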
Save the current board state and win/loss record for the given phone to the db
def saveBoard(phone,board,win,loss,draw): cursor = cnx.cursor() query = ("UPDATE connectfour SET board=%s,win=%s,loss=%s,draw=%s,updated=CURRENT_TIMESTAMP WHERE phone=%s") cursor.execute(query, (board.save(),win,loss,draw,phone)) cursor.close() cnx.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readBoard(phone):\r\n cursor = cnx.cursor()\r\n query = (\"SELECT board,win,loss,draw FROM connectfour WHERE phone=%s\")\r\n cursor.execute(query, (phone,))\r\n row = cursor.fetchone()\r\n if row is None:\r\n cursor.close()\r\n cursor = cnx.cursor()\r\n ...
[ "0.6470638", "0.6128905", "0.5942916", "0.58818704", "0.579189", "0.5678843", "0.5672766", "0.5644867", "0.56161314", "0.5545671", "0.5525992", "0.5502162", "0.5501767", "0.54918563", "0.54908663", "0.5488081", "0.54879624", "0.5451699", "0.54198986", "0.5358391", "0.5348812"...
0.8068914
0
Process all the files matched with the `files_pattern` and output the results in `output`
def process_files(files_pattern, output, options=None): if options is None: options = {} queue = Queue(100) files = glob.glob(files_pattern, recursive=True) total_count = len(files) logging.info("starting to parse %s files", total_count) write_results_process = Process(target=write_re...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_files(self):\n matcher = self.choose_algorithm()\n # process one file at the time for better memory management\n for i, element in enumerate(self.input):\n filepath, _ = element\n\n try:\n with open(filepath, \"r\", encoding=\"utf-8\") as readfi...
[ "0.74225605", "0.6755515", "0.67088324", "0.6494828", "0.64312804", "0.6383366", "0.62620753", "0.6241482", "0.6226911", "0.62268376", "0.61927044", "0.61892426", "0.6179227", "0.61475813", "0.6125794", "0.597787", "0.59764105", "0.59764105", "0.5958008", "0.5955142", "0.5944...
0.8119285
0
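The producer/consumer wiring in this record is truncated; a minimal sketch of the same shape — glob the files, feed a bounded Queue, and let one writer process drain it (the per-file parsing step is elided):

```python
import glob
from multiprocessing import Process, Queue

def write_results(queue, output):
    with open(output, "w", encoding="utf-8") as fh:
        while True:
            item = queue.get()
            if item is None:  # sentinel: producers are done
                break
            fh.write(item + "\n")

def process_files(files_pattern, output):
    queue = Queue(100)
    writer = Process(target=write_results, args=(queue, output))
    writer.start()
    for path in glob.glob(files_pattern, recursive=True):
        queue.put(path)  # the real code would parse the file first
    queue.put(None)
    writer.join()
```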
displaytime = 15000 | 30000 | 60000 | 120000 | 300000 | 600000 | 1800000; displaytime defaults to 1800000
def set_display_time(log_mes,displaytime = 1800000): kill_adb_uiautomator_block_old() if int(get_screen_off_time(log_mes)) == displaytime: if int(displaytime) >= 60000: log_mes.info( 'screen off time is already %s mins'%(displaytime/60000)) else: log_mes.info('screen off ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_on_display(self, a_display, a_last_time_display): #pylint: disable-msg=R0201 \n current_time = datetime.datetime.utcnow()\n if not a_last_time_display:\n a_display.print_screen(self.mem_db, current_time, self._sort_criteria)\n return current_time\n else:\n ...
[ "0.68090326", "0.66291904", "0.6462265", "0.63614434", "0.6307813", "0.6285851", "0.62751526", "0.6242384", "0.62390834", "0.6192343", "0.6117744", "0.6111203", "0.6092385", "0.60823214", "0.60819495", "0.608081", "0.6054099", "0.6031724", "0.60215324", "0.5962884", "0.593660...
0.7196308
0
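The record drives the screen timeout through a uiautomator wrapper that is mostly cut off; on a connected Android device the same setting can be read and written directly over adb — a sketch, assuming adb is on PATH:

```python
import subprocess

def set_screen_off_timeout(ms=1800000):
    # Android keeps the screen-off timeout (in milliseconds) in system settings.
    subprocess.run(
        ["adb", "shell", "settings", "put", "system",
         "screen_off_timeout", str(ms)],
        check=True,
    )

def get_screen_off_timeout():
    out = subprocess.run(
        ["adb", "shell", "settings", "get", "system", "screen_off_timeout"],
        check=True, capture_output=True, text=True,
    )
    return int(out.stdout.strip())
```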
Creates a new Participant model, filling in some default constructor args. This is intended especially for updates, where more fields are required than for inserts.
def _participant_with_defaults(self, **kwargs): defaults = { 'hpoId': UNSET_HPO_ID, 'withdrawalStatus': WithdrawalStatus.NOT_WITHDRAWN, 'suspensionStatus': SuspensionStatus.NOT_SUSPENDED, 'participantOrigin': 'example', 'version': 1, 'lastM...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_participant(name='Not Brian', email='test@email.com') ->\\\n Participant:\n participant = Participant(name=name, email=email)\n return participant", "def __init__(self,\r\n username=None,\r\n first_name=None,\r\n last_name=None,\r\n ...
[ "0.6663132", "0.6514317", "0.6252609", "0.6068203", "0.60313624", "0.59170717", "0.58870864", "0.5882688", "0.5867263", "0.5828657", "0.5822187", "0.5800505", "0.5759603", "0.5748322", "0.57338744", "0.5728453", "0.5710991", "0.5684834", "0.5683016", "0.5629623", "0.56254387"...
0.6780463
0
Dict-like resource getter
def __getitem__(self, name): return self.__resources[name]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resource_map(self):", "def get_resource_data(self, **kw):\n data = dict(\n url=self['url'],\n dist=self['name'])\n data.update(kw)\n return data", "def data(self):\n return { # TODO Actually query for this shit\n \"foo\": self.__name__,\n ...
[ "0.66327727", "0.6343687", "0.6287191", "0.6218388", "0.61918527", "0.6092798", "0.60210764", "0.60139334", "0.59463495", "0.5932285", "0.59251744", "0.59157777", "0.58940756", "0.58883554", "0.58654296", "0.5848385", "0.5831318", "0.58073753", "0.5795992", "0.5769386", "0.57...
0.6858929
0
Resolve a filename relative to this directory
def resolve(self, name): # pylint: disable = E1101 root = _os.path.normpath('/') resolved = _os.path.splitdrive(_os.path.normpath( _os.path.join(root, unicode(name).encode('utf-8')) ))[1] while resolved.startswith(root): resolved = resolved[1:] re...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resolve(fname):\n if os.path.dirname(__file__):\n return os.path.dirname(__file__) + \"/../common/\" + fname\n else:\n return \"/common/\" + fname", "def rel_filename(filename, relative_to=None):\n if relative_to is None:\n relative_to = os.getcwd()\n if not relative_to.endsw...
[ "0.7362332", "0.6747728", "0.6739235", "0.65846235", "0.656219", "0.65612847", "0.65566796", "0.6551496", "0.65442425", "0.65062994", "0.65059465", "0.6505097", "0.6504365", "0.6454143", "0.6390446", "0.6387397", "0.6368316", "0.632986", "0.631312", "0.6308752", "0.62896514",...
0.72245663
1
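The resolve method above is truncated mid-loop; a sketch of the same traversal-stripping idea for modern Python 3, with an extra containment check (the original's py2 unicode/encode handling is dropped):

```python
import os

def resolve(directory, name):
    # Normalize against a fake root so ".." segments cannot climb out,
    # then re-anchor the remainder under the real base directory.
    candidate = os.path.normpath(os.path.join("/", name)).lstrip("/")
    resolved = os.path.join(directory, candidate)
    base = os.path.abspath(directory)
    if os.path.commonpath([base, os.path.abspath(resolved)]) != base:
        raise ValueError("path escapes base directory")
    return resolved
```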
returns a list of functions, with their arguments, that should fail when the network/trustline_0_1 is frozen; the functions are expected to be called from accounts[0]
def frozen_functions_and_args(accounts): return [ ["transfer", (1, 2, [accounts[0], accounts[1]], b"")], ["transferReceiverPays", (1, 2, [accounts[0], accounts[1]], b"")], ["transferFrom", (1, 2, [accounts[0], accounts[1]], b"")], ["updateTrustline", (accounts[1], 101, 101, 101, 101,...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def condition_for_function(f, abi, all_not_in_ABI):\n\n\tcondition = []\n\tfor n in f.entry_points:\n\t\t[category, num] = api.get_category_for_name( n )\n\t\tif category not in abi:\n\t\t\tcondition.append( 'defined(need_%s)' % (gl_XML.real_category_name( category )) )\n\t\telif all_not_in_ABI:\n\t\t\treturn []\n...
[ "0.5650193", "0.54443383", "0.54156166", "0.537168", "0.5303468", "0.51958776", "0.515159", "0.5115263", "0.50573635", "0.50471103", "0.5032907", "0.49691454", "0.49658495", "0.49336487", "0.49330318", "0.49018726", "0.48831254", "0.4877388", "0.48731956", "0.4864257", "0.486...
0.7283338
0
The trustline between 0 and 1 is frozen; tests that it cannot be used in a mediate transfer
def test_mediate_transfer_fails_if_intermediate_trustline_frozen( currency_network_contract_with_frozen_trustline, accounts ): network = currency_network_contract_with_frozen_trustline path = [accounts[4], accounts[0], accounts[1], accounts[2]] with pytest.raises(eth_tester.exceptions.TransactionFaile...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_transfer_blocked(chain, token, shareholder1, boogieman):\n\n set_state(chain, token, canTransferFlag=False)\n with pytest.raises(ValueError):\n token.transact({\"from\": shareholder1}).transfer(boogieman, 4000)", "def test_32(self):\n assert 'False' == Api.requestBlock('test-32')", ...
[ "0.637659", "0.6006959", "0.59612626", "0.59547615", "0.5950883", "0.59490114", "0.59403247", "0.592359", "0.59158957", "0.58958477", "0.5869624", "0.58487654", "0.5818297", "0.58152175", "0.58116984", "0.58068", "0.5805351", "0.57946134", "0.5792743", "0.5792591", "0.5785922...
0.6220759
1
Sets the build_types of this ProblemScope.
def build_types(self, build_types): self._build_types = build_types
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_type(self, build_type):\n\n self._build_type = build_type", "def scm_types(self, scm_types):\n\n self._scm_types = scm_types", "def types(self, types):\n\n self._types = types", "def types(self, types: List[str]):\n if types is None:\n raise ValueError(\"Inval...
[ "0.65033895", "0.6066945", "0.5788191", "0.5584121", "0.5559509", "0.52263755", "0.50761986", "0.4917929", "0.49125266", "0.48962516", "0.4873129", "0.47903404", "0.4772615", "0.4762409", "0.47446603", "0.4708357", "0.4683082", "0.46532643", "0.46285158", "0.45791152", "0.456...
0.7905692
0
Sets the build_type of this ProblemScope.
def build_type(self, build_type): self._build_type = build_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_types(self, build_types):\n\n self._build_types = build_types", "def build_type(self) -> Optional[pulumi.Input['BuildTypeArgs']]:\n return pulumi.get(self, \"build_type\")", "def __init__(self, project=None, build_types=None, build_type=None, teamcity=None): # noqa: E501 # noqa: E501\...
[ "0.625394", "0.59458286", "0.5855856", "0.577268", "0.5698405", "0.5533615", "0.5451951", "0.54216546", "0.5392611", "0.53884697", "0.5381613", "0.5381613", "0.5381613", "0.5381613", "0.53579956", "0.53508645", "0.53253186", "0.5323894", "0.5323894", "0.5235474", "0.5209653",...
0.7677981
0
A weighted version of keras.objectives.categorical_crossentropy for keras (2.0.6). This lets you apply a weight to unbalanced classes.
def weighted_categorical_crossentropy(weights): weights = K.variable(weights) def loss(y_true, y_pred): # scale predictions so that the class probas of each sample sum to 1 y_pred /= K.sum(y_pred, axis=-1, keepdims=True) # clip to prevent NaN's and Inf's y_pred = K.clip...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def weighted_categorical_crossentropy(weights):\n weights = K.variable(weights)\n\n def loss(y_true, y_pred):\n y_hat = y_pred / K.sum(y_pred, axis=-1, keepdims=True)\n y_hat = K.clip(y_hat, K.epsilon(), 1 - K.epsilon())\n loss = y_true * K.log(y_hat) * weights\n loss = - K.sum(lo...
[ "0.8203031", "0.79314363", "0.7234755", "0.71510255", "0.702699", "0.70055", "0.70055", "0.69127864", "0.6847676", "0.67895746", "0.6735132", "0.66833335", "0.6594962", "0.6553082", "0.6502841", "0.6399763", "0.63869053", "0.63820964", "0.63813084", "0.6356557", "0.6352567", ...
0.842637
0
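The loss closure in this record is truncated after the clipping step; the complete pattern, as commonly written for tf.keras (a sketch, not the record's exact source):

```python
import tensorflow.keras.backend as K

def weighted_categorical_crossentropy(weights):
    weights = K.variable(weights)

    def loss(y_true, y_pred):
        # Normalize per-sample class probabilities, then clip away 0/1
        # so the log cannot produce NaN/Inf.
        y_pred = y_pred / K.sum(y_pred, axis=-1, keepdims=True)
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
        return -K.sum(y_true * K.log(y_pred) * weights, axis=-1)

    return loss
```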
Compare the current token type with the passed token type; if they match, "eat" the current token and assign the next token to self.current_token, otherwise raise an exception.
def eat(self, token_type): if self.current_token.type == token_type: self.current_token = self.lexer.get_next_token() # print(self.current_token) else: self.error()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def match_type(self, token_type):\n if isinstance(self.cursor(), token_type):\n token = self.cursor()\n self.pos += 1\n else:\n raise ParseError(\"Expected {!s}.\".format(token_type))\n return token", "def next_token(self, context: PluginScanContext, token: M...
[ "0.6593315", "0.65260625", "0.650497", "0.64615226", "0.6456286", "0.634234", "0.62530714", "0.6209854", "0.6142705", "0.60957164", "0.6057084", "0.6043257", "0.60196143", "0.5970951", "0.591296", "0.5848229", "0.57907677", "0.57583374", "0.5704527", "0.5701983", "0.5666284",...
0.7741878
0
Eat all the next EOL
def eat_EOL(self): # print("Start eating EOL") self.eat(EOL) while self.current_token.type == EOL: self.eat(EOL) # print("Stop eating EOL")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def maybe_eol(self):\n if self.current == CR:\n self.next()\n if self.current == LF:\n self.next()\n elif self.current == LF:\n self.next()", "def eol(self):\n if self.current not in EOL:\n self.on_parser_error(\"EOL expected\")\n ...
[ "0.795126", "0.7004974", "0.6905897", "0.679999", "0.67539823", "0.6661748", "0.6637306", "0.65803117", "0.6556912", "0.6528691", "0.6528691", "0.6528691", "0.6525438", "0.6516711", "0.6511128", "0.6441263", "0.6417774", "0.6373478", "0.63628477", "0.6303268", "0.6265118", ...
0.7717989
1
Returns a list of content types from the models defined in settings.
def _get_seo_content_types(seo_models): try: return [ContentType.objects.get_for_model(m).id for m in seo_models] except Exception: # previously caught DatabaseError # Return an empty list if this is called too early return []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def listFeaturableContentTypes():", "def get_searchable_content_types():\n record = dict(interface=ICoverSettings, name='searchable_content_types')\n return api.portal.get_registry_record(**record)", "def content_types(self):\n return self.get(\"content_type\", decode=True).split(\"#\")", ...
[ "0.7349977", "0.71621644", "0.6720084", "0.6582432", "0.65513587", "0.64740825", "0.6369539", "0.634687", "0.62149596", "0.6032758", "0.6010961", "0.6010262", "0.59955335", "0.59955335", "0.5927626", "0.5916104", "0.5896257", "0.5886604", "0.58839566", "0.58676517", "0.585921...
0.7348473
1
split a stream into a list of blocks of size block_size
def block_split(stream, block_size=BLOCK_SIZE_IN_BYTES): # TODO: this could possibly be a generator return [stream[i:i + BLOCK_SIZE_IN_BYTES] for i in range(0, len(stream), BLOCK_SIZE_IN_BYTES)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def block_splitter(data, block_size):\n buf = []\n for i, datum in enumerate(data):\n buf.append(datum)\n if len(buf) == block_size:\n yield buf\n buf = []\n\n # If there's anything leftover (a partial block),\n # yield it as well.\n if buf:\n yield buf", ...
[ "0.7169465", "0.7118412", "0.6919498", "0.68670726", "0.68363047", "0.6794331", "0.67409796", "0.673424", "0.67077184", "0.6663819", "0.66617143", "0.6639705", "0.65981317", "0.65813637", "0.6572411", "0.65618354", "0.6526337", "0.65255904", "0.6516533", "0.6511457", "0.65091...
0.74972177
0
discover Bluetooth LE peripherals and their characteristics
def discover_BLE_characteristics(lodBLE): #logging = tls.console_logger() cb.set_verbose(True) cb.reset() Delg = bleDelegate(lodBLE) cb.set_central_delegate(Delg) cb.scan_for_peripherals() logging.info('Waiting for callbacks state=%s' % (cb.get_state())) while not Delg.allFound(): time.sleep(1) logging.info...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def discover(bt_addr):\n print \"performing inquiry...\"\n nearby_devices = bluetooth.discover_devices(lookup_names = True)\n print \"Found %d devices\" % len(nearby_devices)\n \n for addr, name in neaby_devices:\n print \" %s - %s\" % (addr, name)", "def scan_bluetooth(self):\n near...
[ "0.6978275", "0.67073125", "0.6545722", "0.6475751", "0.6446607", "0.63599527", "0.6293589", "0.6150195", "0.60710776", "0.59620595", "0.58640736", "0.5854606", "0.57990634", "0.5784767", "0.5665145", "0.5649242", "0.5633475", "0.56149334", "0.5598885", "0.5506579", "0.547448...
0.75675184
0
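The record scans via a macOS CoreBluetooth (cb) wrapper and is truncated; a cross-platform sketch of the same discover-then-enumerate flow using the third-party bleak library instead (not the record's API):

```python
import asyncio
from bleak import BleakClient, BleakScanner

async def discover_characteristics():
    # Scan for advertising peripherals, then connect and walk each GATT tree.
    for device in await BleakScanner.discover(timeout=5.0):
        try:
            async with BleakClient(device) as client:
                for service in client.services:
                    for char in service.characteristics:
                        print(device.address, service.uuid, char.uuid)
        except Exception as exc:  # devices routinely vanish mid-scan
            print(device.address, "skipped:", exc)

asyncio.run(discover_characteristics())
```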
Set up the Spotify platform.
def async_setup_spotify(hass, config, configurator): return async_setup(hass, config)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def async_setup(hass, config):\n import spotipy.oauth2\n import json\n global AIS_SPOTIFY_TOKEN\n\n try:\n ws_resp = aisCloud.key(\"spotify_oauth\")\n json_ws_resp = ws_resp.json()\n spotify_redirect_url = json_ws_resp[\"SPOTIFY_REDIRECT_URL\"]\n spotify_client_id = json_ws_...
[ "0.6380721", "0.6271231", "0.61243886", "0.6054209", "0.6025928", "0.5995937", "0.59487516", "0.59220606", "0.59220606", "0.59220606", "0.59220606", "0.59220606", "0.59220606", "0.59220606", "0.59220606", "0.59220606", "0.59220606", "0.59220606", "0.59220606", "0.5921665", "0...
0.63484156
1
Set up the Spotify platform.
def async_setup(hass, config): import spotipy.oauth2 import json global AIS_SPOTIFY_TOKEN try: ws_resp = aisCloud.key("spotify_oauth") json_ws_resp = ws_resp.json() spotify_redirect_url = json_ws_resp["SPOTIFY_REDIRECT_URL"] spotify_client_id = json_ws_resp["SPOTIFY_CLIE...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def async_setup_spotify(hass, config, configurator):\n return async_setup(hass, config)", "def setup_method(self):\n self.hass = get_test_home_assistant()\n\n self.config = {\n ip.DOMAIN: {\n \"platform\": \"microsoft_face_identify\",\n \"source\": {\"ent...
[ "0.63484156", "0.6271231", "0.61243886", "0.6054209", "0.6025928", "0.5995937", "0.59487516", "0.59220606", "0.59220606", "0.59220606", "0.59220606", "0.59220606", "0.59220606", "0.59220606", "0.59220606", "0.59220606", "0.59220606", "0.59220606", "0.59220606", "0.5921665", "...
0.6380721
0
asks for destination base
def destination_base_input(destination_base): if 2 <= destination_base <= 9: return destination_base else: print("invalid input")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDest(): #status: Done, Tested\r\n pass", "def _GuessBase(self, required):\r\n url = self._GetInfo(\"URL\")\r\n if url:\r\n scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)\r\n guess = \"\"\r\n # TODO(anatoli) - repository specific hacks should be handle...
[ "0.6239136", "0.6081792", "0.60812646", "0.58890224", "0.58708084", "0.58682895", "0.5796034", "0.57916695", "0.57642895", "0.5762684", "0.57504994", "0.5716583", "0.5704606", "0.5647763", "0.5647447", "0.56273687", "0.56166524", "0.5603385", "0.5594626", "0.5593369", "0.5592...
0.65736556
0
asks the user for a decimal number and the base to convert it to
def main(destination_base, max_number, decimal_number): if 2 <= destination_base <= 9: if 0 <= decimal_number <= max_number: converted_number = base_conversion(destination_base, decimal_number) print(f"the converted number is: {converted_number}") else: print("inv...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decimal_number_input(decimal_number, destination_base, max_number):\n if 0 <= decimal_number <= int(max_number):\n print(f\"the converted number is: {base_conversion(destination_base, decimal_number)}\")\n else:\n print(\"invalid input\")\n return decimal_number", "def base_converter(d...
[ "0.76400197", "0.69913197", "0.67633325", "0.67291844", "0.6524661", "0.65105337", "0.6500028", "0.6495628", "0.64876056", "0.6460031", "0.6343496", "0.63026404", "0.6264527", "0.6256234", "0.623991", "0.6216938", "0.6164334", "0.6153163", "0.6125251", "0.6108441", "0.6103717...
0.7455561
1
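The base_conversion helper the record calls is not shown; the standard repeated-division sketch for bases 2-9 (which is why each remainder is a single digit):

```python
def base_conversion(destination_base, decimal_number):
    # Collect remainders of repeated division, then reverse them.
    if decimal_number == 0:
        return "0"
    digits = []
    while decimal_number > 0:
        digits.append(str(decimal_number % destination_base))
        decimal_number //= destination_base
    return "".join(reversed(digits))

assert base_conversion(2, 10) == "1010"
```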
helper function; decodes a section of the netloc from punycode.
def decode_punycode(label): try: return idna.decode(label.encode('ascii')) except UnicodeError: pass except ValueError as exc: # see https://github.com/john-kurkowski/tldextract/issues/122 # if "narrow Python build" in exc.args[0]: # warnings.warn("can not decode puny...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode(data): #@NoSelf", "def decode(data):\n raise NotImplementedError", "def decode(self, code):\n raise NotImplementedError", "def decode(self, shortUrl):\n return self.demap[shortUrl]", "def decode(self, s):", "def decode(self, s):", "def decode_network_string(msgtype, plen...
[ "0.6572104", "0.61086315", "0.59683406", "0.5781539", "0.5771769", "0.5771769", "0.57637405", "0.57140577", "0.5689829", "0.5652979", "0.5637785", "0.5637785", "0.56200993", "0.5609995", "0.55943227", "0.5592023", "0.55431294", "0.5532244", "0.54975724", "0.54969454", "0.5494...
0.6290453
1
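The punycode helper is truncated inside its exception handling; a compact runnable sketch, assuming the third-party idna package the record imports:

```python
import idna

def decode_punycode(label):
    # idna.decode understands the "xn--" ACE prefix; fall back to the
    # raw label when it is not valid punycode.
    try:
        return idna.decode(label.encode("ascii"))
    except (UnicodeError, ValueError, idna.IDNAError):
        return label
```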
Resize colour images to the required scales and augment if required. We create the color_aug object in advance and apply the same augmentation to all images in this item. This ensures that all images input to the pose network receive the same augmentation.
def preprocess(self, inputs, color_aug): for k in list(inputs): if "color" in k: n, im, i = k inputs[(n, im, 0)] = self.resize(inputs[(n, im, - 1)]) for k in list(inputs): if "color" in k: f = inputs[k] n, im, i = k...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def img_recolor(self, args, input_image_path):\n \n ec = encoder.Encoder(output_path=args.intermediate_representation, method=args.method,\n size=args.size, p=args.p, grid_size=args.grid_size, plot=args.plot, quantize=args.quantize)\n dc = decoder.Decoder(output_pat...
[ "0.6309083", "0.5947522", "0.59078306", "0.5813178", "0.57557356", "0.57447666", "0.57365495", "0.56254", "0.5547125", "0.54938376", "0.54680014", "0.5387826", "0.534617", "0.53367794", "0.5330937", "0.53264517", "0.5302511", "0.5274359", "0.52589434", "0.52519834", "0.523822...
0.71007115
0
Create multiple sync instances from the config and filesystem info.
def create_all_sync_instances(self): # Get directories to sync dirs_to_sync_by_sync_instance = self.get_dirs_to_sync(self.config['sync_hierarchy_rules']) # Store all known running sync instances here to potentially kill later # unhandled_sync_instances = copy.deepcopy(dirs_to_sync_by_sy...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n self.create_all_sync_instances()", "def create_sync_objects(self, subcloud_name, capabilities):\n\n endpoint_type_list = capabilities.get('endpoint_types', None)\n if endpoint_type_list:\n self.sync_objs[subcloud_name] = {}\n for endpoint_type in endpoi...
[ "0.6529905", "0.6203876", "0.6001982", "0.5916084", "0.56833124", "0.5660356", "0.56500673", "0.55826265", "0.55749094", "0.5474997", "0.546527", "0.5450697", "0.53788894", "0.53397506", "0.5328996", "0.530742", "0.52719736", "0.5265014", "0.52415264", "0.52197003", "0.520473...
0.6603992
0
Kill a unison instance by its PID. Includes built-in protection against accidentally killing a non-unison program, and even other unison programs not started with this script. This ensures that this function will never kill a PID that we have not started with unisonctrl. Parameters: int pid to kill must be a PID started in thi...
def kill_sync_instance_by_pid(self, pid): # Get the list of known pids to ensure we only kill one of those running_data = self.data_storage.running_data self.logger.debug( "Attempting to kill PID '" + str(pid) + "'" ) known_pids = [] # Gets PIDs of all the ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kill(pid):\n p = psutil.Process(pid)\n\n try:\n p.kill()\n except Exception:\n pass", "def kill_pid(pid):\n try:\n # Unable to import 'module'\n # pylint: disable=no-member,F0401\n import signal\n return os.kill(pid, signal.SIGTERM)\n except ImportError:\n pass", "de...
[ "0.7319361", "0.7115674", "0.7110478", "0.70291764", "0.6886039", "0.6852084", "0.67194027", "0.67028004", "0.6644425", "0.6624735", "0.6603372", "0.6584298", "0.65174264", "0.64720327", "0.6353765", "0.6335386", "0.63055", "0.6287493", "0.6258707", "0.6181982", "0.61499745",...
0.72457254
1
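The PID-whitelist check in this record is cut off; the core safety idea fits in a few lines — refuse to signal anything outside the set of PIDs we launched ourselves (POSIX semantics assumed):

```python
import os
import signal

def kill_known_pid(pid, known_pids):
    # Guard: only PIDs recorded by this controller may be signalled.
    if pid not in known_pids:
        raise RuntimeError(f"PID {pid} was not started by this controller")
    os.kill(pid, signal.SIGTERM)
```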
Ensure all expected processes are still running. Checks the running_data list against the current PID list to ensure all expected processes are still running. Note that if everything works as expected and does not crash, there should never be dead instances. As such, if dead instances appear on a regular basis, conside...
def cleanup_dead_processes(self): # Get the list of processes we know are running and we think are running # Also, convert each PID to int to make sure we can compare actually_running_processes = self.get_running_unison_processes() l = self.data_storage.running_data supposedly_ru...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detect_instance_pids_still_alive(self):\n missing_instances = []\n running_pids = psutil.pids()\n for instance in self.all_instances:\n if instance.pid not in running_pids:\n missing_instances.append(instance)\n\n if len(missing_instances) > 0:\n ...
[ "0.7389043", "0.6897777", "0.6816135", "0.6575307", "0.6531362", "0.6480457", "0.6444165", "0.6391992", "0.63755715", "0.6345061", "0.6262", "0.6207695", "0.6162055", "0.6090997", "0.60754627", "0.59734434", "0.594996", "0.5924127", "0.5866743", "0.58547074", "0.5805897", "...
0.74760336
0
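The liveness comparison in cleanup_dead_processes is truncated; the usual POSIX probe for "is this PID still running" is signal 0, which checks existence and permissions without delivering anything:

```python
import os

def is_pid_alive(pid):
    try:
        os.kill(pid, 0)
    except ProcessLookupError:
        return False        # no such process
    except PermissionError:
        return True         # exists, but owned by another user
    return True
```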
Return PIDs of currently running unison instances.
def get_running_unison_processes(self): # Get PIDs # Note: throws exception if no instances exist try: pids = str(subprocess.check_output(["pidof", '/usr/bin/unison'])) # Parse command output into list by removing junk chars and exploding # string with space ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pids():\n stream = os.popen(\"ps aux | grep '[m]itm' | awk '{print $2}'\")\n return stream.read()", "def get_pid_list():\r\n pids = [int(x) for x in os.listdir('/proc') if x.isdigit()]\r\n return pids", "def detect_instance_pids(self):\n for instance in self.all_instances:\n ...
[ "0.6817835", "0.66776186", "0.66677475", "0.652992", "0.64818245", "0.64350146", "0.64334714", "0.6432905", "0.6400463", "0.6384319", "0.63838774", "0.63823915", "0.6377238", "0.6373727", "0.63699615", "0.6306814", "0.6305249", "0.63029253", "0.6282789", "0.6215774", "0.62072...
0.86521405
0
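The pidof parsing in this record is cut off; a cleaner sketch of the same lookup that avoids the str()-of-bytes juggling the original hints at:

```python
import subprocess

def get_running_unison_pids(binary="/usr/bin/unison"):
    # pidof exits non-zero when nothing matches, hence the except branch.
    try:
        out = subprocess.check_output(["pidof", binary], text=True)
    except subprocess.CalledProcessError:
        return []
    return [int(pid) for pid in out.split()]
```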
Import config from config, and apply details where needed.
def import_config(self): # Get the config file import config # Get all keys from keyvalue pairs in the config file settingsFromConfigFile = [x for x in dir(config) if not x.startswith('__')] # Convert config file into dict for key in settingsFromConfigFile: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup(self, config):\n config_location = None\n try:\n try:\n stream = config.read()\n if hasattr(config, 'name'):\n config_location = config.name\n except (AttributeError, TypeError):\n f = file(config)\n ...
[ "0.7061655", "0.6931682", "0.67424726", "0.6520522", "0.65100807", "0.64880663", "0.64506406", "0.6448089", "0.6418599", "0.6414208", "0.63963264", "0.6380314", "0.63017356", "0.62915653", "0.62915236", "0.6271026", "0.6271026", "0.6242148", "0.62274957", "0.6216356", "0.6194...
0.7400688
0
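The config import above is truncated mid-conversion; the whole module-to-dict step it describes is small enough to show in full (a sketch; `config` is whatever settings module the project ships):

```python
def import_config():
    import config

    # Every non-dunder attribute of the module becomes a settings entry.
    return {
        key: getattr(config, key)
        for key in dir(config)
        if not key.startswith("__")
    }
```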
Send the predict request to the backend server, get the return value and do the post-processing. Predict the input image, and get the result. The user must specify the image_path, servable_name, dataset_name and output_strategy to get the predict result.
def predict(self, img_path, servable_name, dataset_name="mnist", strategy="TOP1_CLASS"): # Check if args are valid if not os.path.isfile(img_path): print("The image path {} not exist!".format(img_path)) sys.exit(0) trans_func = transform_checker.get(dataset_name) ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict():\r\n \r\n data = {\"success\": False}\r\n if flask.request.files.get(\"image\"):\r\n # read image from request\r\n image = flask.request.files[\"image\"].read()\r\n # convert image to BGR\r\n image = read_image_bgr(io.BytesIO(image))\r\n # preprocess image ...
[ "0.7527697", "0.7489377", "0.733074", "0.7176334", "0.70035565", "0.6992929", "0.6954645", "0.68714213", "0.68302935", "0.67761964", "0.67373866", "0.67367834", "0.6716685", "0.66948694", "0.6670388", "0.66675407", "0.6614825", "0.66144085", "0.65787727", "0.6575756", "0.6552...
0.78791124
0
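The request/response plumbing in this record is truncated; a generic sketch of the client side using the requests library — the endpoint URL and field names here are hypothetical, not the record's actual backend contract:

```python
import requests

def predict(img_path, servable_name, url="http://127.0.0.1:5500/predict"):
    # Ship the image plus routing metadata; the server returns JSON.
    with open(img_path, "rb") as fh:
        resp = requests.post(
            url,
            files={"image": fh},
            data={"servable_name": servable_name},
            timeout=30,
        )
    resp.raise_for_status()
    return resp.json()
```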
Generates a four-day weekend report. The four-day weekends are calculated from the start_month through the end of the year, along with the number of work days for the same time period. The report takes into account any holidays that might fall within that time period and days designated as working from home (WFH). If show...
def four_day_weekends( *args, start_month: int = 8, paid_time_off: int = 200, year: int = 2020, show_workdays: bool = False ) -> None: if len(args) > 0: raise ValueError(ERROR_MSG) # get number of weekends to subtract because holiday of US holidays for year h...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def four_day_weekends(*args,\n start_month: int = 8,\n paid_time_off: int = 200,\n year: int = 2020,\n show_workdays: bool = False\n ) -> None:\n\n\n if args:\n raise ValueError(ERROR_MSG)\n else:\n four_day_weekends = workdays = 0\n weekend_dates =[]\n ...
[ "0.7783672", "0.5774272", "0.56252056", "0.5575767", "0.5506491", "0.5342154", "0.5337909", "0.52582586", "0.52426636", "0.5213161", "0.5204552", "0.5204155", "0.5193903", "0.51596296", "0.51428", "0.5108332", "0.510329", "0.50756997", "0.504167", "0.502892", "0.5019758", "...
0.7266772
1
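The report builder above is truncated before the date arithmetic; a minimal sketch of the core enumeration — treating a four-day weekend as a Friday plus the following Monday off — with the holiday/WFH adjustments the record mentions left out:

```python
from datetime import date, timedelta

def iter_four_day_weekends(year=2020, start_month=8):
    # Yield (Friday, Monday) pairs from start_month to year end.
    day = date(year, start_month, 1)
    end = date(year, 12, 31)
    while day <= end:
        if day.weekday() == 4:  # Friday
            monday = day + timedelta(days=3)
            if monday <= end:
                yield day, monday
        day += timedelta(days=1)

print(sum(1 for _ in iter_four_day_weekends()))
```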
Computes normalized quantile loss for torch tensors. Uses the qRisk metric as defined in the "Training Procedure" section of the main TFT paper.
def normalized_quantile_loss(actuals: torch.Tensor, predictions: torch.Tensor, quantiles: List[float] = None) -> torch.Tensor: normalizer = torch.sum(abs(actuals)) if quantiles == None: QL = QuantileLoss(quantiles=[0.1, 0.5, 0.9]) else: QL = QuantileLoss(quantiles=quantiles) q_loss = QL...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def numpy_normalised_quantile_loss(self, y_pred, y, quantile):\n if not isinstance(y_pred, paddle.Tensor):\n y_pred = paddle.to_tensor(y_pred,paddle.float32)\n\n if len(y_pred.shape) == 3:\n ix = self.quantiles.index(quantile)\n y_pred = y_pred[..., ix]\n\n if ...
[ "0.70270723", "0.6815313", "0.58613586", "0.58225995", "0.56911457", "0.5627937", "0.5546004", "0.5536235", "0.54864985", "0.54816455", "0.547545", "0.54679716", "0.546322", "0.54552704", "0.53829426", "0.5378242", "0.53772986", "0.53498733", "0.5348454", "0.534481", "0.53448...
0.7660437
0
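The torch version in this record is truncated; the same q-risk metric written out in NumPy makes the normalization explicit (a sketch; predictions hold one column per quantile):

```python
import numpy as np

def q_risk(actuals, predictions, quantiles=(0.1, 0.5, 0.9)):
    # Summed pinball loss over all quantiles, normalized by total |y|.
    actuals = np.asarray(actuals, dtype=float)
    predictions = np.asarray(predictions, dtype=float)
    total = 0.0
    for i, q in enumerate(quantiles):
        diff = actuals - predictions[..., i]
        total += np.sum(np.maximum(q * diff, (q - 1) * diff))
    return 2.0 * total / np.sum(np.abs(actuals))
```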
Sets cookie if user exists and passes status.
def set_cookie(): decoded_request = json.loads(request.data) email = decoded_request['email'] password = decoded_request['password'] incorrect_pw_msg = 'Oh no, this does not look like its correct. Please try again.' user = crud.get_user_by_email(email) correct_password = crud.is_correct_passwo...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_user_cookie_id():\n #new fresh user\n if not request.cookies.get(config.COOKIE_ADSABS2_NAME):\n if current_user.is_anonymous():\n g.user_cookie_id = unicode(uuid.uuid4())\n else:\n g.user_cookie_id = current_user.get_id()\n #the user has already visited the web ...
[ "0.7051497", "0.6615434", "0.63359445", "0.63336045", "0.63336045", "0.6249339", "0.6206051", "0.6161316", "0.61472225", "0.61126524", "0.60267013", "0.6026603", "0.59424293", "0.59401476", "0.5921613", "0.5901393", "0.5874812", "0.58216405", "0.5815492", "0.58044195", "0.580...
0.68235534
1
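This last record is truncated before the cookie is actually set; in Flask that requires an explicit response object — a minimal sketch of the pattern (names here are illustrative, not the record's crud helpers):

```python
from flask import jsonify, make_response

def login_response(user_id, password_ok):
    # Cookies ride on the response, so build it before returning.
    if not password_ok:
        return jsonify(status="error"), 401
    resp = make_response(jsonify(status="ok"))
    resp.set_cookie("user_id", str(user_id), httponly=True)
    return resp
```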