query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Copied from AccountBroker before the container_count column was added. Create policy_stat table which is specific to the account DB. Not a part of Pluggable Backends, internal to the baseline code.
def pre_track_containers_create_policy_stat(self, conn): conn.executescript(""" CREATE TABLE policy_stat ( storage_policy_index INTEGER PRIMARY KEY, object_count INTEGER DEFAULT 0, bytes_used INTEGER DEFAULT 0 ); INSERT OR IGNORE INTO policy_stat ( storage_policy_index, object_count, bytes_used ) SELECT 0, object_count, bytes_used FROM account_stat WHERE container_count > 0; """)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pre_track_containers_create_container_table(self, conn):\n # revert to old trigger script to support one of the tests\n OLD_POLICY_STAT_TRIGGER_SCRIPT = \"\"\"\n CREATE TRIGGER container_insert_ps AFTER INSERT ON container\n BEGIN\n INSERT OR IGNORE INTO policy_stat\n ...
[ "0.6394596", "0.6312397", "0.52926415", "0.5226492", "0.521135", "0.5126524", "0.50752866", "0.50496316", "0.49747297", "0.49588305", "0.4934026", "0.48855892", "0.48768532", "0.4860666", "0.48166457", "0.4806398", "0.4796792", "0.4787185", "0.477994", "0.47572404", "0.475716...
0.8145411
0
Copied from AccountBroker before the container_count column was added (using old stat trigger script) Create container table which is specific to the account DB.
def pre_track_containers_create_container_table(self, conn): # revert to old trigger script to support one of the tests OLD_POLICY_STAT_TRIGGER_SCRIPT = """ CREATE TRIGGER container_insert_ps AFTER INSERT ON container BEGIN INSERT OR IGNORE INTO policy_stat (storage_policy_index, object_count, bytes_used) VALUES (new.storage_policy_index, 0, 0); UPDATE policy_stat SET object_count = object_count + new.object_count, bytes_used = bytes_used + new.bytes_used WHERE storage_policy_index = new.storage_policy_index; END; CREATE TRIGGER container_delete_ps AFTER DELETE ON container BEGIN UPDATE policy_stat SET object_count = object_count - old.object_count, bytes_used = bytes_used - old.bytes_used WHERE storage_policy_index = old.storage_policy_index; END; """ conn.executescript(""" CREATE TABLE container ( ROWID INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, put_timestamp TEXT, delete_timestamp TEXT, object_count INTEGER, bytes_used INTEGER, deleted INTEGER DEFAULT 0, storage_policy_index INTEGER DEFAULT 0 ); CREATE INDEX ix_container_deleted_name ON container (deleted, name); CREATE TRIGGER container_insert AFTER INSERT ON container BEGIN UPDATE account_stat SET container_count = container_count + (1 - new.deleted), object_count = object_count + new.object_count, bytes_used = bytes_used + new.bytes_used, hash = chexor(hash, new.name, new.put_timestamp || '-' || new.delete_timestamp || '-' || new.object_count || '-' || new.bytes_used); END; CREATE TRIGGER container_update BEFORE UPDATE ON container BEGIN SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT'); END; CREATE TRIGGER container_delete AFTER DELETE ON container BEGIN UPDATE account_stat SET container_count = container_count - (1 - old.deleted), object_count = object_count - old.object_count, bytes_used = bytes_used - old.bytes_used, hash = chexor(hash, old.name, old.put_timestamp || '-' || old.delete_timestamp || '-' || old.object_count || '-' || old.bytes_used); END; """ + OLD_POLICY_STAT_TRIGGER_SCRIPT)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prespi_create_container_table(self, conn):\n conn.executescript(\"\"\"\n CREATE TABLE container (\n ROWID INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT,\n put_timestamp TEXT,\n delete_timestamp TEXT,\n object_count INTEGER,\n bytes_us...
[ "0.74409187", "0.7198487", "0.61117744", "0.59451663", "0.5910709", "0.58744675", "0.5785568", "0.57599217", "0.5757854", "0.5751283", "0.5730389", "0.5673858", "0.56548923", "0.5640886", "0.5640633", "0.56349385", "0.56337035", "0.56274104", "0.5623988", "0.5606793", "0.5597...
0.75585115
0
Start running uvicore server.
def run(app_location: str, host: str, port: int): # https://github.com/tiangolo/fastapi/issues/1508 uvicorn.run( app_location, host=host, port=port, log_config=None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def local_main():\n uvicorn.run(app, host=\"0.0.0.0\", port=5000)", "async def start(self):\n server = await asyncio.start_server(\n self.handle_request, self.host, self.port)\n\n addr = server.sockets[0].getsockname()\n print(f'Serving on {addr}')\n\n async with server:...
[ "0.7084234", "0.6992241", "0.69815814", "0.6813422", "0.68041515", "0.6786285", "0.6786285", "0.6771331", "0.6730415", "0.669539", "0.6544975", "0.648505", "0.6399624", "0.63707113", "0.6362181", "0.63410026", "0.6337383", "0.6334539", "0.63275516", "0.63197315", "0.62906486"...
0.6218118
25
Collation function to be used with data loaders
def collate(self, batch): images = [] indices = [] roi_size = 5 if self.Train else 4 rois = torch.zeros((len(batch), 20, roi_size), dtype=torch.float32) rois = rois.to(batch[0][1].device) for _b in range(len(batch)): # Accumulate patches: images.append(batch[_b][0].to(torch.float32)) indices.append(batch[_b][2]) # Accumulate ROI: """ image_num = torch.Tensor([_b]).expand(batch[_b][1].size(0)) image_num = image_num.type(batch[_b][1].dtype).view(-1,1) image_num = image_num.to(batch[_b][1].device) _roi = torch.cat([image_num, batch[_b][1]], dim=1) rois = torch.cat([rois, _roi], dim=0) """ num_boxes = batch[_b][1].size(0) rois[_b,:num_boxes,:] = batch[_b][1] # Stack outputs and return batch = [torch.stack(images, dim=0), rois, torch.Tensor(indices)] return batch
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collation(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"collation\")", "def collation(self) -> str:\n return pulumi.get(self, \"collation\")", "def collation(self) -> str:\n return pulumi.get(self, \"collation\")", "def collation(self) -> Optional[pulumi.Input[str]]:\n ...
[ "0.6623606", "0.65305656", "0.65305656", "0.64108706", "0.64108706", "0.63947916", "0.63842875", "0.6326007", "0.6215365", "0.6073148", "0.60546684", "0.58473516", "0.583685", "0.55808395", "0.5559438", "0.53668314", "0.5345691", "0.5240828", "0.52396196", "0.523723", "0.5231...
0.0
-1
Compute the class count of ROIs for each sample.
def count_classes(self, index=None): if index is None: index = np.arange(self.Samples.shape[0]) elif isinstance(index, int): index = [index] count = np.zeros((len(index), len(self._classes)), dtype=np.int) for _ind in range(len(index)): rois = self.__getrois__(index[_ind]) count[_ind, :] = np.bincount(rois[:,4].astype(np.int), minlength=len(self._classes)) return count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_num_classes(self):", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def class_counts(rows):\n counts = {} # a dictionary of label -> count.\n for...
[ "0.6769185", "0.6670084", "0.6666405", "0.6666405", "0.6666405", "0.6578789", "0.6500897", "0.64842236", "0.6412822", "0.63247687", "0.6280898", "0.6262814", "0.62501544", "0.6188363", "0.6161805", "0.6135146", "0.6125166", "0.6116448", "0.60772496", "0.6040804", "0.60212356"...
0.73258656
0
Balance ROI instances across the dataset
def balance_classes(self, classids): # Get ROI class counts for each sample patch: samples = self.SampleID counts = self.count_classes(samples) counts = counts[:, classids] totalcount = np.sum(counts, axis=0) # Find the class with minimum and maximum total count: c_min = np.argmin(totalcount) c_max = np.argmax(totalcount) # Class balancing is performed as long as the min-max class ratio is # not within 50%. # # Balancing Algorithm: # * Randomly sample from samples with non-zero min-class ROI counts # and zero maximum class ROIs. # * Simulaneously, randomly sample a subset of max-class only samples # to be removed from the dataset. This levels the field from both # directions. class_ratio = totalcount[c_min] / totalcount[c_max] while (class_ratio < 0.5) & (len(samples) < 3*5000): # Find samples with maximum min-max class ratio: N = np.sum((counts[:,c_min] > 0) & (counts[:,c_max] == 0)) M = int(0.5*N) # Min-class samples to add: min_sample = np.nonzero((counts[:,c_min]>0) & (counts[:,c_max]==0)) min_sample = min_sample[0] # Unfold tuple min_sample = min_sample[np.random.randint(0, len(min_sample)-1, N)] # Max-class samples to remove: max_sample = np.nonzero((counts[:,c_min]==0) & (counts[:,c_max]>0)) max_sample = max_sample[0] # Unfold tuple max_sample = max_sample[np.random.randint(0, len(max_sample)-1, M)] max_sample = np.unique(max_sample) # Construct new sample set: min_sample = samples[min_sample] samples = np.append(np.delete(samples, max_sample), min_sample) # Recompute total count and min-max class ratio: counts = self.count_classes(samples)[:, classids] totalcount = np.sum(counts, axis=0) c_min = np.argmin(totalcount) c_max = np.argmax(totalcount) class_ratio = totalcount[c_min] / totalcount[c_max] # Done, balanced, update samples: balancedset = self.Samples[samples,:] self._set_sampling_scheme_(balancedset)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def balanceData(dataPath,dataPathFile):\r\n \r\n databaseDict = loadData(os.path.join(dataPath,dataPathFile))\r\n classes = 7\r\n balancedDict = {}\r\n \r\n print('Start Balancing data')\r\n for fold in databaseDict.keys():\r\n fold_dic_not_balanced = databaseDict[fold]\r\n targe...
[ "0.57167405", "0.570118", "0.5557484", "0.5479032", "0.5437137", "0.5420406", "0.5420168", "0.5331262", "0.53278494", "0.5277977", "0.5214856", "0.5206903", "0.51943547", "0.5191577", "0.5144731", "0.5080557", "0.5061859", "0.5055446", "0.5047951", "0.5047951", "0.502992", ...
0.527201
10
Find the maximum number of ROIs per batch sample in the dataset
def get_max_rois(self): maxsize = 0 for index in self.SampleID: rois = self.__getrois__(index); maxsize = max(maxsize, rois.shape[0]) return maxsize
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __len__(self):\n return int(np.ceil(self.max_index / float(self.batch_size)))", "def max_num_batches(self):\n return self._max_num_batches", "def max_individuals(self) -> int:\n return self.group_size.upper * self.groups_allowed", "def __len__(self):\n return int(np.ceil(len(s...
[ "0.6792534", "0.659393", "0.65083873", "0.64983654", "0.6442182", "0.6357087", "0.6351913", "0.63443124", "0.6337801", "0.630785", "0.62681437", "0.6250322", "0.6208346", "0.6205874", "0.616179", "0.61612123", "0.6137554", "0.6132003", "0.612465", "0.6107981", "0.61064506", ...
0.78903097
0
Worker Initialization Function for parallel batch loading.
def partition(worker_id): worker_info = torch.utils.data.get_worker_info() dataset = worker_info.dataset # Re-create BigTIFF objects that turned stale after serialization: for region in dataset.BigTIFFs: imgfile = dataset.BigTIFFs[region].Source dirID = dataset.BigTIFFs[region].DirectoryID patchSize = dataset.BigTIFFs[region].PatchSize[dirID] dataset.BigTIFFs[region] = Bigtiff(imgfile) dataset.BigTIFFs[region].setDirectory(dirID) dataset.BigTIFFs[region].setPatchSize(patchSize) # configure the dataset to only process the split workload per_worker = int(math.ceil(dataset.SampleID.shape[0] / float(worker_info.num_workers) )) sampleStart = worker_id * per_worker sampleEnd = sampleStart + per_worker dataset.SampleID = dataset.SampleID[sampleStart:sampleEnd]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_batch(self):\n pass", "def _initJobs(self):\n pass", "def start_loading(self):\n if self.loading:\n warnings.warn('Loader is already loading!')\n return\n \n assert self.batch_queue is not None\n batch_queue = self.batch_queue\n \n...
[ "0.7315344", "0.71394104", "0.69043654", "0.67671055", "0.67395073", "0.66971", "0.6604564", "0.6591444", "0.656487", "0.65599173", "0.6558237", "0.65196973", "0.6494201", "0.648432", "0.63900125", "0.6368222", "0.63650346", "0.63589203", "0.63572824", "0.6347132", "0.6300145...
0.0
-1
Chooses starting station based on the least amount of connections and adds it to a new track.
def create_new_track(self, station_list, i, new_grid): self.first_station = self.stations[station_list.pop(0)] track = Track(f"depthfirst_{i}", new_grid) track.add_station(new_grid, self.first_station.name) return track
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pick_next_station(self, station):\n self.best_score = 0\n\n stations = self.grid.stations\n # all connections of the last added added station \n lookahead_1 = self.grid.get_station(self.best_connection[1]).connections\n\n for la1 in lookahead_1.values():\n next_sta...
[ "0.73916966", "0.72347254", "0.6283393", "0.5770273", "0.53530777", "0.5282217", "0.5246007", "0.5129842", "0.5115605", "0.5112706", "0.5046593", "0.50430566", "0.5032552", "0.5032552", "0.5012475", "0.50104666", "0.5006915", "0.49982488", "0.49773306", "0.49151057", "0.48719...
0.59082127
3
Makes dictionary of the station and their amount of connections.
def make_station_dict(self): self.station_dict = {} # interates over stations and puts the amount of connections in the dict for station in self.stations: length = len(self.stations[station].connections) self.station_dict[station] = length return self.station_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stations_dict(self):\n return self.__stations_dict", "def get_online_count():\n return dict(online_user=get_online_users())", "def getConnections():\n\n c = psutil.net_connections()\n connects = {}\n\n count = 0\n for connection in c:\n conn = {}\n status = connection.st...
[ "0.66026574", "0.6212001", "0.61680955", "0.61130697", "0.60983795", "0.60969037", "0.6081789", "0.6016051", "0.59923977", "0.58641666", "0.5816404", "0.58135796", "0.5811761", "0.5791444", "0.5785143", "0.577446", "0.5770902", "0.57675856", "0.5764687", "0.57554936", "0.5753...
0.8745632
0
Sorts the station dict based on the amount of connections (value).
def create_station_list(self): sorted_station_list = sorted(self.station_dict, key=self.station_dict.get) return sorted_station_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sort(self):\n\t\tself.servers = sorted(self.servers, key=lambda s: s.load)\n\t\tself.servers = sorted(self.servers, key=lambda s: s.distance_class)\n\t\tself.servers = sorted(self.servers, key=lambda s: s.country == self.locale_info.country, reverse=True)", "def _get_sorted_by_n_connections(m):\n small = ...
[ "0.6438473", "0.6063636", "0.5954494", "0.5753256", "0.572934", "0.5722744", "0.55783653", "0.54763424", "0.5446469", "0.54192704", "0.5373764", "0.531757", "0.5296655", "0.5287621", "0.5285237", "0.5215991", "0.52076167", "0.52006", "0.5183816", "0.51670825", "0.51528376", ...
0.6300883
1
Tries all possible configurations starting at the first station and only adds the configuration with the best score.
def visit_all_possibilities(self, first_station, track, grid): # loops over connections of station for connection in first_station.connections: # keeps adding untill the max length of a track is reached if track.add_station(grid, self.stations[connection].name): # calculates the quality of adding the station and remembers it if it is the best score yet if grid.get_quality() > self.best_score: self.best_score = grid.get_quality() self.grid = copy.deepcopy(grid) print(f"new best score: {self.best_score}:\n{self.grid}\n\n") # repeat untill there are no more configurations left self.visit_all_possibilities(self.stations[connection], track, grid) track.remove_last_station()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pick_next_station(self, station):\n self.best_score = 0\n\n stations = self.grid.stations\n # all connections of the last added added station \n lookahead_1 = self.grid.get_station(self.best_connection[1]).connections\n\n for la1 in lookahead_1.values():\n next_sta...
[ "0.7005242", "0.66232514", "0.6464399", "0.62459195", "0.6219636", "0.6025062", "0.5821818", "0.58028185", "0.56950855", "0.5568496", "0.5530859", "0.5526991", "0.55262834", "0.5522291", "0.547059", "0.5425894", "0.54113275", "0.5394614", "0.5392455", "0.5378019", "0.53733134...
0.71698874
0
Makes the key for authentication according to the specification on Nordnets page
def make_hash(self): timestamp = str(int(round(time.time()*1000))) auth = b64encode(config.username) + ':' \ + b64encode(config.password) + ':' \ + b64encode(timestamp) rsa = RSA.load_pub_key(config.public_key) encrypted_auth = rsa.public_encrypt(auth, RSA.pkcs1_padding) key = b64encode(encrypted_auth) return key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_key ():", "def private_key(self):", "def public_key(self):", "def private_key():\n return \"Toholampi summer festival 2017 has the most harcore rock bands\"", "def authenticator():", "def getAuthKey(self):\r\n auth_key = 'Que despierte la Red'\r\n assert len(auth_key) == self.AUTH_KEY...
[ "0.7266466", "0.6840144", "0.6624", "0.6536083", "0.6467815", "0.6440537", "0.6359403", "0.63546956", "0.63519514", "0.6313089", "0.62246615", "0.62246615", "0.6169524", "0.61688346", "0.6131176", "0.6095102", "0.60891443", "0.6085557", "0.60689396", "0.6060211", "0.6033049",...
0.0
-1
Logs in to the server
def login(self): hashkey = self.make_hash() connection = self.connection or self.connect() parameters = urlencode({ 'service' : config.service, 'auth' : hashkey }) print "parameters for login: '%s'" % (parameters) connectionstring = 'https://' + config.base_url + '/' \ + config.api_version + '/login' logger.info('Trying to login to REST: %s' % connectionstring) logger.info('Applying header: %s' % no_auth_headers) connection.request('POST', connectionstring, parameters, no_auth_headers) response = connection.getresponse() response_as_json = jloads(response.read()) self.auth_session_key = response_as_json['session_key'] self.auth_hostname = response_as_json['public_feed']['hostname'] self.auth_port = response_as_json['public_feed']['port'] basic_auth = b64encode("%s:%s" % (self.auth_session_key, self.auth_session_key)) self.auth_headers = no_auth_headers.copy() self.auth_headers['Authorization']="Basic %s" % (basic_auth) return response_as_json
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def login(self):\n\t\treturn", "def login():", "def login():", "def login(self):\n self.client.login(username=self.user.username, password='test')", "def login_user(self):\r\n self.client.login(username=self.user.username, password=\"password\")", "def login(self):\n\n self.__login_i...
[ "0.7973264", "0.7904653", "0.7904653", "0.7693467", "0.7655902", "0.7554838", "0.7549945", "0.7540455", "0.7519316", "0.7478677", "0.746458", "0.74409384", "0.7430161", "0.74290764", "0.7427556", "0.73943985", "0.7350303", "0.7341028", "0.73346376", "0.7320527", "0.73103094",...
0.0
-1
Creates and saves a User with the given email and password.
def _create_user(self, email, password, **extra_fields): if not email: raise ValueError('The given email must be set') email = self.normalize_email(email) user = self.model(email=email, **extra_fields) user.set_password(password) user.save(using=self._db) return user
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def...
[ "0.84163386", "0.8407691", "0.8396383", "0.83900774", "0.83900774", "0.83900774", "0.83818454", "0.83800155", "0.83791924", "0.83788806", "0.83781564", "0.83751047", "0.83726776", "0.83633083", "0.83633083", "0.83633083", "0.8347122", "0.8345504", "0.8345504", "0.8345504", "0...
0.8360308
17
creates a species identified by taxid and containing empty dictionnary of orthologs
def __init__(self, taxid, species_name = None, lineage=None): self.genes = dict() self.taxid = taxid self.species = species_name self.lineage = lineage
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_taxon():\n if not exists('./data/taxdmp.zip'):\n ftp = FTP('ftp.ncbi.nih.gov')\n ftp.login()\n ftp.cwd('pub/taxonomy')\n ftp.retrbinary('RETR taxdmp.zip', open('./data/taxdmp.zip', 'wb').write)\n ftp.quit\n with ZipFile('./data/taxdmp.zip', 'r') as dumpfile:\n ...
[ "0.62028277", "0.5643144", "0.55928254", "0.5586047", "0.5550129", "0.5545237", "0.55178034", "0.54141647", "0.5405542", "0.5331211", "0.528797", "0.52788687", "0.5253945", "0.52389264", "0.5223071", "0.52124864", "0.516965", "0.51686347", "0.51635945", "0.49660262", "0.49572...
0.6710074
0
add an entry in the dic with key "human gene ID" and value "ortholog gene ID"
def add_gene(self, human_gene, ortholog): if human_gene not in self.genes: self.genes[human_gene] = list() self.genes[human_gene].append(ortholog)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_gene_info(ensembl_info, word, value):\n if \"gene\" in word:\n if \"id\" in word:\n ensembl_info[\"ensembl_gene_id\"] = value\n elif \"start\" in word:\n ensembl_info[\"gene_start\"] = int(value)\n elif \"end\" in word:\n ensembl_info[\"gene_end\"...
[ "0.65119916", "0.5768898", "0.5745668", "0.5678076", "0.56137764", "0.5577298", "0.5574873", "0.5562769", "0.55310345", "0.54852855", "0.5448665", "0.5448038", "0.5416714", "0.54129606", "0.54046834", "0.539394", "0.5387726", "0.53722453", "0.5369394", "0.53620994", "0.535825...
0.7082176
0
for handling WorkResultMessages from Result queue
def extractWorkResultMessage(messageBody): messageContents = json.loads(messageBody) try: message = WorkResultMessage(body=messageContents) return message except: log.error(str(sys.exc_info()[0]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_results_from_message_queue():\n message_queue.get_result_length()\n logger.info(\"get task results from task queue\")", "def process_messages(self):\n pass", "def _process_worker(call_queue, result_queue):\n while True:\n call_item = call_queue.get(block=True)\n if call_it...
[ "0.6850718", "0.6538829", "0.65200484", "0.64860433", "0.6379262", "0.63759804", "0.62607986", "0.6202013", "0.6202013", "0.61881113", "0.61721927", "0.6146824", "0.61398584", "0.6130029", "0.6085379", "0.60829306", "0.6052195", "0.60503155", "0.6036185", "0.5957413", "0.5952...
0.6439669
4
Returns True is string is a number.
def is_number(s): try: float(s) return True except ValueError: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_number(string):\r\n try:\r\n float(string)\r\n return True\r\n except ValueError: return False", "def is_number(string):\n try:\n float(string)\n return True\n except ValueError:\n return False", "def is_number(s):\r\n try:\r\n int(s)\r\n r...
[ "0.8850884", "0.8807396", "0.87623763", "0.87143797", "0.86495125", "0.86369324", "0.8611852", "0.8602863", "0.85868895", "0.8556723", "0.8540039", "0.846474", "0.83899784", "0.83504224", "0.82912153", "0.8217023", "0.81620115", "0.81181717", "0.8040893", "0.8037833", "0.8027...
0.8597067
10
For some reason, app.dependency_overrides does not accept pytest fixtures as overrider, so this function is needed although it is exactlythe same as db
def testing_get_db() -> Generator: db = TestSessionLocal() try: yield db finally: db.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fixtures():", "def fixture_example_data():\n import_example_data()", "def _fixture_setup(self):\n pass", "def db_python_only():\n return os.path.join(_here, 'fixtures/databases/db-python-only/database')", "def load_initial_fixtures_func(app_name):\n return partial(_load_initial_fixtures...
[ "0.6346832", "0.62192637", "0.6143342", "0.59758323", "0.5957415", "0.595324", "0.595152", "0.59219795", "0.58347386", "0.578332", "0.57545054", "0.5665109", "0.5632672", "0.5599801", "0.55725056", "0.5564125", "0.55455124", "0.5536632", "0.5526659", "0.5524314", "0.5510719",...
0.0
-1
Define the edgeR object
def __init__(self, count, group, repl, output): self._table_count = count self._groups_name = group self._replic = repl self._output = output self._message = Message() self._likelihood_column = 2 + len(group)*repl self._fdr_de_column = 4 + len(group)*repl self._likelihood = 0.95 self._fdr = 0.1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args):\n _snap.TNGraphEdgeI_swiginit(self, _snap.new_TNGraphEdgeI(*args))", "def __init__(self, name, edge, start_node, end_node, pipe_model,\n allow_flow_reversal,\n temperature_driven, repr_days=None):\n\n self.logger = logging.getLogger('modest...
[ "0.6473663", "0.6447562", "0.6428948", "0.64194375", "0.6335059", "0.62395126", "0.6237139", "0.6191634", "0.6189968", "0.61835927", "0.6147736", "0.6143343", "0.6141972", "0.61232835", "0.6109794", "0.6109794", "0.6081987", "0.6080473", "0.6080444", "0.6078262", "0.6078262",...
0.0
-1
Execute default analysis with baySeq
def run_bayseq(self): try: res = robjects.r('library("parallel")') res = robjects.r('library("stats4")') res = robjects.r('library("BiocGenerics")') res = robjects.r('library("S4Vectors")') res = robjects.r('library("IRanges")') res = robjects.r('library("GenomeInfoDb")') res = robjects.r('library("abind")') # res = robjects.r('library("perm")') res = robjects.r('library("GenomicRanges")') res = robjects.r('library("baySeq")') res = robjects.r('if(require("parallel")) cl <- makeCluster(4) else cl <- NUL') ct = 'table <- read.csv("' + self._table_count + '", row.names = 1, header = TRUE, stringsAsFactors = FALSE)' res = robjects.r(ct) res = robjects.r('m <- as.matrix(table)') replicates = "" assert isinstance(self._replic, int) for ind in iter(self._groups_name): aux = "'" + ind + "', " replicates = replicates + aux * self._replic replicates = replicates[:(len(replicates) - 2)] replicates = 'replicates <- c(' + replicates + ')' res = robjects.r(replicates) groups = 'groups <- list(NDE = c('+ "1," * len(self._groups_name) groups = groups[:(len(groups) - 1)] + ')' groups = groups + ', DE = c('+ '1,' * self._replic groups = groups + '2,' * self._replic groups = groups[:(len(groups) - 1)] + "))" print(groups) res = robjects.r(groups) res = robjects.r('CD <- new("countData", data = m, replicates = replicates, groups = groups)') res = robjects.r('libsizes(CD) <- getLibsizes(CD)') res = robjects.r('CD <- getPriors.NB(CD, samplesize = 1000, estimation = "QL", cl = cl, equalDispersions = TRUE)') res = robjects.r('CD <- getLikelihoods(CD, prs=c(0.5, 0.5), pET="BIC", cl=cl)') # CD.posteriors.DE < - exp(CD @ posteriors)[, 2] res = robjects.r('write.table(topCounts(CD, group = "DE", number = 65000, normaliseData = TRUE), "' + self._output +'", sep="\t", quote = FALSE)') self._message.message_9("--- baySeq is completed!") except RRuntimeError as rre: self._message.message_9("Error in baySeq execution: " + str(rre)) raise rre
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(self):\n try:\n self.parse_args()\n self.run()\n return 0\n except AnalysisBackendError as e:\n L.error(e)\n return 1", "def run_analysis(self, argv):\n self._run_argparser(argv)\n self.run()", "def run(self) :\n# print \"evaluating with laban\"\n ...
[ "0.5965446", "0.5941467", "0.5888768", "0.5804564", "0.57392657", "0.5699895", "0.5644962", "0.5548859", "0.5535779", "0.5506015", "0.54572743", "0.54180855", "0.5404095", "0.54018456", "0.53995997", "0.53812164", "0.53697616", "0.5368945", "0.5345558", "0.5340646", "0.533377...
0.6089528
0
Set parameters of the instance.
def fit(self, signal): if signal.ndim == 1: self.signal = signal.reshape(-1, 1) else: self.signal = signal return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_params(self):\n raise NotImplementedError", "def set_params(self, **kwargs):\n ...", "def set_params(self):\r\n pass", "def set_params(self, params):", "def set_params(self, **kwargs) -> NoReturn:\n pass", "def set_params(self,**kwargs):\n for key in kwargs:\n ...
[ "0.8175829", "0.8170592", "0.8165186", "0.79629844", "0.78029823", "0.779995", "0.77458686", "0.7707225", "0.7664552", "0.7520573", "0.7518126", "0.7402048", "0.7351057", "0.7351057", "0.7351057", "0.7351057", "0.7351057", "0.73461854", "0.7336423", "0.73144144", "0.73106277"...
0.0
-1
Returns the requested income range view in full detail.
def GetIncomeRangeView(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getRange(self):\n return self.range", "def displayActiveRange(selforcls):\n vRange = selforcls.activeRange()\n try:\n vRange = (selforcls.toDisplay(min(vRange)),\n selforcls.toDisplay(max(vRange)))\n except AttributeError:\n pass # toDisp...
[ "0.5824725", "0.5612662", "0.55873716", "0.54605365", "0.54605365", "0.5444421", "0.5324229", "0.5322225", "0.5300629", "0.5277442", "0.5269223", "0.5262803", "0.5230335", "0.52228105", "0.5218109", "0.51848346", "0.5179088", "0.51768607", "0.5176457", "0.51640767", "0.515955...
0.700728
0
This is just a big UI setup
def main(): #------------------------------------- Functions def add(text): """ This will add to the display, and be the go to function of most buttons. We'll want to add in conditions for what buttons go. """ orig = dispb["text"] new = orig + text ops = ["+","-","*","/"] # conditions # length 21 if len(new) > 21: dispb["text"] = orig return 0 # one calc at a time if len(orig) > 0: if (orig[-1] in ops) & (text in ops): dispb["text"] = orig return 0 dispb["text"] = new return 0 def clear(): dispb["text"] = "" return 0 def backspace(): dispb["text"] = dispb["text"][:len(dispb["text"])-1] return 0 def equals(): try: dispb["text"] = str(eval(dispb["text"])) except: dispb["text"]="ERROR, clear display" #------------------------------------- UI # title and start calc = tk.Tk() calc.title("Calculator") # size calc.geometry("255x235") #calc.columnconfigure(range(3), weight=1, minsize=50) #calc.rowconfigure(range(1,4), weight=1, minsize=48) # Icon calc.iconbitmap('Icon.ico')#'Icon.ico') calcarea = tk.Frame(master=calc) calcarea.pack(padx=5, pady=10) # display box disp = tk.Frame( master = calcarea ) disp.grid(row = 0, column = 0, columnspan = 3) dispb = tk.Label( master = disp, text = '', fg = 'black', bg = 'white', borderwidth = 1, relief = 'solid', height = 2, width = 19 ) dispb.pack() # number buttons num1 = tk.Frame( master=calcarea ) num1.grid(row = 3, column = 0) num1b = tk.Button( master = num1, text = 1, width = 5, height = 2, command = lambda: add("1") ).pack() # the pack is what adds it to the UI # two num2 = tk.Frame( master=calcarea ) num2.grid(row = 3, column = 1) num2b = tk.Button( master = num2, text = "2", width = 5, height = 2, command = lambda: add("2") ).pack() # three num3 = tk.Frame( master=calcarea ) num3.grid(row = 3, column = 2) num3b = tk.Button( master = num3, text = "3", width = 5, height = 2, command = lambda: add("3") ).pack() # four num4 = tk.Frame( master=calcarea ) num4.grid(row = 2, column = 0) num4b = tk.Button( master = num4, text = "4", 
width = 5, height = 2, command = lambda: add("4") ).pack() # five num5 = tk.Frame( master=calcarea ) num5.grid(row = 2, column = 1) num5b = tk.Button( master = num5, text = "5", width = 5, height = 2, command = lambda: add("5") ).pack() # six num6 = tk.Frame( master=calcarea ) num6.grid(row = 2, column = 2) num6b = tk.Button( master = num6, text = "6", width = 5, height = 2, command = lambda: add("6") ).pack() # seven num7 = tk.Frame( master=calcarea ) num7.grid(row = 1, column = 0) num7b = tk.Button( master = num7, text = "7", width = 5, height = 2, command = lambda: add("7") ).pack() # eight num8 = tk.Frame( master=calcarea ) num8.grid(row = 1, column = 1) num8b = tk.Button( master = num8, text = "8", width = 5, height = 2, command = lambda: add("8") ).pack() # nine num9 = tk.Frame( master=calcarea ) num9.grid(row = 1, column = 2) num9b = tk.Button( master = num9, text = "9", width = 5, height = 2, command = lambda: add("9") ).pack() # zero num0 = tk.Frame( master = calcarea ) num0.grid(row = 4, column = 0) num0b = tk.Button( master = num0, text = 0, width = 5, height = 2, command = lambda: add("0") ).pack() # period dot = tk.Frame( master = calcarea ) dot.grid(row = 4, column = 1) dotb = tk.Button( master = dot, text = ".", width = 5, height = 2, command = lambda: add(".") ).pack() # equal sign eq = tk.Frame( master = calcarea ) eq.grid(row = 4, column = 2, columnspan = 2) eqb = tk.Button( master = eq, text = "=", width = 11, height = 2, command = equals ).pack() # plus sign plus = tk.Frame( master = calcarea ) plus.grid(row = 3, column = 4, rowspan = 2) plusb = tk.Button( master = plus, text = "+", width = 5, height = 5, command = lambda: add("+") ).pack() # minus sign minu = tk.Frame( master = calcarea ) minu.grid(row = 3, column = 3) minub = tk.Button( master = minu, text = "-", width = 5, height = 2, command = lambda: add("-") ).pack() # multiplication mult = tk.Frame( master = calcarea ) mult.grid(row = 2, column = 3) multb = tk.Button( master = mult, text 
= "*", width = 5, height = 2, command = lambda: add("*") ).pack() # division div = tk.Frame( master = calcarea ) div.grid(row = 2, column = 4) divb = tk.Button( master = div, text = "/", width = 5, height = 2, command = lambda: add("/") ).pack() # left parentheses lefp = tk.Frame( master = calcarea ) lefp.grid(row = 1, column = 3) lefpb = tk.Button( master = lefp, text = "(", width = 5, height = 2, command = lambda: add("(") ).pack() # right paraentheses rigp = tk.Frame( master = calcarea ) rigp.grid(row = 1, column = 4) rigpb = tk.Button( master = rigp, text = ")", width = 5, height = 2, command = lambda: add(")") ).pack() # Clear button Clr = tk.Frame( master = calcarea ) Clr.grid(row = 0, column = 3) Clrb = tk.Button( master = Clr, text = "C", width = 5, height = 2, command = clear ).pack() # backspace bck = tk.Frame( master = calcarea ) bck.grid(row = 0, column = 4) bckb = tk.Button( master = bck, text = "\N{RIGHTWARDS BLACK ARROW}", width = 5, height = 2, command = backspace ).pack() # This is what kicks the whole thing off, lets it wait for commands. calc.mainloop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_UI(self):", "def create_widgets(self):", "def init_ui(self):\n raise NotImplementedError", "def init_ui(self):\n raise NotImplementedError", "def init_widget(self):", "def mainWidget(self):\n raise RuntimeError('Not implemented')", "def create_widgets( self ):", "def ...
[ "0.8035472", "0.7076582", "0.70445716", "0.70445716", "0.679653", "0.6779092", "0.6768436", "0.67298573", "0.66679484", "0.6657647", "0.6619304", "0.6563466", "0.6519952", "0.6507672", "0.6497", "0.6435492", "0.6433124", "0.64304245", "0.6426558", "0.64145607", "0.6411895", ...
0.0
-1
This will add to the display, and be the go to function of most buttons. We'll want to add in conditions for what buttons go.
def add(text): orig = dispb["text"] new = orig + text ops = ["+","-","*","/"] # conditions # length 21 if len(new) > 21: dispb["text"] = orig return 0 # one calc at a time if len(orig) > 0: if (orig[-1] in ops) & (text in ops): dispb["text"] = orig return 0 dispb["text"] = new return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display(self):\n\t\tprint('The button in the window was clicked!')", "def show_main_buttons(self):\n pass", "def _add_buttons(self, gui):\n gui.greet_button.pack()\n gui.close_button.pack()\n gui.buttons_on.set(True)", "def load_buttons(self):\n self.playing...
[ "0.68849444", "0.68576396", "0.6849067", "0.65670127", "0.65635157", "0.65260524", "0.6473985", "0.6426153", "0.640084", "0.64003897", "0.63505423", "0.6331861", "0.6318083", "0.62580913", "0.6250697", "0.62376004", "0.6207637", "0.6195303", "0.61879086", "0.61741465", "0.617...
0.0
-1
Runs a single byte through the packet parsing state amchine. Returns NOT_DONE if the packet is incomplete. Returns SUCCESS is the packet was received successfully. Returns CHECKSUM if a checksum error is detected.
def process_byte(self, byte): if self.index == -1: if byte == 0xff: self.index = 0 self.checksum = 0 elif self.index == 0: if byte != 0xff: self.checksum += byte self.pkt_bytes[0] = byte self.index += 1 else: self.checksum += byte self.pkt_bytes[self.index] = byte self.index += 1 if self.index == 7: # packet complete self.index = -1 if self.checksum & 0xff != 0xff: return CommanderRx.CHECKSUM self.lookv = self.pkt_bytes[0] - 128 # 0 - 255 ==> -128 - 127 self.lookh = self.pkt_bytes[1] - 128 self.walkv = self.pkt_bytes[2] - 128 self.walkh = self.pkt_bytes[3] - 128 self.button = self.pkt_bytes[4] self.ext = self.pkt_bytes[5] return CommanderRx.SUCCESS return CommanderRx.NOT_DONE
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readCommand(self):\n while (True):\n time.sleep(1)\n # At least a package of 4 bytes (minimum)\n # [ Head | Length | Address | Data[0…N] | Check ]\n if (self._serial.inWaiting()>=4):\n # Gets only the first byte of the packet (it should be HEAD)...
[ "0.5792792", "0.56804043", "0.5637224", "0.55658627", "0.5561786", "0.5408823", "0.53773457", "0.5322868", "0.5277083", "0.52678686", "0.52137786", "0.5208731", "0.51964766", "0.5184436", "0.51623356", "0.5156039", "0.51016897", "0.51004124", "0.50996864", "0.508539", "0.5084...
0.6739887
0
Returns the parameters of the visualizer.
def parameters(self): return self._params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parameters(self):\n return self.pars", "def parameters(self):\n return {\"W\": self.W,\n \"T\": self.T,\n \"P\": self.P}", "def parameters(self):\n return self.vars", "def show_parameters(self):\n with np.printoptions(precision=3, suppress=True):\...
[ "0.72978354", "0.72489446", "0.71823865", "0.71531653", "0.71393967", "0.70387006", "0.7021024", "0.69855696", "0.6905784", "0.68682903", "0.6830659", "0.6822782", "0.68215054", "0.6812191", "0.6792472", "0.67726594", "0.6756347", "0.6737905", "0.67369354", "0.67361426", "0.6...
0.67491484
17
Returns the path to a package or cwd if that cannot be found.
def _get_package_path(name): # 获取 模块包 路径, Flask() 中 引用 try: return os.path.abspath(os.path.dirname(sys.modules[name].__file__)) except (KeyError, AttributeError): return os.getcwd()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_package_path():\n package_name = get_package_name()\n return package_name.replace('.', '/')", "def get_package_dir():\n return Path(__file__).parent", "def get_packages_path_from_package(package):\n root = finder.get_package_root(package)\n\n if is_built_package(package):\n packag...
[ "0.7696517", "0.7690331", "0.74621767", "0.7385542", "0.735868", "0.7233731", "0.7224455", "0.68511313", "0.6845975", "0.6718231", "0.66957945", "0.6695532", "0.66928047", "0.6686478", "0.66783273", "0.65905523", "0.6572487", "0.6560498", "0.6526827", "0.6526827", "0.6525203"...
0.7207799
7
Opens a resource from the application's resource folder. To see
def open_resource(self, resource): if pkg_resources is None: return open(os.path.join(self.root_path, resource), 'rb') return pkg_resources.resource_stream(self.package_name, resource)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_resource(self, filename):\n assert self.current_run is not None, \"Can only be called during a run.\"\n return self.current_run.open_resource(filename)", "def getResource(self, file_name):\n path = os.path.join(os.path.dirname(__file__), \"resource\", file_name)\n return open(path)",...
[ "0.7365257", "0.73252803", "0.67678213", "0.6268737", "0.61023885", "0.60719913", "0.6015069", "0.59758013", "0.5959422", "0.59363925", "0.5917066", "0.5907269", "0.59051013", "0.5904808", "0.58840156", "0.5865891", "0.5858449", "0.58112895", "0.5795447", "0.5792268", "0.5791...
0.6912169
2
A decorator that is used to register a view function for a
def route(self, rule, **options): def decorator(f): self.add_url_rule(rule, f.__name__, **options) # 添加路由规则 self.view_functions[f.__name__] = f # 更新 视图函数集合, 前面定义,{} return f return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_view( *args, **kwargs ):", "def decorate(func):\n from aha.dispatch.router import get_router\n r = get_router()\n r.connect(None, path, controller = func, **params)\n return func", "def decorator(self, decorator: Route.Decorator):\n pass", "def _wrap...
[ "0.7289756", "0.72566867", "0.7225575", "0.69957423", "0.6992379", "0.6944056", "0.690279", "0.68110013", "0.6685898", "0.659799", "0.64419633", "0.63916886", "0.63874936", "0.63675", "0.63599527", "0.63099504", "0.6304241", "0.62933576", "0.6256121", "0.6230036", "0.62196887...
0.6250833
19
Registers a function to run before each request.
def before_request(self, f): self.before_request_funcs.append(f) return f
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def before_request(self, func: typing.Callable):\n return self.add_hook(type_=\"pre\", hook=func)", "def before_request(self, f):\n self.before_request_handlers.append(f)\n return f", "def before_worker_start(func):\n _func_only(func)\n worker_methods_db.register_before_start(func)\n...
[ "0.8118946", "0.7769813", "0.69504994", "0.67644304", "0.6689539", "0.65856653", "0.65145713", "0.64500964", "0.6426918", "0.6376406", "0.6375528", "0.63239646", "0.6193873", "0.6136782", "0.59849536", "0.5943737", "0.58747", "0.5869897", "0.5857567", "0.58538926", "0.5827149...
0.81964976
0
Register a function to be run after each request.
def after_request(self, f): self.after_request_funcs.append(f) return f
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def after_request_handle(self, func):\n self.after_request.append(func)\n return func", "def after_request(self, f):\n self.after_request_handlers.append(f)\n return f", "def after_request(self, f):\n self.after_request_handlers.append(f)\n return f", "def after_requ...
[ "0.7849521", "0.7643819", "0.7643819", "0.75291634", "0.62461793", "0.618368", "0.61611503", "0.61363375", "0.6126715", "0.61173093", "0.6038293", "0.5960427", "0.5939071", "0.5817307", "0.57547176", "0.57469493", "0.57465345", "0.573892", "0.56387985", "0.5626159", "0.562473...
0.79526246
0
Registers a template context processor function.
def context_processor(self, f): self.template_context_processors.append(f) return f
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register(self):\n REGISTERED_FUNCTIONS[self.path] = self", "def test_enable_extension_registers_context_processors(self):\n class TestExtension(Extension):\n context_processors = ['my_custom_processor']\n\n # Back up the list, so we can replace it later.\n if hasattr(se...
[ "0.56674093", "0.56304383", "0.5513914", "0.5493239", "0.5407706", "0.5372997", "0.52977866", "0.52085143", "0.5204628", "0.51973486", "0.5171223", "0.51613235", "0.51562536", "0.51132387", "0.5089765", "0.50798845", "0.5026726", "0.499174", "0.4989556", "0.49833864", "0.4970...
0.75228786
0
Enroll a new profile to Azure Speaker ID.
def enroll_profile(region, subscription_key, wav_path): fs, audio_data = _check_and_load_wav_file_length(wav_path) profile_id = _add_profile(region, subscription_key) url = "%s/speaker/identification/v2.0/text-independent/profiles/%s/enrollments" % ( _get_azure_endpoint(region), profile_id) headers = { "Ocp-apim-subscription-key": subscription_key, "Content-Type": "audio/wav; codecs=audio/pcm; samplerate=%s" % fs, } session = requests.Session() resp = session.post(url, headers=headers, data=audio_data) print("Enrollment response status code: %s\n" % resp.status_code) print(json.dumps(json.loads(resp.content), indent=2))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def put(self, id):\n adm = Administration()\n print(api.payload)\n lp = LearnProfile.from_dict(api.payload)\n if lp is not None:\n lp.set_id(id)\n adm.save_learnprofile(lp)\n return lp, 200\n\n else:\n return '', 500", "def perform_cr...
[ "0.57121813", "0.5669998", "0.55642205", "0.5468942", "0.54388547", "0.5423377", "0.5405765", "0.53824395", "0.5380911", "0.5366512", "0.52692974", "0.52692974", "0.52692974", "0.52692974", "0.52692974", "0.52692974", "0.525241", "0.5236725", "0.5225078", "0.52119666", "0.520...
0.72638845
0
Calculates the number of suicides for a type of agent given game mode, observability, and game seed. If game seed passed is 1, then all game seeds are aggregated.
def suicide_query(game_mode=0, observability=-1, game_seed=-1, agent=-1): event_id = "death" # Keep only those games within given configuration if game_seed != -1: selection = data.loc[(data['game_mode'] == game_mode) & (data['observability'] == observability) & (data['game_seed'] == game_seed)] else: selection = data.loc[(data['game_mode'] == game_mode) & (data['observability'] == observability)] if agent != -1: for index, row in selection.iterrows(): if agent not in row["agents"]: selection.drop(index, inplace=True) # print(selection.size) team_kill_count = [] ngames = 0 # Number of games in which this agent dies suicides = 0 # Number of games in which this agent commits suicide events_per_sample = [] team_kills = 0 # Iterate through selected game data for index, row in selection.iterrows(): if agent in row["agents"] and row['event_id'] == event_id: # This agent played in the game # Find its agent ID depending on its position in the agent list. There may be more than 1 agent of this # type in the game, so iterate over all and check individually. 
ll = row["agents"] indices = [i for i, el in enumerate(ll) if el == agent] for agent_id in indices: # teammate = (agent_id + 2) % 4 sample_event_counter = 0 for event in row["event_data"]: if event["agent_id"] == agent_id: # This agent dies if event["killer"] == agent_id: # Suicide sample_event_counter += 1 # if event["killer"] == teammate: # Killed by teammate # team_kills += 1 # if event["agent_id"] == teammate: # Teammate dies # if event["killer"] == agent_id: # Killed by this agent # team_kill_count += 1 ngames += 1 events_per_sample.append(sample_event_counter) suicides += sample_event_counter # suicide_count.append(100*suicides/ngames) # Showing percentage of game suicides # team_kill_count.append(100*team_kills/games) # percentage = 100 * suicides / ngames # mean = ngames * (percentage / 100) # variance = mean * (1 - (percentage / 100)) # std_dev = math.sqrt(variance) # std_err = std_dev / math.sqrt(ngames) # h = std_err * scipy.stats.t.ppf(1.95 / 2., ngames - 1) # 95 confidence interval # return percentage, h # print(events_per_sample) mean = suicides/ngames variance = sum([pow(x - mean, 2) for x in events_per_sample])/len(events_per_sample) std_dev = math.sqrt(variance) std_err = std_dev/math.sqrt(len(events_per_sample)) h = std_err * scipy.stats.t.ppf(1.95 / 2., ngames - 1) # 95% confidence interval return mean * 100, h * 100 # , team_kill_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_agent_number_of_players(players):\n return sum([count_players(player) for player in players\n if player.startswith('agent')])", "def test_winners_per_type_sum(self):\n sim = ss.Simulation()\n sim.run_simulation(14)\n winners = sim.winners_per_type()\n assert sum(...
[ "0.56811786", "0.5137774", "0.5076857", "0.503548", "0.4957199", "0.4900915", "0.48782182", "0.4876376", "0.48688662", "0.48500103", "0.47992226", "0.47777793", "0.475898", "0.4754263", "0.47437844", "0.47234103", "0.47161484", "0.4714737", "0.4711583", "0.47112495", "0.47099...
0.65967184
0
Solution for part one.
def solve_part_one(self): self.initialize_values_and_rules() current_bot = None ret = None while True: for k in self.bots: if len(self.bots[k]) == 2: current_bot = k if current_bot is None: break low_type, dest_low, high_type, dest_high = self.rules[current_bot] chips = sorted(self.bots[current_bot]) if chips[0] == 17 and chips[1] == 61: ret = current_bot del self.bots[current_bot] current_bot = None self.assign(low_type, dest_low, chips[0]) self.assign(high_type, dest_high, chips[1]) return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def task4_1(self):\n\n pass", "def exo2():", "def task4(self):\n\n pass", "def substantiate():", "def apply(self) -> None:", "def apply(self) -> None:", "def support(self):", "def mezclar_bolsa(self):", "def solve(self):", "def solvate(self):\n\n pass", "def falcon():", "...
[ "0.67894316", "0.6702227", "0.64681834", "0.6225672", "0.62181926", "0.62181926", "0.6214918", "0.62091845", "0.61323327", "0.6128199", "0.6067991", "0.60675985", "0.6043714", "0.602853", "0.60285074", "0.60285074", "0.60218054", "0.6005739", "0.5983086", "0.5963465", "0.5963...
0.0
-1
Solution for part two.
def solve_part_two(self): return self.outputs[0] * self.outputs[1] * self.outputs[2]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exercise_b2_113():\r\n pass", "def exo2():", "def exercise_b2_82():\r\n pass", "def exercise_b2_106():\r\n pass", "def exercise_b2_52():\r\n pass", "def exercise_b2_69():\r\n pass", "def exercise_b2_53():\r\n pass", "def exercise_b2_27():\r\n pass", "def exercise_b2_107():\...
[ "0.6879843", "0.6871387", "0.6860534", "0.6821481", "0.6766025", "0.67570114", "0.6722933", "0.66447437", "0.6609891", "0.65726656", "0.6570282", "0.65633136", "0.6554844", "0.6482532", "0.6472665", "0.64377874", "0.6428559", "0.6427989", "0.63757336", "0.6357774", "0.6328711...
0.6043972
26
Create a module item.
def create_module_item(self, module_item, **kwargs): unrequired_types = ["ExternalUrl", "Page", "SubHeader"] if isinstance(module_item, dict) and "type" in module_item: # content_id is not required for unrequired_types if module_item["type"] in unrequired_types or "content_id" in module_item: kwargs["module_item"] = module_item else: raise RequiredFieldMissing( "Dictionary with key 'content_id' is required." ) else: raise RequiredFieldMissing("Dictionary with key 'type' is required.") response = self._requester.request( "POST", "courses/{}/modules/{}/items".format(self.course_id, self.id), _kwargs=combine_kwargs(**kwargs), ) module_item_json = response.json() module_item_json.update({"course_id": self.course_id}) return ModuleItem(self._requester, module_item_json)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_item(request):\r\n usage_key = UsageKey.from_string(request.json['parent_locator'])\r\n category = request.json['category']\r\n\r\n display_name = request.json.get('display_name')\r\n\r\n if not has_course_access(request.user, usage_key.course_key):\r\n raise PermissionDenied()\r\n\r...
[ "0.7228198", "0.65279996", "0.63207406", "0.63207406", "0.6307136", "0.6218153", "0.6193564", "0.61688536", "0.6122195", "0.61181813", "0.60919625", "0.6052571", "0.6050587", "0.6030838", "0.6015427", "0.6014498", "0.6013799", "0.59456086", "0.5937084", "0.5928567", "0.592497...
0.7833921
0
Retrieve a module item by ID.
def get_module_item(self, module_item, **kwargs): module_item_id = obj_or_id(module_item, "module_item", (ModuleItem,)) response = self._requester.request( "GET", "courses/{}/modules/{}/items/{}".format( self.course_id, self.id, module_item_id ), _kwargs=combine_kwargs(**kwargs), ) module_item_json = response.json() module_item_json.update({"course_id": self.course_id}) return ModuleItem(self._requester, module_item_json)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getItem(self, id):\n path = 'item/' + id\n return self.sendRestRequest('GET', path)", "def getitem(itemID):\n\n return harvest(GET_ITEM_URL, itemID)", "def get_item_from_modulestore(usage_key, draft=False):\r\n store = modulestore('draft') if draft else modulestore('direct')\r\n ...
[ "0.7972377", "0.7206379", "0.7024295", "0.6994273", "0.6911595", "0.6899674", "0.6879004", "0.6836557", "0.6800232", "0.6766554", "0.6753238", "0.6743982", "0.66724145", "0.66661835", "0.6653945", "0.6582316", "0.6534913", "0.6518929", "0.650722", "0.6454686", "0.63841563", ...
0.6082355
38
List all of the items in this module.
def get_module_items(self, **kwargs): return PaginatedList( ModuleItem, self._requester, "GET", "courses/{}/modules/{}/items".format(self.course_id, self.id), {"course_id": self.course_id}, _kwargs=combine_kwargs(**kwargs), )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_items(self):\n click.echo(\"ID --|-- Item Title\")\n for index, item in enumerate(self.items):\n click.echo(\" {} --|-- {}\".format(index, item.title))", "def get_all_items(self):\n return self.api.state['items']", "def all(self):\n return self.client.request_wit...
[ "0.7802198", "0.7336953", "0.7246295", "0.72275877", "0.72109777", "0.7051289", "0.70437056", "0.69722456", "0.6970122", "0.6892468", "0.6889149", "0.68333614", "0.68188065", "0.6816167", "0.6774345", "0.67634386", "0.6754438", "0.6714948", "0.6712662", "0.6659909", "0.664090...
0.6004262
88
Reset module progressions to their default locked state and recalculates them based on the current requirements. Adding progression requirements to an active course will not lock students out of modules they have already unlocked unless this action is called.
def relock(self, **kwargs): response = self._requester.request( "PUT", "courses/{}/modules/{}/relock".format(self.course_id, self.id), _kwargs=combine_kwargs(**kwargs), ) module_json = response.json() module_json.update({"course_id": self.course_id}) return Module(self._requester, module_json)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def action_lock(self):\n self.state = 'locked'", "def _reset_module_attempts(studentmodule):\r\n # load the state json\r\n problem_state = json.loads(studentmodule.state)\r\n # old_number_of_attempts = problem_state[\"attempts\"]\r\n problem_state[\"attempts\"] = 0\r\n\r\n # save\r\n stu...
[ "0.56739426", "0.55922395", "0.54264724", "0.53600764", "0.5354496", "0.5268627", "0.52453303", "0.52251", "0.5195594", "0.51758677", "0.5144368", "0.5074577", "0.506353", "0.50520587", "0.5046629", "0.5009405", "0.50016654", "0.49919286", "0.49919286", "0.49131522", "0.49008...
0.5340079
5
Mark this module item as done.
def complete(self, **kwargs): response = self._requester.request( "PUT", "courses/{}/modules/{}/items/{}/done".format( self.course_id, self.module_id, self.id ), _kwargs=combine_kwargs(**kwargs), ) module_item_json = response.json() module_item_json.update({"course_id": self.course_id}) return ModuleItem(self._requester, module_item_json)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mark_as_done(self):\n self.status = \"DONE\"", "def mark_as_done(self, task):\n raise NotImplementedError('')", "def item_done(self, rsp=None):\n self.export.item_done(rsp)", "def mark_as_done(self):\n\n done = self.in_progress_scroll_cell.get()\n if done is None:\n ...
[ "0.76290876", "0.7104358", "0.70515716", "0.70057595", "0.675455", "0.6700418", "0.6647", "0.65426934", "0.6470507", "0.64645696", "0.6429567", "0.64175504", "0.64175504", "0.64175504", "0.63983035", "0.6375131", "0.63530964", "0.630775", "0.62934726", "0.62571234", "0.623374...
0.6340129
17
Delete this module item.
def delete(self, **kwargs): response = self._requester.request( "DELETE", "courses/{}/modules/{}/items/{}".format( self.course_id, self.module_id, self.id ), _kwargs=combine_kwargs(**kwargs), ) module_item_json = response.json() module_item_json.update({"course_id": self.course_id}) return ModuleItem(self._requester, module_item_json)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __do_module_delete(item):\n\n file_path = DTF_MODULES_DIR + item.install_name\n\n if utils.delete_file(file_path) != 0:\n log.e(TAG, \"Error removing module file! Continuing.\")\n\n conn = sqlite3.connect(DTF_DB)\n cur = conn.cursor()\n\n # Remove the line first.\n sql = ('DELETE FROM ...
[ "0.76490295", "0.76386184", "0.7293862", "0.7082898", "0.7029256", "0.7006791", "0.6655195", "0.66186965", "0.66186965", "0.66186965", "0.66186965", "0.6595931", "0.657923", "0.65764403", "0.65764403", "0.65596324", "0.65374285", "0.6532332", "0.65124965", "0.6511554", "0.650...
0.7723441
0
Update this module item.
def edit(self, **kwargs): response = self._requester.request( "PUT", "courses/{}/modules/{}/items/{}".format( self.course_id, self.module_id, self.id ), _kwargs=combine_kwargs(**kwargs), ) module_item_json = response.json() module_item_json.update({"course_id": self.course_id}) return ModuleItem(self._requester, module_item_json)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateItem(self, object):\n pass", "def __update_module(item):\n\n conn = sqlite3.connect(DTF_DB)\n cur = conn.cursor()\n\n # Remove the line first.\n sql = ('DELETE FROM modules '\n \"WHERE name='%s'\" % item.name)\n\n cur.execute(sql)\n\n entry = [(item.name, item.about, ...
[ "0.7510124", "0.6885239", "0.6734443", "0.6734443", "0.6734443", "0.6712268", "0.6665374", "0.6665374", "0.6655217", "0.6655217", "0.66270894", "0.66064", "0.660304", "0.65442324", "0.6532563", "0.65279996", "0.64953554", "0.64953554", "0.64953554", "0.64953554", "0.64953554"...
0.67989796
2
Mark this module item as not done.
def uncomplete(self, **kwargs): response = self._requester.request( "DELETE", "courses/{}/modules/{}/items/{}/done".format( self.course_id, self.module_id, self.id ), _kwargs=combine_kwargs(**kwargs), ) module_item_json = response.json() module_item_json.update({"course_id": self.course_id}) return ModuleItem(self._requester, module_item_json)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mark_as_not_done(self):\n grade_event = {'value': 0, 'max_value': self.points}\n self.runtime.publish(self, 'grade', grade_event)", "def test_done_default_value_is_False(self):\n item = Item(name = \"A test item\")\n self.assertEqual(item.name, \"A test item\")\n self.asser...
[ "0.662007", "0.62232697", "0.61218685", "0.5933863", "0.5878998", "0.58310205", "0.5791388", "0.5753141", "0.5665221", "0.56445396", "0.5607032", "0.552165", "0.552165", "0.552165", "0.552165", "0.552165", "0.5493468", "0.545121", "0.5446436", "0.54381365", "0.54248023", "0...
0.5910725
4
fermats theorem where if a n1 = 1(mod n), n is prime,
def fermats(n): randomlist = [] for i in range(10): randomlist.append(random.randrange(2, n-1)) i += 1 for i in randomlist: if successivesquaring(i, n-1, n) != 1: return("n is composite") return("n is probably prime")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fermat_prime(n: int, k: int) -> int:\n assert n > 3 and k >= 1\n for _ in range(k):\n a = random.randint(2, n - 2)\n if pow(a, n - 1, n) != 1: # (a**(n-1)%n) != 1:\n return False\n return True", "def prime_test(n,p):\n for i in range(2, p):\n thing = 1\n wh...
[ "0.7359284", "0.7342926", "0.7322676", "0.7248131", "0.7242679", "0.7216649", "0.7209011", "0.719946", "0.71798897", "0.7179453", "0.71572655", "0.714357", "0.7136565", "0.7130218", "0.7114926", "0.71052164", "0.70893246", "0.7086887", "0.7079261", "0.7075418", "0.7074846", ...
0.7320272
3
Funkcja przyjmuje biblioteke z lista sasiadow dla kazdego wierzcholka. Przeszukuje graf w glab, zwraca kolejnosc odwiedzanych wierzcholkow
def DFS(graph): stack = [] actual_position = '1' stack.append(actual_position) visited_vertices = [] while True: for neighbors in graph.values(): try: neighbors.remove(actual_position) #usun sasiadow o wartosci aktualnej pozycji dla wszystich wierzcholkow grafu except ValueError: pass visited_vertices.append(actual_position) #odwiedzone wierzcholki try: actual_position = min(graph[actual_position]) #przejdz do sasiada o najnizszym numerze except ValueError: stack.remove(actual_position) # sciagamy ze stosu na stos if stack == []: return visited_vertices actual_position = stack.pop(-1) # ustaw z wierzchu stosu pozycje aktualna stack.append(actual_position) # dajemy na stos aktualna pozycje
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_publications(bib_format=\"dict\"):\n\n def get_bibtex(key, value):\n total_keys = [\n \"title\",\n \"journal\",\n \"volume\",\n \"issue\",\n \"number\",\n \"pages\",\n \"numpages\",\n \"year\",\n \...
[ "0.5889567", "0.556562", "0.550952", "0.5432978", "0.535185", "0.52912766", "0.52754295", "0.5218949", "0.5215099", "0.51674855", "0.5162123", "0.51455015", "0.5136601", "0.512709", "0.5073164", "0.5071011", "0.5048508", "0.50347453", "0.503294", "0.50080127", "0.49941462", ...
0.0
-1
multiply mx + b
def __init__(self, alphabet, m, b): # We're cheating here by not actually having the decryption method use the "inverse" argument transformed = alphabet.affinal(m, b) super(AffineCipher, self).__init__(alphabet, transformed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mul(self, a, b):\n return a * b", "def multiply(self, a, b):\n return a * b", "def addmul(a,b):\n return a*b+a*b", "def mul(a,b):\r\n return a*b", "def matmul(a, b):\n raise NotImplementedError", "def mul(x, y):\n return multiply(x, y)", "def _mul(a, b):\n return a * b"...
[ "0.7423531", "0.7327661", "0.73207116", "0.7261051", "0.72316414", "0.7190819", "0.71892846", "0.7182553", "0.7133698", "0.71076566", "0.7082618", "0.7080708", "0.7065419", "0.7065419", "0.7065419", "0.7065419", "0.7065419", "0.7064199", "0.70600754", "0.7036849", "0.7033024"...
0.0
-1
Use the args to identify the appropriate model class
def __createCovidModelInstance(self, *args, **kwargs): try: if 'MODEL_TYPE' in kwargs: if kwargs['MODEL_TYPE'] == CovidModel.AGGREGATE_CASES_DECEASED: covidModel = CovidAggregateTotals() self.CovidData = covidModel.getData(*args,**kwargs) self.DataAvailable=self.__isDataAvailable(self.CovidData) return if kwargs['MODEL_TYPE'] == CovidModel.MONTHLY_CASES_DECEASED: covidModel = CovidMonthlyTotals() self.CovidData = covidModel.getData(*args,**kwargs) self.DataAvailable=self.__isDataAvailable(self.CovidData) return if kwargs['MODEL_TYPE'] == CovidModel.PAST_30_DAYS: covidModel = CovidDailyTotals() self.CovidData = covidModel.getData(*args,**kwargs) self.DataAvailable=self.__isDataAvailable(self.CovidData) return if kwargs['MODEL_TYPE'] == CovidModel.MESSAGES: covidModel = CovidMessages() self.CovidData = covidModel.getData(*args,**kwargs) self.DataAvailable=self.__isDataAvailable(self.CovidData) return if kwargs['MODEL_TYPE'] == CovidModel.LOCATIONS: covidModel = CovidLocationInfo() self.CovidData = covidModel.getData(*args,**kwargs) self.DataAvailable=self.__isDataAvailable(self.CovidData) return print ("CovidMessages.__createCovidModelInstance() - did not receive a recognizable model type - no model object instantiated. Args received = ",kwargs) return None except: print ("CovidMessages.__createCovidModelInstance() - unexpected error: ",sys.exc_info()[0]) return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def choose_class(self, *args, **kwargs):", "def convert_to_model(self, *args):", "def do_create(self, argv):\n if argv in self.__names:\n new_instance = self.__names[argv]()\n new_instance.save()\n print(\"{}\".format(new_instance.id))\n elif len(argv) is 0:\n ...
[ "0.7021158", "0.6954336", "0.6561183", "0.6485083", "0.6346957", "0.63075995", "0.6284837", "0.6276257", "0.6271687", "0.62641424", "0.62498045", "0.6244961", "0.6220076", "0.6219613", "0.62072104", "0.61494976", "0.612144", "0.6111465", "0.60642654", "0.599565", "0.5991916",...
0.57535255
44
Get List of Following Based on user id
def get_following_by_user(request): response, status_code = get_followings(request) if status_code != 200: return JsonResponse(response, status=status_code) serialize_data = FollowingSerializer(response, many=False).data return JsonResponse(serialize_data, status=status_code, safe=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_following(user_id):\n return list(Forward.objects.filter(source_id=user_id).values_list(\n 'destination_id', flat=True))", "def get_followings(request):\n user_id = request.GET.get(\"user_id\")\n if not user_id:\n return {\"error\": \"User Id should be provided\"}, 400\n followi...
[ "0.8282055", "0.7685639", "0.76842815", "0.76074857", "0.7492455", "0.7339311", "0.7327111", "0.7205236", "0.71110606", "0.7069504", "0.70240045", "0.69244426", "0.68713117", "0.68708545", "0.68651557", "0.6858568", "0.68433887", "0.67436033", "0.6737113", "0.67269635", "0.66...
0.7454274
5
Aux function for permutation_t_test (for parallel comp).
def _max_stat(X, X2, perms, dof_scaling): n_samples = len(X) mus = np.dot(perms, X) / float(n_samples) stds = np.sqrt(X2[None, :] - mus * mus) * dof_scaling # std with splitting max_abs = np.max(np.abs(mus) / (stds / sqrt(n_samples)), axis=1) # t-max return max_abs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def PermutationTest(self):\n # U = union of B and T\n union_sample = np.concatenate((self.x_benchmark, self.x_trial), axis=0)\n n_samples = self.NB + self.NT\n \n # Initialize array of test statistic values\n self.TS_tilde = np.zeros(self.n_perm, dtype=np.float)\n \...
[ "0.6563847", "0.65312606", "0.627106", "0.625189", "0.62488693", "0.6234156", "0.62234485", "0.6194745", "0.60110074", "0.59988236", "0.59065497", "0.5900468", "0.5838992", "0.5809497", "0.58039767", "0.5691553", "0.5606442", "0.5583079", "0.555996", "0.5553208", "0.55497146"...
0.0
-1
One sample/paired sample permutation test based on a tstatistic. This function can perform the test on one variable or simultaneously on multiple variables. When applying the test to multiple variables, the "tmax" method is used for adjusting the pvalues of each variable for multiple comparisons. Like Bonferroni correction, this method adjusts pvalues in a way that controls the familywise error rate. However, the permutation method will be more powerful than Bonferroni correction when different variables in the test
def permutation_t_test( X, n_permutations=10000, tail=0, n_jobs=None, seed=None, verbose=None ): from .cluster_level import _get_1samp_orders n_samples, n_tests = X.shape X2 = np.mean(X**2, axis=0) # precompute moments mu0 = np.mean(X, axis=0) dof_scaling = sqrt(n_samples / (n_samples - 1.0)) std0 = np.sqrt(X2 - mu0**2) * dof_scaling # get std with var splitting T_obs = np.mean(X, axis=0) / (std0 / sqrt(n_samples)) rng = check_random_state(seed) orders, _, extra = _get_1samp_orders(n_samples, n_permutations, tail, rng) perms = 2 * np.array(orders) - 1 # from 0, 1 -> 1, -1 logger.info("Permuting %d times%s..." % (len(orders), extra)) parallel, my_max_stat, n_jobs = parallel_func(_max_stat, n_jobs) max_abs = np.concatenate( parallel( my_max_stat(X, X2, p, dof_scaling) for p in np.array_split(perms, n_jobs) ) ) max_abs = np.concatenate((max_abs, [np.abs(T_obs).max()])) H0 = np.sort(max_abs) if tail == 0: p_values = (H0 >= np.abs(T_obs[:, np.newaxis])).mean(-1) elif tail == 1: p_values = (H0 >= T_obs[:, np.newaxis]).mean(-1) elif tail == -1: p_values = (-H0 <= T_obs[:, np.newaxis]).mean(-1) return T_obs, p_values, H0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def PermutationTest(self):\n # U = union of B and T\n union_sample = np.concatenate((self.x_benchmark, self.x_trial), axis=0)\n n_samples = self.NB + self.NT\n \n # Initialize array of test statistic values\n self.TS_tilde = np.zeros(self.n_perm, dtype=np.float)\n \...
[ "0.66151273", "0.6504498", "0.60829633", "0.6001584", "0.5969407", "0.5941336", "0.5911625", "0.5903978", "0.5805787", "0.5780182", "0.57714087", "0.56875885", "0.56720537", "0.56512654", "0.5612089", "0.55364573", "0.5505601", "0.55000603", "0.5491501", "0.54649895", "0.5463...
0.70762056
0
Get confidence intervals from nonparametric bootstrap.
def bootstrap_confidence_interval( arr, ci=0.95, n_bootstraps=2000, stat_fun="mean", random_state=None ): if stat_fun == "mean": def stat_fun(x): return x.mean(axis=0) elif stat_fun == "median": def stat_fun(x): return np.median(x, axis=0) elif not callable(stat_fun): raise ValueError("stat_fun must be 'mean', 'median' or callable.") n_trials = arr.shape[0] indices = np.arange(n_trials, dtype=int) # BCA would be cool to have too rng = check_random_state(random_state) boot_indices = rng.choice(indices, replace=True, size=(n_bootstraps, len(indices))) stat = np.array([stat_fun(arr[inds]) for inds in boot_indices]) ci = (((1 - ci) / 2) * 100, ((1 - ((1 - ci) / 2))) * 100) ci_low, ci_up = np.percentile(stat, ci, axis=0) return np.array([ci_low, ci_up])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def confidence_intervals(data):\r\n\r\n x_bar = np.nanmean(data) # Mean value\r\n s = np.nanstd(data) # Standard deviation\r\n n = len(data) # Sample size\r\n\r\n lo_conf = x_bar - (1.96 * (s / np.sqrt(n))) # Lower bound of confidence interval\r\n hi_conf = x_bar + (1.96 * ...
[ "0.6679884", "0.6648013", "0.6620062", "0.66174746", "0.6499037", "0.646875", "0.6136825", "0.6136825", "0.61319476", "0.6111559", "0.60788226", "0.6075902", "0.6074073", "0.5812202", "0.5790033", "0.57862234", "0.57668763", "0.57511514", "0.574001", "0.5700028", "0.5675738",...
0.6944301
0
Calculate confidence interval. Aux function for plot_compare_evokeds.
def _ci(arr, ci=0.95, method="bootstrap", n_bootstraps=2000, random_state=None): if method == "bootstrap": return bootstrap_confidence_interval( arr, ci=ci, n_bootstraps=n_bootstraps, random_state=random_state ) else: from .parametric import _parametric_ci return _parametric_ci(arr, ci=ci)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def confidence_interval(self):\r\n coh_var = np.zeros((self.input.data.shape[0],\r\n self.input.data.shape[0],\r\n self._L), 'd')\r\n for i in range(self.input.data.shape[0]):\r\n for j in range(i):\r\n if i != j:\r\n ...
[ "0.67617357", "0.67138135", "0.65338296", "0.6432632", "0.6410642", "0.6392413", "0.6342866", "0.6287954", "0.6283847", "0.62821895", "0.60609925", "0.60566366", "0.60566366", "0.60537523", "0.5997194", "0.597665", "0.597048", "0.5967721", "0.59433526", "0.59037447", "0.59007...
0.5617236
37
Handler for Skill Launch.
def launch_request_handler(handler_input): # type: (HandlerInput) -> Response speech = "Welcome to the Merriam-Webster Dictionary. What word can I look up for you?" reprompt = "You can say: definition of word, example of word, or synonym of word." handler_input.response_builder.speak(speech).ask(reprompt) return handler_input.response_builder.response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_launch(launch_request, session):\r\n\r\n #print(\"****on_launch requestId=\" + launch_request['requestId'] +\r\n # \", sessionId=\" + session['sessionId'])\r\n # Dispatch to your skill's launch\r\n return get_welcome_response()", "def on_launch(launch_request, session):\n # Dispatch to...
[ "0.7680054", "0.7644753", "0.7633401", "0.7608987", "0.7601289", "0.7596736", "0.75540215", "0.75540215", "0.755073", "0.7501771", "0.7501771", "0.7501771", "0.7485739", "0.7485739", "0.7485739", "0.7485739", "0.7485739", "0.7485739", "0.7485739", "0.7485739", "0.7485739", ...
0.56940675
48
Handler for Help Intent.
def help_intent_handler(handler_input): # type: (HandlerInput) -> Response handler_input.response_builder.speak(help_text).ask(help_text) return handler_input.response_builder.response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Help(self, event):\n Help(self)", "def on_bot_help(update, _context):\n update.message.reply_text(c.MSG_HELP)", "def help_handler(bot, update):\n logger.info(f\"Help command received. Chat ID: {update.message.chat_id}\")\n update.message.reply_text(config.HELP_MESSAGE)", "def onHelp(sel...
[ "0.78951234", "0.7641993", "0.7596383", "0.75123024", "0.7342911", "0.7307534", "0.72152776", "0.71921116", "0.7188953", "0.7140122", "0.7140122", "0.7116266", "0.7116266", "0.7107287", "0.7099637", "0.7098016", "0.7098016", "0.7098016", "0.7098016", "0.7089918", "0.7089918",...
0.72494954
8
Single handler for Cancel and Stop Intent.
def cancel_and_stop_intent_handler(handler_input): # type: (HandlerInput) -> Response speech_text = "Goodbye!" return handler_input.response_builder.speak(speech_text).response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cancel_and_stop_intent_handler(handler_input):\n return cancel_and_stop_request(handler_input, QUIT_MINUS_POINTS)", "def cancel_and_stop_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech_text = \"Stopping.\"\n\n return handler_input.response_builder.speak(speech_text).s...
[ "0.69462967", "0.67611676", "0.66303724", "0.6449503", "0.6449503", "0.64251965", "0.64251965", "0.6423484", "0.62693846", "0.61464477", "0.6137985", "0.6082715", "0.6080662", "0.6080662", "0.6080662", "0.60464203", "0.60244685", "0.6008756", "0.59916556", "0.5986091", "0.594...
0.63202274
10
Handler for Session End.
def session_ended_request_handler(handler_input): # type: (HandlerInput) -> Response return handler_input.response_builder.response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def end_session(self):\n\t\t...", "def on_session_ended(session_ended_request, session):\n print(\"END SESSION\")\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(...
[ "0.7643362", "0.76416", "0.7545587", "0.7457293", "0.7457293", "0.7399156", "0.72849315", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.7...
0.0
-1
Check if word is provided in slot values. Send word to URLbuilder and return JSON data. Give user definition information.
def my_word_definition_handler(handler_input): # type: (HandlerInput) -> Response slots = handler_input.request_envelope.request.intent.slots if word_slot in slots: curr_word = slots[word_slot].value handler_input.attributes_manager.session_attributes[ word_slot_key] = curr_word try: response = http_get(curr_word, False) if response: speech = ("The definition of {} with part of speech {} " "is: {}".format(curr_word, response[0]['fl'], response[0]['shortdef'][0])) reprompt = ("What word would you like me to look up?") else: speech = ("I am sorry I could not find the word {}").format(curr_word) reprompt = ("What word would you like me to look up?") except: speech = ("I am sorry I could not find the word {}. " "Can I look up another word?").format(curr_word) reprompt = ("What word would you like me to look up?") else: speech = "I'm not sure what word to look up, please try again" reprompt = ("I didn't catch that. What word would you like me " "me to look up?") handler_input.attributes_manager.session_attributes[previous_key] = speech handler_input.response_builder.speak(speech).ask(reprompt) return handler_input.response_builder.response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def my_word_example_handler(handler_input):\n # type: (HandlerInput) -> Response\n slots = handler_input.request_envelope.request.intent.slots\n\n if example_slot in slots:\n curr_word = slots[example_slot].value\n handler_input.attributes_manager.session_attributes[\n example_slo...
[ "0.61216205", "0.58270305", "0.58179843", "0.57400197", "0.5649318", "0.5606277", "0.5591704", "0.5455674", "0.5431328", "0.5341701", "0.5329532", "0.52859133", "0.5281709", "0.52787936", "0.52630574", "0.51918024", "0.51817644", "0.5147042", "0.51228225", "0.5112976", "0.511...
0.6205886
0
This function handles the example sentence intent
def my_word_example_handler(handler_input): # type: (HandlerInput) -> Response slots = handler_input.request_envelope.request.intent.slots if example_slot in slots: curr_word = slots[example_slot].value handler_input.attributes_manager.session_attributes[ example_slot_key] = curr_word try: response = http_get(curr_word, False) if response: example = response[0]['def'][0]['sseq'][0][0][1]['dt'][1][0] if example == "vis": vis = remove_italics(response[0]['def'][0]['sseq'][0][0][1]['dt'][1][1][0]['t']) speech = ("An example with {} (part of speech {}) " "is: {}".format(curr_word, response[0]['fl'], vis)) elif example == "wsgram": vis = remove_italics(response[0]['def'][0]['sseq'][0][0][1]['dt'][2][1][0]['t']) speech = ("An example with {} (part of speech {}) " "is: {}".format(curr_word, response[0]['fl'], vis)) else: speech = ("No example is available for {}").format(curr_word) reprompt = ("What word would you like me to look up?") else: speech = ("No example is available for {}").format(curr_word) reprompt = ("What word would you like me to look up?") except Exception as e: speech = ("No example is available for {}. " "Can I look up another word?").format(curr_word) reprompt = ("What word would you like me to look up?") else: speech = "I'm not sure what word to look up, please try again" reprompt = ("I didn't catch that. What word would you like me " "me to look up?") handler_input.attributes_manager.session_attributes[previous_key] = speech handler_input.response_builder.speak(speech).ask(reprompt) return handler_input.response_builder.response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_sentence(self):", "def onCurrentSentence(self, *_args):\n global instance\n log(str(_args))\n #if (instance.isSpeaking and len(_args[1])==0): instance.SpeakDone()\n return", "def hook(self, sentence, words):\n pass", "def handle_gui_example_three_intent(self, messag...
[ "0.67601943", "0.6399362", "0.6366831", "0.63550425", "0.6288494", "0.62700784", "0.6220967", "0.6183083", "0.6126741", "0.6104425", "0.60613656", "0.60591984", "0.6058863", "0.59571433", "0.5951869", "0.59310657", "0.5929599", "0.5913799", "0.5897607", "0.589439", "0.5894384...
0.653233
1
Look up word in thesaurus
def my_word_example_handler(handler_input): # type: (HandlerInput) -> Response slots = handler_input.request_envelope.request.intent.slots if synonym_slot in slots: curr_word = slots[synonym_slot].value handler_input.attributes_manager.session_attributes[ synonym_slot_key] = curr_word try: synonyms = http_get(curr_word, True) if type(synonyms[0]) == dict: speech = ("A synonym for {} is {}".format(curr_word, synonyms[0]['meta']['syns'][0][0])) synonym_list = synonyms[0]['meta']['syns'][0] reprompt = ("What word would you like a synonym for?") else: speech = ("No synonyms for {} are available. " "Can I look up another word?").format(curr_word) reprompt = ("What word would you like a synonym for?") except: speech = ("No synonyms for {} are available. " "Can I look up another word?").format(curr_word) reprompt = ("What word would you like a synonym for?") else: speech = "I'm not sure what word to find a synonym for, please try again" reprompt = ("I didn't catch that. What word would you like me " "me to look up a synonym for?") handler_input.attributes_manager.session_attributes[previous_key] = speech handler_input.response_builder.speak(speech).ask(reprompt) return handler_input.response_builder.response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lookup(self, word):\n word = word.lower()\n if self.stemmer:\n word = self.stemmer.stem(word)\n \n return [self.documents.get(id, None) for id in self.index.get(word)]", "def thesaurus(self, message):\n read_pointer = open('Thesaurus.txt')\n\n for line in read_po...
[ "0.71993554", "0.6819215", "0.6670339", "0.6578899", "0.6435337", "0.6270199", "0.6232829", "0.6166652", "0.61357725", "0.60544753", "0.59272295", "0.59103805", "0.59084", "0.59001476", "0.58918864", "0.58608", "0.5855626", "0.5844507", "0.58381885", "0.58219075", "0.58169264...
0.0
-1
AMAZON.FallbackIntent is only available in enUS locale. This handler will not be triggered except in that locale, so it is safe to deploy on any locale.
def fallback_handler(handler_input): # type: (HandlerInput) -> Response speech = ( "The {} skill can't help you with that. " "I can look up a word in the dictionary for you").format(skill_name) reprompt = ("I can look up a word in the dictionary, " "Just say any word in English") handler_input.response_builder.speak(speech).ask(reprompt) return handler_input.response_builder.response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fall_back_message():\r\n card_title = \"Fallback Message\"\r\n fallback_string = \"Sorry. I couldn't understood it. Please say again.\"\r\n should_end_session = False\r\n session_attributes = { \r\n \"speech_output\": fallback_string,\r\n \r\n ...
[ "0.5877872", "0.5590493", "0.54645765", "0.54645765", "0.5408452", "0.5357488", "0.5357488", "0.53020567", "0.52696717", "0.5181241", "0.5174385", "0.5174385", "0.5167612", "0.51544356", "0.5071739", "0.5060613", "0.5031619", "0.49786136", "0.4932573", "0.4868343", "0.4852090...
0.5607832
1
convert ssml speech to text, by removing html tags.
def convert_speech_to_text(ssml_speech): # type: (str) -> str s = SSMLStripper() s.feed(ssml_speech) return s.get_data()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ssml_to_text(ssml):\n return re.sub(r\"<[^>]+>\", \"\", ssml)", "def convert_text_to_ssml(chunk):\r\n # Escape chars that are forbidden in SSML\r\n chunk = chunk.replace('\"', \"&quot;\").replace('&', \"&amp;\").replace(\"'\", \"&apos;\").replace(\"<\", \"&lt;\").replace(\">\", \"&gt;\")\r\n # <p...
[ "0.74603456", "0.71631587", "0.63201004", "0.6288619", "0.6236227", "0.6161472", "0.6157704", "0.61351144", "0.6124986", "0.6062542", "0.6052379", "0.5990713", "0.5988849", "0.59758717", "0.5871905", "0.5855006", "0.5835654", "0.58335525", "0.58257437", "0.5820562", "0.579984...
0.7831128
3
Add a card by translating ssml text to card content.
def add_card(handler_input, response): # type: (HandlerInput, Response) -> None response.card = SimpleCard( title=skill_name, content=convert_speech_to_text(response.output_speech.ssml))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addContent(text):", "def createTextCard(self, x, z):\n self.myText = TextNode('scrollValue')\n funcs.setZeroToText(self.myText, self.designsLeft)\n self.enableSubmit()\n self.myText.setFont(self.font)\n self.myText.setCardColor(globals.colors['guiblue3'])\n self.myTe...
[ "0.61664975", "0.6056356", "0.6032832", "0.5922822", "0.5634238", "0.5634238", "0.5634238", "0.5634238", "0.5613592", "0.5592778", "0.5525272", "0.54307914", "0.53953665", "0.5383566", "0.53688663", "0.53124416", "0.52927655", "0.52916425", "0.5291438", "0.52559805", "0.52509...
0.622388
2
Log response from alexa service.
def log_response(handler_input, response): # type: (HandlerInput, Response) -> None print("Alexa Response: {}\n".format(response))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_response(self, response):\n log.debug(\"Received response: %s\", response)", "def on_a(self):\r\n self.log()", "def log_response(task_request, response):\n msg = \"{0.status_code} {0.reason} for {0.url}: {0.content}\".format(response)\n log_info(task_request, msg)", "def __call__(s...
[ "0.6283806", "0.6229867", "0.60874593", "0.6051032", "0.6027314", "0.5980479", "0.5956262", "0.5953968", "0.5945938", "0.5923983", "0.57441443", "0.5655665", "0.5653018", "0.56518316", "0.5630625", "0.5618453", "0.5608918", "0.56035566", "0.55921704", "0.55480033", "0.5538451...
0.7056861
2
Log request to alexa service.
def log_request(handler_input): # type: (HandlerInput) -> None print("Alexa Request: {}\n".format(handler_input.request_envelope.request))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _log_request(self):\n log = self.server.log\n if log:\n if hasattr(log, \"info\"):\n log.info(self.format_request() + '\\n')\n else:\n log.write(self.format_request() + '\\n')", "def _log_request(res: SpamResult) -> None:\n _log.info(f\"requestId=[{request.id}] result=[{res.label}] reaso...
[ "0.68515676", "0.6780339", "0.6731675", "0.67187905", "0.6713761", "0.6655223", "0.66517013", "0.64173084", "0.6374145", "0.63045406", "0.6302998", "0.6223099", "0.62073785", "0.6184737", "0.61604834", "0.6096278", "0.6084718", "0.60662866", "0.60651225", "0.59968835", "0.594...
0.7240547
2
Catch all exception handler, log exception and respond with custom message.
def all_exception_handler(handler_input, exception): # type: (HandlerInput, Exception) -> None print("Encountered following exception: {}".format(exception)) speech = "That word is not in the dictionary. Please choose another word." handler_input.response_builder.speak(speech).ask(speech) return handler_input.response_builder.response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_exception_handler(handler_input, exception):\n # type: (HandlerInput, Exception) -> Response\n logger.error(exception, exc_info=True)\n speech = \"Sorry, an exception occurred. Please say again!!\"\n handler_input.response_builder.speak(speech).ask(speech)\n return handler_input.response_bui...
[ "0.71580285", "0.69891423", "0.68551576", "0.68176335", "0.66826653", "0.65763485", "0.6536817", "0.6521186", "0.6516178", "0.65054154", "0.64914984", "0.6473506", "0.6451797", "0.6449465", "0.64328057", "0.63658404", "0.6359318", "0.6342113", "0.632365", "0.6301916", "0.6286...
0.5860892
68
Return images for one given region, owned by self
def getImages(region): creds = credentials() try: conn = ec2.connect_to_region(region, **creds) images = conn.get_all_images(owners=['self']) except boto.exception.EC2ResponseError: return [] return images
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getregion(self, *args, **kwargs):\n return _image.image_getregion(self, *args, **kwargs)", "def get_image_by_version(self, region, version=None):\n pass", "def get_images(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_ima...
[ "0.72098744", "0.719231", "0.70230323", "0.64151984", "0.63881266", "0.6098165", "0.6044503", "0.60154766", "0.6011218", "0.6006212", "0.6000203", "0.59448403", "0.5925909", "0.5922348", "0.5905722", "0.5890936", "0.5886264", "0.58704627", "0.58661884", "0.5858619", "0.584289...
0.6458549
3
Return list of snapshot_ids associated with the given image
def getSnapshotsOf(image): snapshotIds = [] deviceMapping = image.block_device_mapping # dict of devices devices = deviceMapping.keys() for d in devices: snapshotId = deviceMapping[d].snapshot_id if snapshotId is not None: snapshotIds.append(snapshotId.encode()) return snapshotIds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_snapshots_of(image):\n snapshot_ids = []\n device_mapping = image.block_device_mapping # dict of devices\n devices = device_mapping.keys()\n for device in devices:\n if device_mapping[device].snapshot_id is not None:\n snapshot_ids.append(device_mappin...
[ "0.82660115", "0.6894388", "0.6246945", "0.6215981", "0.61946225", "0.60906774", "0.6078066", "0.607749", "0.5850587", "0.5829291", "0.5715038", "0.57149994", "0.5700483", "0.56884575", "0.5666648", "0.5638562", "0.56358933", "0.56192404", "0.55592096", "0.55305934", "0.55255...
0.8464934
0
Use dictionaries 'cos we'll have to crossreference to get snapshots that go with the AMIs returns list of dictionaries representing images from one region
def getImagesD(region): images = getImages(region) imageDicts = [] for im in images: imageDict = {"name": im.name, "id": im.id, "region": im.region.name, "state": im.state, "created": im.creationDate, "type": im.type, "KEEP": getKeepTag(im), "name_tag": get_name_tag(im), "snapshots": getSnapshotsOf(im), "description": im.description, "PROD": isProduction(im) } imageDicts.append(imageDict) return imageDicts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSnapshotsD(region):\n # Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it)\n snapshots = getSnapshots(region)\n snapshotsDicts = []\n ims = getImages(region)\n for s in snapshots:\n amis = getAmisOf(s, ims)\n amiIds = []\n ...
[ "0.74375165", "0.669343", "0.6320971", "0.6303299", "0.6287237", "0.6220304", "0.6202331", "0.6175909", "0.60951483", "0.6012867", "0.60106635", "0.59991914", "0.59578496", "0.5940436", "0.5936577", "0.5930096", "0.5909181", "0.5908506", "0.5903435", "0.58975405", "0.5881977"...
0.7583085
0
return a list of dictionaries representing snapshots from one region
def getSnapshotsD(region): # Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it) snapshots = getSnapshots(region) snapshotsDicts = [] ims = getImages(region) for s in snapshots: amis = getAmisOf(s, ims) amiIds = [] amiKeeps = [] if len(amis) == 1: amiIds = amis[0].id.encode() amiKeeps = getKeepTag(amis[0]) elif len(amis) == 0: amiIds = "-------no-AMI-found" amiKeeps = "-------no-AMI-found" else: for a in amis: amiIds.append(a.id.encode()) amiKeeps.append(getKeepTag(a)) snapshotsDict = {"id": s.id, "status": s.status, "region": s.region.name, "progress": s.progress, "start_time": s.start_time, "volume_id": s.volume_id, "volume_size": s.volume_size, "KEEP-tag": getKeepTag(s), "Name": get_name_tag(s), "AMI(s)": amiIds, "AMI_KEEP-tags": amiKeeps, "PROD": isProduction(s), "Description": s.description } snapshotsDicts.append(snapshotsDict) return snapshotsDicts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_snapshots(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_snapshots = conn.get_all_snapshots(owner='self')\n except boto.exception.EC2ResponseError:\n return []\n return region_snapshots", "def items(self):\n ...
[ "0.71616983", "0.68013346", "0.64756656", "0.6455287", "0.6392508", "0.6385133", "0.63725513", "0.6275416", "0.6235013", "0.62241733", "0.6165991", "0.6158216", "0.6134359", "0.605335", "0.6047018", "0.6043569", "0.6014061", "0.5964335", "0.5959861", "0.59526664", "0.5890748"...
0.78463835
0
return a list of dictionaries representing volumes from one region
def getVolumesD(region): volumes = getVolumes(region) instances = getInstancesD(region) volumesDicts = [] for v in volumesDicts: volumesDict = {"id": v.id, "KEEP-tag": getKeepTag(v), "instance_KEEP-tag": getKeepTag(getInstanceOf(v)), "instance": v.attach_data.instance_id, "status": v.status, "size": v.size, "create-time": v.create_time, "region": v.region.name, "zone": v.zone, "snapshot_id": v.snapshot_id, "PROD": isProduction(v) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_volumes(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_volumes = conn.get_all_volumes()\n except boto.exception.EC2ResponseError:\n return [] # This better not fail silently or I'll cut a person.\n return regi...
[ "0.70386356", "0.68808", "0.6829185", "0.68042403", "0.6804185", "0.66357", "0.65796185", "0.6541075", "0.64998627", "0.6476049", "0.6467089", "0.6443325", "0.641429", "0.64000684", "0.62908834", "0.62458056", "0.61916316", "0.6191406", "0.611974", "0.6119484", "0.61047727", ...
0.77420294
0
return a list of dictionaries representing instances for one region, will help with volumeinstanceKEEPtag lookup. Maybe.
def getInstancesD(region):
    """Return a list of dictionaries representing instances for one region.

    Intended to support volume -> instance KEEP-tag lookups.

    Fixes: the original referenced an undefined loop variable ``i`` outside
    any loop, built (at most) a single dict, and returned nothing.
    """
    instances = getInstances(region)
    instancesDicts = [{"id": i.id,
                       "KEEP-tag": getKeepTag(i),
                       "instance_type": i.instance_type,
                       "state": i.state,
                       "launch_time": i.launch_time,
                       "security_groups": getGroups(i),
                       "region": i.region.name,
                       "PROD": isProduction(i)}
                      for i in instances]
    return instancesDicts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n ...
[ "0.72672004", "0.71622515", "0.68902445", "0.68787754", "0.68357", "0.6603997", "0.6595504", "0.6591907", "0.6584421", "0.6475704", "0.6437322", "0.64160633", "0.63718474", "0.63532615", "0.63506424", "0.63015544", "0.62685406", "0.6161752", "0.6136149", "0.61351824", "0.6126...
0.7885678
0
retrieve list of AMIs that refer to a given snapshot
def getAmisOf(snapshot, images):
    """Retrieve the AMIs (among *images*) that refer to *snapshot*.

    An image is added once per matching snapshot id it lists, mirroring the
    original per-snapshot-id append behaviour.
    """
    matching = []
    target = snapshot.id
    for image in images:
        matching.extend(image for sid in getSnapshotsOf(image)
                        if sid == target)
    return matching
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_amis_of(snapshot_id):\n mes_amis = []\n # There has GOT to be a better way. Hmm... maybe not\n keys = Ims.spreadsheet.keys()\n for key in keys:\n if snapshot_id in Ims.spreadsheet[key]['associated_snapshots']:\n mes_amis.append(key)\n return mes_...
[ "0.72130966", "0.6595902", "0.6541823", "0.6166669", "0.61240566", "0.60049653", "0.59619623", "0.5923196", "0.5922994", "0.5888076", "0.5858373", "0.5827235", "0.58137566", "0.580514", "0.57657516", "0.57389045", "0.5632397", "0.56060064", "0.5601814", "0.56017476", "0.55515...
0.77030766
0
If a tag with key='KEEP' exists, return its value (which can be an empty string); otherwise return the placeholder '-------no-tag'.
def getKeepTag(obj):
    """Return the value of *obj*'s 'KEEP' tag (possibly an empty string).

    Resources without the tag yield the placeholder '-------no-tag' so that
    rows line up in the TSV reports.  Note: some resources carry an empty
    KEEP tag; in the web console they look identical to untagged ones.

    Change: removed the commented-out try/except variant that duplicated
    this logic.
    """
    if 'KEEP' in obj.tags:
        return obj.tags['KEEP']
    else:
        return "-------no-tag"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_keep_tag(obj):\n if 'KEEP' in obj.tags and len(obj.tags['KEEP'].strip()) != 0:\n return obj.tags['KEEP']\n else:\n return \"-------no-tag\"", "def tag_word(self, w): \n if self.unknown(w):\n return self.default_tag\n else:\n return ma...
[ "0.80788904", "0.5658676", "0.550251", "0.54886025", "0.53662133", "0.53030056", "0.52832705", "0.5201634", "0.51524514", "0.5135101", "0.51192683", "0.5106311", "0.5085387", "0.50634223", "0.50538707", "0.5035368", "0.49847323", "0.4977521", "0.49762967", "0.49733838", "0.49...
0.8014554
1
Returns true if the object (instance, volume, snapshot, AMI) has a tag with 'PROD' for key
def isProduction(obj):
    """Tell whether *obj* (instance, volume, snapshot, AMI) has a tag whose
    key is 'PROD'."""
    tags = obj.tags
    return 'PROD' in tags
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tag_key_exists(self, key):\n return key in self.map", "def hastag(obj, key):\n key = TAG_PREFIX + key\n if not isinstance(obj, unittest.TestCase):\n return hasattr(obj, key)\n tc_method = getattr(obj, obj._testMethodName)\n return hasattr(tc_method, key) or hasattr(obj, key)", "de...
[ "0.63550603", "0.60692143", "0.59001714", "0.586084", "0.5793219", "0.57157636", "0.56831205", "0.56796306", "0.566162", "0.5619104", "0.5589269", "0.5552662", "0.55072737", "0.5455847", "0.5455847", "0.544153", "0.5431722", "0.54038215", "0.5395981", "0.5394134", "0.5378807"...
0.7877264
0
Name is a tag that might not exist, but if it does, it's very helpful for users to identify their resources
def get_name_tag(obj):
    """Return *obj*'s 'Name' tag, or an empty string when the tag is absent.

    The Name tag is optional, but when present it is very helpful for users
    to identify their resources.
    """
    return obj.tags.get('Name', "")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wantsNametag(self):\n return 0", "def get_name_tag(obj):\n if 'Name' in obj.tags:\n return obj.tags['Name']\n else:\n return \"\"", "def tag(self) -> str:\n return self.name or ''", "def tag(self,name):\n return self._tags.get(name,None)", "def n...
[ "0.72620493", "0.7172717", "0.6880746", "0.682264", "0.681301", "0.67561567", "0.6694516", "0.6682618", "0.65618443", "0.6456841", "0.6435155", "0.63498676", "0.6329023", "0.6329023", "0.6329023", "0.6329023", "0.6329023", "0.6329023", "0.6262894", "0.62420034", "0.62270963",...
0.7165239
2
Returns the actual instance (if only instance_id is needed, can access directly from volume) (if KEEP tag is needed, maybe it's better to grab it from a local dictionary list of instances)
def getInstanceOf(volume):
    """Return the boto instance object that *volume* is attached to.

    If only the instance id is needed, read it directly from the volume's
    attach data instead.  NOTE: this opens a fresh EC2 connection per call,
    which is slow when done for every volume -- candidate for refactoring
    (batched lookups or a cached per-region connection).
    """
    connection = ec2.connect_to_region(volume.region.name, **credentials())
    instance_id = volume.attach_data.instance_id
    first_reservation = connection.get_all_instances(instance_ids=instance_id)[0]
    return first_reservation.instances[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_instance(self, instance_id):\n return self.instances.get(instance_id)", "def get_instance_OLD (self):\n instances = self.data['instances']\n if not len(instances) == 1:\n raise Exception, \"ArchivalObject: %d Instances found\" % len(instances)\n return instances[0]"...
[ "0.7441181", "0.7072989", "0.7054633", "0.7043838", "0.6983329", "0.69746256", "0.6915153", "0.6853248", "0.65906733", "0.65776145", "0.65776145", "0.65766394", "0.65577894", "0.65343606", "0.6420164", "0.6343189", "0.63384813", "0.63070196", "0.6281745", "0.62794745", "0.626...
0.70240617
4
Write volumes to file
def generateInfoVolumes(regions):
    """Write a TSV report of every volume in *regions* to
    volumes_data_output_file.

    One header line, then one tab-separated row per volume with its tags,
    attachment state, size, timestamps and location.
    """
    print "\nWriting volumes info to output file %s" % volumes_data_output_file
    with open(volumes_data_output_file, 'w') as f1:
        f1.write("VOLUMES\n")
        # Column headers (tab-separated).
        f1.write(
            "Name\tvolume_ID\tKEEP-tag_of_volume\tKEEP-tag_of_instance\tproduction?\tvolume_attachment_state\tassociated_instance\tinstance_state\tsize\tcreate_time\tregion\tzone\tassociated_snapshot\n\n")
        for r in regions:
            volumes = getVolumes(r)
            print "."  # give some feedback to the user
            for v in volumes:
                # NOTE(review): getInstanceOf opens a new EC2 connection per
                # volume, so this inner loop is slow on large fleets.
                f1.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
                         % (get_name_tag(v), v.id, getKeepTag(v),
                            getKeepTag(getInstanceOf(v)), isProduction(v),
                            v.attachment_state(), v.attach_data.instance_id,
                            v.status, v.size, v.create_time, v.region.name,
                            v.zone, v.snapshot_id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_inventory_file(inventory_item):\n try:\n with open('inventory', 'w') as file:\n file.write(inventory_item)\n except OSError:\n pass", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def add_writable_file_vol...
[ "0.62202305", "0.59282184", "0.59282184", "0.5797909", "0.57411486", "0.570655", "0.5696261", "0.5676645", "0.5656309", "0.56229556", "0.5577266", "0.5544393", "0.55409503", "0.5499961", "0.5476858", "0.5472212", "0.54446274", "0.5439978", "0.54305", "0.54144007", "0.53972524...
0.59575117
1
Write snapshots to file
def generateInfoSnapshots(regions):
    """Write a TSV report of every snapshot in *regions* to
    snapshots_data_output_file.

    Snapshot dictionaries are collected first (one getSnapshotsD call per
    region), then dumped as tab-separated rows.
    """
    print "Writing snapshots info to output file %s" % snapshots_data_output_file
    snapshots = []
    for r in regions:
        snapshots += getSnapshotsD(r)
        print "."  # feedback for the user
    with open(snapshots_data_output_file, 'w') as f2:
        f2.write("SNAPSHOTS\n")
        # Column headers (tab-separated).
        f2.write(
            "Name\tsnapshot_id\tKEEP-tag_of_snapshot\tKEEP-tag_of_AMI\tproduction?\tassociated_AMI\tstart_time\tstatus"
            "\tregion\tprogress\tassociated_volume\tvolume_size\tdescription\n\n")
        for s in snapshots:
            f2.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
                     % (s['Name'], s['id'], s['KEEP-tag'], s['AMI_KEEP-tags'],
                        s['PROD'], s['AMI(s)'], s['start_time'], s['status'],
                        s['region'], s['progress'], s['volume_id'],
                        s['volume_size'], s['Description']))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_snapshot(self):\n json.dump(self.snapshot, open(paths.RESULTS_FILE, 'w'), indent=4, sort_keys=True)", "def saveSnapshot(self, filename): \n\t\tpass", "def write_to_file(self, filename: str) -> None:", "def save_snapshot(args):\n html_doc = document.Document(get_code(args.file))\n ...
[ "0.7562754", "0.743017", "0.65671015", "0.65112525", "0.64657086", "0.64657086", "0.6451764", "0.64466846", "0.6253342", "0.617585", "0.612374", "0.61043537", "0.60520536", "0.6024088", "0.6023335", "0.6000197", "0.59535676", "0.58751094", "0.58477306", "0.5843929", "0.583734...
0.6254951
8
Write instances to file
def generateInfoInstances(regions):
    """Write a TSV report of every instance in *regions* to
    instances_data_output_file.

    One header line, then one tab-separated row per instance with its tags,
    type, state, launch time, security groups and region.
    """
    print "Writing instances info to output file %s" % instances_data_output_file
    with open(instances_data_output_file, 'w') as f3:
        f3.write("INSTANCES\n")
        # Column headers (tab-separated).
        f3.write("Name\tinstance ID\tKEEP-tag\tproduction\tinstance_type\tstate\tlaunched\tsecurity_groups\tregion\n\n")
        for region in regions:
            print "."  # feedback for user
            instances = getInstances(region)
            for i in instances:
                f3.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
                         % (get_name_tag(i), i.id, getKeepTag(i),
                            isProduction(i), i.instance_type, i.state,
                            i.launch_time, getGroups(i), i.region.name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_snapshot(self):\n json.dump(self.snapshot, open(paths.RESULTS_FILE, 'w'), indent=4, sort_keys=True)", "def saveSnapshot(self, filename): \n\t\tpass", "def write_to_file(self, filename: str) -> None:", "def save_snapshot(args):\n html_doc = document.Document(get_code(args.file))\n ...
[ "0.7562754", "0.743017", "0.65671015", "0.65112525", "0.64657086", "0.64657086", "0.6451764", "0.64466846", "0.6254951", "0.6253342", "0.617585", "0.612374", "0.61043537", "0.60520536", "0.6024088", "0.6023335", "0.6000197", "0.59535676", "0.58751094", "0.58477306", "0.584392...
0.0
-1
Returns the application directory.
def get_appdir():
    """Return the application directory (the module-level constant APP_PATH)."""
    return APP_PATH
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def app_dir(self):\n return self._app_dir", "def appdata_dir(self) -> str:\n return os.path.join(self._project_dir, 'appdata')", "def thisdir():\n if getattr(sys, 'frozen', False):\n # The application is frozen\n return os.path.dirname(sys.executable)\n else:\n # The applic...
[ "0.8366667", "0.7430168", "0.73666745", "0.7327246", "0.731593", "0.73079205", "0.7295801", "0.7282597", "0.7222026", "0.7218878", "0.72007245", "0.7196958", "0.71940666", "0.7193182", "0.71729344", "0.7168441", "0.7165291", "0.7127867", "0.70903516", "0.70903516", "0.708375"...
0.8947229
0
Return the TSV file corresponding to the current annotation level.
def tsv_name():
    """Return the TSV file matching the current annotation level."""
    # Level 1 = colonisation table, any other level = mycorrhizal structures.
    return 'col.tsv' if PAR['level'] == 1 else 'myc.tsv'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tsv_value(self):\n return self.tsv_file.getvalue()", "def export_tsv(self):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".tsv\",\n filetypes=((\"tab seperated values\", \"*.tsv\"),\n (\"All Files\", \"*.*\")))\n if ...
[ "0.5939053", "0.5729971", "0.54536223", "0.51073116", "0.5047787", "0.5043582", "0.50271034", "0.5015402", "0.48891845", "0.48566693", "0.48362452", "0.48115683", "0.4806834", "0.4803698", "0.48016763", "0.47835353", "0.47271547", "0.47254357", "0.47245374", "0.47214988", "0....
0.6666001
0
Indicate whether the current level is level 1 (colonization).
def colonization():
    """Tell whether the current annotation level is level 1 (colonization)."""
    level = get('level')
    return level == 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_single_level(self):\n return self.fragments_tree.height <= 2", "def is_flat(self):\n if self.master:\n return self.master.is_flat\n\n return len(self.levels) == 1", "def is_top_level(self) -> bool:\n return self._indent == ''", "def top_left_dot(self) -> bool:\n ...
[ "0.6308522", "0.60663605", "0.592682", "0.57595533", "0.5757749", "0.56357706", "0.56121695", "0.55404365", "0.5528773", "0.55099", "0.55035543", "0.54934734", "0.5432446", "0.5344056", "0.53366804", "0.53177035", "0.5317035", "0.5309982", "0.52847475", "0.52829605", "0.52761...
0.7897029
0
Indicate whether the current level is level 2 (AM fungal structures).
def intra_struct():
    """Tell whether the current annotation level is level 2 (AM fungal
    structures)."""
    level = get('level')
    return level == 2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_subgroup(self, right):\n if right.level() == 1:\n return True\n if is_Gamma0(right):\n return self.level() % right.level() == 0\n if is_Gamma1(right):\n if right.level() >= 3:\n return False\n elif right.level() == 2:\n ...
[ "0.6321547", "0.62854135", "0.62632716", "0.6087753", "0.59138495", "0.5752362", "0.5700526", "0.56212634", "0.55640566", "0.55273724", "0.5408885", "0.5408016", "0.53746355", "0.53725296", "0.5317222", "0.5312037", "0.5294672", "0.52933645", "0.52883124", "0.5273141", "0.525...
0.66111106
0
Defines arguments used in training mode.
def training_subparser(subparsers):
    """Defines arguments used in training mode.

    Registers the 'train' sub-command and all of its options on
    *subparsers*.  Defaults come from the global parameter store PAR.
    Returns the newly created sub-parser.
    """
    parser = subparsers.add_parser('train',
        help='learns how to identify AMF structures.',
        formatter_class=RawTextHelpFormatter)

    x = PAR['batch_size']
    parser.add_argument('-b', '--batch_size',
        action='store', dest='batch_size', metavar='NUM', type=int,
        default=x,
        help='training batch size.'
             '\ndefault value: {}'.format(x))

    x = PAR['drop']
    parser.add_argument('-k', '--keep_background',
        action='store_false', dest='drop',
        default=x,
        help='keep all background tiles.'
             '\nby default, downscale background to equilibrate classes.')

    x = PAR['data_augm']
    parser.add_argument('-a', '--data_augmentation',
        action='store_true', dest='data_augm',
        default=x,
        help='apply data augmentation (hue, chroma, saturation, etc.)'
             '\nby default, data augmentation is not used.')

    x = PAR['save_augmented_tiles']
    parser.add_argument('-sa', '--save_augmented_tiles',
        action='store', dest='save_augmented_tiles', metavar='NUM', type=int,
        default=x,
        help='save a subset of augmented tiles.'
             '\nby default, does not save any tile.')

    x = PAR['summary']
    parser.add_argument('-s', '--summary',
        action='store_true', dest='summary',
        default=x,
        help='save CNN architecture (CNN graph and model summary)'
             '\nby default, does not save any information.')

    x = PAR['outdir']
    parser.add_argument('-o', '--outdir',
        action='store', dest='outdir',
        default=x,
        help='folder where to save trained model and CNN architecture.'
             '\ndefault: {}'.format(x))

    x = PAR['epochs']
    parser.add_argument('-e', '--epochs',
        action='store', dest='epochs', metavar='NUM', type=int,
        default=x,
        help='number of epochs to run.'
             '\ndefault value: {}'.format(x))

    x = PAR['patience']
    parser.add_argument('-p', '--patience',
        action='store', dest='patience', metavar='NUM', type=int,
        default=x,
        help='number of epochs to wait before early stopping is triggered.'
             '\ndefault value: {}'.format(x))

    x = PAR['learning_rate']
    # Fix: learning rates are fractional (e.g. 0.001); the previous
    # type=int made argparse reject any non-integer value.
    parser.add_argument('-lr', '--learning_rate',
        action='store', dest='learning_rate', metavar='NUM', type=float,
        default=x,
        help='learning rate used by the Adam optimizer.'
             '\ndefault value: {}'.format(x))

    x = PAR['vfrac']
    parser.add_argument('-vf', '--validation_fraction',
        action='store', dest='vfrac', metavar='N%', type=int,
        default=x,
        help='Percentage of tiles used for validation.'
             '\ndefault value: {}%%'.format(x))

    # Annotation level: CNN1 (colonisation) and CNN2 (hyphal structures)
    # are mutually exclusive.
    level = parser.add_mutually_exclusive_group()
    level.add_argument('-1', '--CNN1',
        action='store_const', dest='level', const=1,
        help='Train for root colonisation (default)')
    level.add_argument('-2', '--CNN2',
        action='store_const', dest='level', const=2,
        help='Train for fungal hyphal structures.')

    x = None
    parser.add_argument('-net', '--network',
        action='store', dest='model', metavar='H5', type=str,
        default=x,
        help='name of the pre-trained network to use as a basis for training.'
             '\ndefault value: {}'.format(x))

    parser.add_argument('-sr', '--super_resolution',
        action='store_const', dest='super_resolution', const=True,
        help='Apply super-resolution before predictions.'
             '\ndefault value: no super-resolution.')

    x = None
    parser.add_argument('-g', '--generator',
        action='store', dest='generator', metavar='H5', type=str,
        default=x,
        help='name of the pre-trained generator.'
             '\ndefault value: {}'.format(x))

    x = None
    parser.add_argument('-d', '--discriminator',
        action='store', dest='discriminator', metavar='H5', type=str,
        default=x,
        help='name of the pre-trained discriminator.'
             '\ndefault value: {}'.format(x))

    x = PAR['input_files']
    parser.add_argument('image', nargs='*',
        default=x,
        help='plant root image to process.'
             '\ndefault value: {}'.format(x))

    return parser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def feed_training_args(self):\n return {}", "def add_train_val_arguments(self):\n self.add_train_arguments()\n self.add_val_arguments()", "def add_train_arguments(self):\n parser = self.parser\n parser.add_argument(\"source_dir\", help=\"Directory containing test source images.\"...
[ "0.7736733", "0.7350229", "0.72321266", "0.72048527", "0.7092233", "0.6984818", "0.69702905", "0.6870562", "0.6834763", "0.6788515", "0.6737505", "0.6594925", "0.6524672", "0.6498421", "0.64617753", "0.64401156", "0.64344376", "0.643193", "0.64313674", "0.6415906", "0.6410304...
0.0
-1
Defines arguments used in prediction mode.
def prediction_subparser(subparsers):
    """Defines arguments used in prediction mode.

    Registers the 'predict' sub-command on *subparsers* and returns the
    created sub-parser.  Defaults come from the global parameter store PAR,
    except for the hard-coded pre-trained model file names.
    """
    parser = subparsers.add_parser('predict',
        help='Runs AMFinder in prediction mode.',
        formatter_class=RawTextHelpFormatter)

    # Tile size used to segment input images.
    x = PAR['tile_edge']
    parser.add_argument('-t', '--tile_size',
        action='store', dest='edge', type=int,
        default=x,
        help='Tile size (in pixels) used for image segmentation.'
             '\ndefault value: {} pixels'.format(x))

    parser.add_argument('-sr', '--super_resolution',
        action='store_const', dest='super_resolution', const=True,
        help='Apply super-resolution before predictions.'
             '\ndefault value: no super-resolution.')

    # Pre-trained SRGAN generator used for super-resolution.
    x = 'SRGANGenv1beta.h5'
    parser.add_argument('-g', '--generator',
        action='store', dest='generator', metavar='H5', type=str,
        default=x,
        help='name of the pre-trained generator.'
             '\ndefault value: {}'.format(x))

    x = PAR['colormap']
    parser.add_argument('-map', '--colormap',
        action='store', dest='colormap', metavar='id', type=str,
        default=x,
        help='Name of the colormap used to display conv2d outputs and kernels.'
             '\ndefault value: {}'.format(x))

    # Pre-trained CNN used for the predictions themselves.
    x = 'CNN1v2.h5'
    parser.add_argument('-net', '--network',
        action='store', dest='model', metavar='H5', type=str,
        default=x,
        help='name of the pre-trained model to use for predictions.'
             '\ndefault value: {}'.format(x))

    parser.add_argument('-so', '--save_conv2d_outputs',
        action='store_const', dest='save_conv2d_outputs', const=True,
        help='save conv2d outputs in a separate zip file.'
             '\ndefault value: False')

    parser.add_argument('-sk', '--save_conv2d_kernels',
        action='store_const', dest='save_conv2d_kernels', const=True,
        help='save convolution kernels in a separate zip file (takes time).'
             '\ndefault value: False')

    x = PAR['input_files']
    parser.add_argument('image', nargs='*',
        default=x,
        help='plant root scan to be processed.'
             '\ndefault value: {}'.format(x))

    return parser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_args(self, args: Namespace) -> None:\n self.epochs = args.epochs\n self.lrdecay = args.lrdecay\n self.lrpatience = args.lrpatience\n self.ntest = args.ntest\n self.ndiscard = args.ndiscard\n self.predict = args.predict\n self.printfreq = args.printfreq\n ...
[ "0.68575656", "0.68137014", "0.6691477", "0.6603834", "0.65638775", "0.65195256", "0.6398639", "0.6300945", "0.6298256", "0.6298256", "0.6298256", "0.6298256", "0.6298256", "0.62570894", "0.6243932", "0.6173412", "0.61286485", "0.6126391", "0.61123145", "0.61077875", "0.60999...
0.0
-1
Defines arguments used in diagnostic mode.
def diagnostic_subparser(subparsers):
    """Defines arguments used in diagnostic mode.

    Registers the 'diagnose' sub-command on *subparsers* and returns the
    created sub-parser.
    """
    parser = subparsers.add_parser('diagnose',
        help='Runs AMFinder in diagnostic mode.',
        formatter_class=RawTextHelpFormatter)

    default_model = 'CNN1_pretrained_2021-01-18.h5'
    parser.add_argument('-net', '--network',
        action='store', dest='model', metavar='H5', type=str,
        default=default_model,
        help='name of the pre-trained model to use for diagnostic.'
             '\ndefault value: {}'.format(default_model))

    default_images = PAR['input_files']
    parser.add_argument('image', nargs='*',
        default=default_images,
        help='plant root scan to be processed.'
             '\ndefault value: {}'.format(default_images))

    return parser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_arguments(self, parser):", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n ...
[ "0.67259675", "0.6644485", "0.6644485", "0.6644485", "0.6644485", "0.6644485", "0.6644485", "0.6644485", "0.6644485", "0.65979254", "0.6563293", "0.65430915", "0.6472136", "0.64492387", "0.64492387", "0.64112806", "0.63782823", "0.6311546", "0.6246682", "0.6226592", "0.619988...
0.0
-1
Builds AMFinder commandline parser.
def build_arg_parser():
    """Builds the AMFinder command-line parser.

    Wires the three sub-commands (train, predict, diagnose) onto a single
    top-level parser and returns it.
    """
    main_parser = ArgumentParser(
        description='AMFinder command-line arguments.',
        allow_abbrev=False,
        formatter_class=RawTextHelpFormatter)

    subparsers = main_parser.add_subparsers(dest='run_mode', required=True,
                                            help='action to be performed.')

    # Registration order fixes the order in the help message.
    for register in (training_subparser,
                     prediction_subparser,
                     diagnostic_subparser):
        register(subparsers)

    return main_parser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_parser(self, parser: ArgumentParser) -> None:", "def create_parser() -> configargparse.ArgParser:\n parser = configargparse.ArgParser(default_config_files=[\n \"/etc/lookout/analyzer.conf\", \"~/.config/lookout/analyzer.conf\"],\n formatter_class=ArgumentDefaultsHelpFormatterNoNone,\n ...
[ "0.69869363", "0.6829675", "0.6746405", "0.6673381", "0.66188794", "0.658462", "0.6580858", "0.6543717", "0.65387374", "0.6531024", "0.6523515", "0.6503992", "0.6499173", "0.64667153", "0.6449182", "0.6419928", "0.6393331", "0.6363311", "0.6360073", "0.6353206", "0.63527614",...
0.7635935
0
Returns absolute paths to input files.
def abspath(files):
    """Return absolute paths for every file matched by the given patterns.

    Each entry of *files* is expanded with glob; non-matching patterns
    contribute nothing.
    """
    expanded = []
    for pattern in files:
        expanded.extend(glob.glob(pattern))
    return [os.path.abspath(match) for match in expanded]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inputpathabs(self):\n return os.path.abspath(self.inputpath)", "def get_file_list(input_dir):\n\tfile_paths = [input_dir +'/' + f for f in listdir(input_dir) if isfile(join(input_dir, f)) ]\n\treturn file_paths", "def sources_absolute_paths(self):\r\n abs_target_base = os.path.join(get_buildroot(...
[ "0.6970691", "0.6879517", "0.6868738", "0.6868052", "0.67055565", "0.6700424", "0.65872943", "0.657331", "0.6540866", "0.651123", "0.65038157", "0.65028954", "0.6485323", "0.6448438", "0.63885504", "0.6380545", "0.63705975", "0.63195634", "0.6316908", "0.63139904", "0.6297403...
0.71486807
0
Import image settings (currently tile edge).
def update_tile_edge(path):
    """Import image settings (currently the tile edge, in pixels).

    Looks for a 'settings.json' entry inside the zip archive that shares
    *path*'s base name; when found, updates the global 'tile_edge' setting.
    Returns the (possibly updated) tile edge.
    """
    archive = os.path.splitext(path)[0] + '.zip'
    if zf.is_zipfile(archive):
        with zf.ZipFile(archive) as z:
            if 'settings.json' in z.namelist():
                raw = z.read('settings.json').decode('utf-8')
                settings = yaml.safe_load(raw)
                set('tile_edge', settings['tile_edge'])
    return get('tile_edge')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_image(self, file: str) -> Any:\n pass", "def importImg(self):\n logger.info(\"import image \"+ str(self))\n file,types = QtWidgets.QFileDialog.getOpenFileName(self, 'Choose Image',\n BASE_DIR,\"Image files (*.jpg *.gif *.png)\")\n logger.debug(file)\n self.imageFil...
[ "0.6126535", "0.610119", "0.5611701", "0.5601905", "0.55795354", "0.554965", "0.54795593", "0.5368683", "0.53583604", "0.53392506", "0.5296245", "0.525413", "0.525413", "0.5213607", "0.5202142", "0.51820916", "0.51820916", "0.51820916", "0.51808995", "0.5156906", "0.5153215",...
0.0
-1
Filter input file list and keep valid JPEG or TIFF images.
def get_input_files():
    """Filter the configured input file list, keeping valid JPEG and TIFF
    images, and report how many were found."""
    candidates = abspath(get('input_files'))
    valid_types = ('image/jpeg', 'image/tiff')
    # MIME type is guessed from the file extension.
    images = [path for path in candidates
              if mimetypes.guess_type(path)[0] in valid_types]
    print('* Input images: {}'.format(len(images)))
    return images
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filterImages(files, cfg):\r\n regex = \"\\.(\" + \"|\".join(cfg.image_formats) + \")$\"\r\n #filter(lambda s: re.match(regex, s), files)\r\n return [s for s in files if re.findall(regex, s)]", "def filter_bad_names(self, images):\r\n good_images = []\r\n for image in images:\r\n ...
[ "0.74834895", "0.6394286", "0.6174496", "0.61535", "0.5974321", "0.59707564", "0.59085125", "0.58987886", "0.5869271", "0.5863154", "0.5833285", "0.58235216", "0.57972664", "0.5788407", "0.57858866", "0.57686263", "0.5761519", "0.5749259", "0.57330817", "0.5702307", "0.568775...
0.6194896
2
Read command line and store user settings.
def initialize():
    """Read the command line and store user settings.

    Parses known arguments with the AMFinder parser, then copies every
    mode-specific value into the global settings store.

    Fixes: in 'train' mode, patience, learning_rate and
    save_augmented_tiles were parsed by the sub-parser but never stored,
    so their command-line values were silently ignored.
    """
    parser = build_arg_parser()
    par = parser.parse_known_args()[0]

    # Main arguments.
    set('run_mode', par.run_mode)
    set('input_files', par.image)

    # Sub-parser specific arguments.
    if par.run_mode == 'train':
        set('batch_size', par.batch_size)
        set('drop', par.drop)
        set('epochs', par.epochs)
        set('patience', par.patience)
        set('learning_rate', par.learning_rate)
        set('save_augmented_tiles', par.save_augmented_tiles)
        set('model', par.model)
        set('level', par.level)
        set('vfrac', par.vfrac)
        set('data_augm', par.data_augm)
        set('summary', par.summary)
        set('outdir', par.outdir)
        # Parameters associated with super-resolution.
        set('super_resolution', par.super_resolution)
        set('generator', par.generator)
        set('discriminator', par.discriminator)
    elif par.run_mode == 'predict':
        set('tile_edge', par.edge)
        set('model', par.model)
        set('save_conv2d_kernels', par.save_conv2d_kernels)
        set('save_conv2d_outputs', par.save_conv2d_outputs)
        set('colormap', par.colormap)
        # Parameters associated with super-resolution.
        set('super_resolution', par.super_resolution)
        set('generator', par.generator)
    elif par.run_mode == 'diagnose':
        set('model', par.model)
    else:
        pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_settings_from_cli():\n load_user_from_cli()\n load_local_contacts()", "def load_user_from_cli():\n load_user()\n\n # Flag for checking if there is a need to update the config by writing to a file\n update = False\n\n if SETTINGS[\"user\"][\"name\"] is None:\n SETTINGS[\"user\"][...
[ "0.7167355", "0.68207556", "0.64562625", "0.6258873", "0.6210641", "0.611424", "0.6007026", "0.59203523", "0.5906044", "0.5899174", "0.5890425", "0.5873461", "0.586222", "0.58594745", "0.58452356", "0.58350545", "0.5832136", "0.58280176", "0.5820044", "0.58118266", "0.5798276...
0.0
-1
Send an echo packet with TTL 0 so that when it reaches the gateway, a TTL-exceeded ICMP error message is sent back; its destination is our own IP.
def find_my_IP_and_MAC():
    """Return this host's (MAC, IP) pair.

    The MAC is derived from uuid.getnode().  The IP is discovered by
    sending an ICMP echo with TTL 0: it expires at the gateway, which
    replies with a time-exceeded error whose destination is our own
    address.
    """
    my_mac = ':'.join(re.findall('..', '%012x' % getnode()))
    # A literal hostname is used directly (saves time over other lookups).
    reply = sr1(IP(dst="google.com", ttl=0) / ICMP() / "XXXXXXXXXXX",
                verbose=0, timeout=5)
    return my_mac, reply.dst
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pinger(dst):\n icmpId = 0x4711\n for i in range(MAX_NUM_PROBES):\n # Erzeuge das ICMP Echo Anfrage-Paket (type=8).\n # Der Parameter seq ist ein Zaehler fuer die Anfrage-Pakete.\n icmpPkt = ICMP() # Hier muss ergaenzt werden!\n \"\"\"\n scapy liefert die Klasse ICMP mit...
[ "0.7043539", "0.6484077", "0.64424086", "0.6386618", "0.63455755", "0.6326406", "0.6275683", "0.62598705", "0.6095228", "0.5987131", "0.59356433", "0.58306605", "0.5739227", "0.57313937", "0.5700737", "0.56921375", "0.5688917", "0.5655082", "0.56411165", "0.56379056", "0.5627...
0.0
-1
Send an echo packet with TTL 0 so that when it reaches the gateway, a TTL-exceeded ICMP error message is sent back; its source is the gateway.
def get_GW():
    """Return the gateway's IP address.

    An ICMP echo with TTL 0 expires at the gateway, which answers with a
    time-exceeded error whose source address is the gateway itself.
    """
    reply = sr1(IP(dst="google.com", ttl=0) / ICMP() / "XXXXXXXXXXX",
                verbose=0)
    return reply.src
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_one_ping(mySocket, destIP, myID, mySeqNumber, packet_size):\n \n myChecksum = 0 # contador da soma de verificação\n\n # Faça um cabeçalho fictício com uma soma de verificação 0\n # Retorne uma string contendo os valores compactados de acordo com o formato especificado. \n header = struct.pac...
[ "0.64529324", "0.63816625", "0.6354133", "0.6196364", "0.61904454", "0.61332846", "0.6091843", "0.59220237", "0.5845445", "0.5802433", "0.5794148", "0.5763908", "0.57570946", "0.56948966", "0.56458026", "0.56425583", "0.563012", "0.56200063", "0.561671", "0.5613484", "0.55894...
0.0
-1
Load all modules.
def load(self):
    """Load all modules.

    Rebuilds the command registry, then scans the 'modules' directory and
    loads every '*.py' file via modprobe.  Module file names are expected
    to follow the pattern '<type>_<command>.py'.
    """
    self.commands = {
        # Usual text commands (e.g. "/echo 123")
        'user': {},
        'owner': {
            'load': self.load,
            'modprobe': self.modprobe,
            'rmmod': self.rmmod
        },
        # Modules for bot's reaction to a different message types
        'text': {},
        'photo': {},
        'audio': {},
        'video': {},
        'sticker': {},
        'voice': {}
    }
    for file in os.listdir('modules'):
        if file.endswith('.py'):
            # Split '<type>_<command>.py' on the first underscore;
            # command_type is currently unused here.
            command_type, command = file.split('_', 1)
            # NOTE(review): self is passed explicitly on top of the bound
            # call, so modprobe apparently expects an extra first argument
            # -- confirm against modprobe's signature.
            self.modprobe(self, command[:-3])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadModule(*args, allModules: bool=True, load: AnyStr=\"\", scan: bool=True,\n **kwargs)->List[AnyStr]:\n pass", "def _load_modules(self):\n modules_src = os.path.abspath(\"src/modules\")\n\n # perform a tree walk over modules directory\n for file_name, file_path in self...
[ "0.7162257", "0.677256", "0.6646025", "0.6645682", "0.6506407", "0.6479407", "0.6455077", "0.6443325", "0.6390843", "0.6241457", "0.623022", "0.62124735", "0.62108576", "0.62056154", "0.6129464", "0.61284095", "0.6105363", "0.60891134", "0.6054746", "0.60284793", "0.60165846"...
0.6359435
9
Loads openpose data for all videos.
def load_op_data_all(json_dirs, video_metadata_intel):
    """Loads openpose data for all videos.

    Pairs each JSON directory with its matching video metadata entry and
    returns the per-video frame lists, in order.
    """
    return [load_op_data(json_dir, metadata)
            for json_dir, metadata in zip(json_dirs, video_metadata_intel)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_video_data(self):\n self.file_videos = [\n Video.from_file(path, self)\n for path in self.video_dir.glob('*.json')\n ]", "def loadData(catalog):\n loadVideos(catalog)", "def load_videos(self):\n logging.debug(\"Loading videos data...\")\n\n # loadin...
[ "0.7148177", "0.6771924", "0.6767525", "0.6552482", "0.6189131", "0.61795473", "0.61142415", "0.6013824", "0.5968695", "0.5942266", "0.5874326", "0.58726317", "0.5843663", "0.5814855", "0.5791894", "0.5758234", "0.57317847", "0.5715237", "0.57054555", "0.57054555", "0.5681245...
0.6053022
7
Loads openpose data for single video.
def load_op_data(path_to_json, vm, person_metric="largest_bbox"):
    """Load openpose keypoint data for a single video.

    Reads every ``*.json`` file in *path_to_json* (one file per frame),
    converts openpose's flat ``pose_keypoints_2d`` list into a
    part-index -> ``[x, y, score]`` mapping, and — when several people were
    detected in a frame — keeps only the person selected by *person_metric*.

    Args:
        path_to_json: Directory containing one openpose JSON file per frame.
        vm: Video metadata; only ``vm.width`` and ``vm.height`` are read,
            to normalize the x/y coordinates.
        person_metric: Person-selection rule: ``"largest_bbox"`` (largest
            bounding box, via ``get_bbox_size``) or ``"rightmost_bbox"``
            (via ``get_bbox_maxpt``).

    Returns:
        A list with one dict per frame mapping part index to
        ``[x, y, score]`` (all zeros when nobody was detected).

    Raises:
        ValueError: If *person_metric* is not a recognized metric name.
    """
    frame_list = []
    # BUG FIX: os.listdir() returns entries in arbitrary order, so frames
    # came back unordered. Openpose names frame files with zero-padded frame
    # numbers, so lexicographic sort == chronological order.
    keypoint_files = sorted(
        os.path.join(path_to_json, pos_json)
        for pos_json in os.listdir(path_to_json)
        if pos_json.endswith('.json')
    )
    for js in keypoint_files:
        with open(js) as json_file:
            keypoint_data = json.load(json_file)
        people = keypoint_data['people']
        if not people:
            # No people detected in this frame: all-zero entry per body part.
            frame_list.append(
                {part_index: [0, 0, 0] for part_index in range(len(INDEX_TO_PART))})
            continue
        # Keep the person with the best metric value; if no candidate beats
        # the initial 0 the frame stays an empty dict (original behavior).
        max_bbox_sz = 0
        selected_part_data = {}
        for person in people:
            part_data = _extract_part_data(person['pose_keypoints_2d'], vm)
            if person_metric == "largest_bbox":
                curr_bbox_sz = get_bbox_size(part_data)
            elif person_metric == "rightmost_bbox":
                curr_bbox_sz = get_bbox_maxpt(part_data)
            else:
                # BUG FIX: an unknown metric previously left curr_bbox_sz
                # unbound and crashed later with a NameError.
                raise ValueError("unknown person_metric: %r" % (person_metric,))
            if curr_bbox_sz > max_bbox_sz:
                max_bbox_sz = curr_bbox_sz
                selected_part_data = part_data
        frame_list.append(selected_part_data)
    return frame_list


def _extract_part_data(pose_keypoints_2d, vm):
    """Turn openpose's flat keypoint list into {part index: [x, y, score]}.

    x/y are normalized by the video's width/height; parts missing from the
    flat list come back as ``[0, 0, 0]``.
    """
    part_data = {}
    for index in INDEX_TO_PART:
        keypoint_index = index * POINTS_PER_PART
        point = pose_keypoints_2d[keypoint_index:keypoint_index + POINTS_PER_PART]
        if point:
            point[0] /= vm.width
            point[1] /= vm.height
        else:
            point = [0, 0, 0]
        part_data[index] = point
    return part_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _loadData(self, data):\n Movie._loadData(self, data)\n PlexSession._loadData(self, data)", "def __loadVideo(self):\n # Check if movie file exists ...\n #\n if not(os.path.isfile(self.fNameVideo)):\n return stm.StimErrC.videoFileNotFound\n\n try: \n # Load video\n #\n ...
[ "0.63587195", "0.6327954", "0.6313313", "0.6260543", "0.6144583", "0.60049814", "0.6003176", "0.5981438", "0.58994627", "0.5867908", "0.57829833", "0.5776408", "0.57461214", "0.57129264", "0.5711681", "0.5697466", "0.5672912", "0.55612403", "0.5529223", "0.552714", "0.5512627...
0.0
-1
Print allocation and remaining quota in SqKm.
def quota():
    """Print the Planet subscription allocation and remaining quota in SqKm.

    Reads the API key from ``~/.planet.json``, queries the Planet
    subscriptions endpoint, and prints one summary block per subscription.
    When no key is configured (or the request is not authorized), prints a
    hint to initialize the client instead of raising.
    """
    try:
        fname = os.path.join(os.path.expanduser("~"), ".planet.json")
        if not os.path.exists(fname):
            raise IOError("Escape to End and Initialize")
        with open(fname, "r") as fp:
            contents = json.loads(fp.read())
        # BUG FIX: was the double negative `if not len(contents) != 0`; also
        # treat a config file without a "key" entry as uninitialized instead
        # of letting a KeyError escape the IOError handler below.
        if not contents or "key" not in contents:
            raise IOError("Escape to End and Initialize")
        k = contents["key"]
        main = requests.get(
            "https://api.planet.com/auth/v1/"
            + "experimental/public/my/subscriptions",
            auth=HTTPBasicAuth(k, ""),
        )
        if main.status_code != 200:
            print("Failed with exception code: " + str(main.status_code))
            return
        for item_id in main.json():
            print(" ")
            print("Allocation Name: %s" % item_id["organization"]["name"])
            print(
                "Allocation active from: %s"
                % item_id["active_from"].split("T")[0]
            )
            print("Quota Enabled: %s" % item_id["quota_enabled"])
            print("Total Quota in SqKm: %s" % item_id["quota_sqkm"])
            print("Total Quota used: %s" % item_id["quota_used"])
            if item_id["quota_sqkm"] is not None:
                leftquota = float(item_id["quota_sqkm"]) - float(item_id["quota_used"])
                print("Remaining Quota in SqKm: %s" % leftquota)
            else:
                print("No Quota Allocated")
            print("")
    except IOError:
        print("Initialize client or provide API Key")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_usage(self):\r\n print 'Total Usage: %f compute seconds' % self.box_usage\r\n cost = self.box_usage * 0.14\r\n print 'Approximate Cost: $%f' % cost", "def print_usage(self):\n print('Total Usage: %f compute seconds' % self.box_usage)\n cost = self.box_usage * 0.14\n ...
[ "0.6803348", "0.6786766", "0.62010103", "0.60411453", "0.60358113", "0.603348", "0.5993506", "0.58677566", "0.58565897", "0.5847982", "0.5795429", "0.57485247", "0.5704235", "0.5681458", "0.5679012", "0.564224", "0.56345713", "0.56312704", "0.56190854", "0.554365", "0.5539084...
0.6106883
3