query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Get a rule violators who have been ejected.
def get_violators(self): return self.violators
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def affecteds(self):\n return [m for m in self.members if m.disease == PedigreeMember.AFFECTED]", "def get_victors(self):\n if self.is_game_over():\n scores = [p.get_score() for p in self.state.get_players()]\n if len(scores) == 0:\n return []\n max_s...
[ "0.6156995", "0.59065264", "0.5629059", "0.5474041", "0.53551775", "0.52045214", "0.5177488", "0.50929904", "0.5079941", "0.5059264", "0.5059264", "0.5011072", "0.4973407", "0.49627775", "0.4929745", "0.4920347", "0.4912848", "0.4911266", "0.4906332", "0.48964974", "0.4874137...
0.61693317
0
Get a copy of the current state.
def get_current_state(self): return deepcopy(self.state)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_state(self):\n return copy.deepcopy(self._state)", "def __getstate__(self):\n state = self.__dict__.copy()\n self.__cleanState__(state)\n return state", "def state(self):\n return self._state.copy()", "def get_state(self, deepcopy: bool = True):\n s = self.cache_...
[ "0.8455732", "0.79240113", "0.78669524", "0.78233796", "0.76678777", "0.76537627", "0.7607961", "0.75942713", "0.7512252", "0.74859667", "0.74796313", "0.7326282", "0.7263944", "0.7236717", "0.7214216", "0.7181021", "0.7169202", "0.71566755", "0.715247", "0.7142748", "0.71140...
0.81724
1
A decorator that wraps the passed in function and raises exception if headers with token is missing
def require_auth(function): @functools.wraps(function) def wrapper(self, *args, **kwargs): if not self.headers: raise LoginRequiredError return function(self, *args, **kwargs) return wrapper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def token_required(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n try:\n token = request.headers['token']\n try:\n decoded = decode_token(token)\n except jwt.ExpiredSignatureError:\n return jsonify({\"message\": \"token expired\"}...
[ "0.8132438", "0.7699005", "0.7501329", "0.74680185", "0.7427506", "0.742177", "0.7420473", "0.7285155", "0.7139443", "0.7120114", "0.7101173", "0.70718706", "0.7041331", "0.7013722", "0.70107657", "0.70075893", "0.67916787", "0.6714881", "0.6710947", "0.6683059", "0.6663674",...
0.7472541
3
Sigmoid backward (derivative) implementation
def sigmoid_backward(dA, Z): dsig = sigmoid(Z) * (1 - sigmoid(Z)) return dA * dsig
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sigmoid_derivative(x):\n return x * (1-x)", "def sigmoid_derivative(x):\n return x * (1.0 - x)", "def derivative_sigmoid(x):\n return x * (1 - x)", "def derivative_sigmoid(x):\n return x * (1 - x)", "def sigmoid_derivative(x):\n\n return sigmoid(x) * (1 - sigmoid(x))", "def sigmoid_bac...
[ "0.85681444", "0.8473839", "0.831426", "0.831426", "0.8309925", "0.8075164", "0.80000556", "0.79696095", "0.79516095", "0.7931595", "0.7929404", "0.7801728", "0.7743298", "0.7714867", "0.7661006", "0.76579946", "0.7626087", "0.76228863", "0.7605027", "0.753502", "0.75197905",...
0.82721937
5
RELU backward (derivative) implementation
def relu_backward(dA, Z): dZ = np.array(dA, copy=True) dZ[Z <= 0] = 0 return dZ
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def relu_backward(dout, cache):\n #raise NotImplementedError\n #######################################################################\n # #\n # #\n # ...
[ "0.72977805", "0.715568", "0.7135363", "0.7078485", "0.6929383", "0.69284886", "0.6913763", "0.69110596", "0.69005686", "0.69005686", "0.6865896", "0.6833532", "0.682018", "0.6802795", "0.6802623", "0.6784269", "0.6782692", "0.67817545", "0.6776672", "0.6749691", "0.6739863",...
0.69511014
4
Given a pool name, returns a storage driver.
def _init_driver(self, pool_id, pool_conf=None): if pool_id is not None: pool = self._pools_ctrl.get(pool_id, detailed=True) else: pool = pool_conf conf = utils.dynamic_conf(pool['uri'], pool['options'], conf=self._conf) storage = utils.load_storage_driver(conf, self._cache, control_driver=self.control) return pipeline.DataDriver(conf, storage, self.control)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_driver(self, pool_id, pool_conf=None):\n\n try:\n return self._drivers[pool_id]\n except KeyError:\n # NOTE(cpp-cabrera): cache storage driver connection\n self._drivers[pool_id] = self._init_driver(pool_id, pool_conf)\n\n return self._drivers[pool_...
[ "0.70222354", "0.6526476", "0.6414902", "0.63886523", "0.6257161", "0.6163656", "0.6162145", "0.61483705", "0.61444455", "0.61236185", "0.5998005", "0.59904814", "0.5983413", "0.58813095", "0.58759", "0.58657277", "0.58505595", "0.58058536", "0.5719337", "0.56997657", "0.5679...
0.548844
29
Get the ID for the pool assigned to the given queue.
def _pool_id(self, queue, project=None): return self._catalogue_ctrl.get(project, queue)['pool']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def _get_work_pool_queue_id_from_name(\n self, session: AsyncSession, work_pool_name: str, work_pool_queue_name: str\n ) -> UUID:\n work_pool_queue = await models.workers.read_work_pool_queue_by_name(\n session=session,\n work_pool_name=work_pool_name,\n work...
[ "0.7236343", "0.7009467", "0.6953268", "0.6914265", "0.6697232", "0.66893286", "0.6669339", "0.6644398", "0.65078753", "0.6422109", "0.63314754", "0.63168746", "0.63056153", "0.62965095", "0.6249178", "0.6227463", "0.62212545", "0.61551255", "0.61551255", "0.6059136", "0.6049...
0.8461307
0
Register a new queue in the pool catalog. This method should be called whenever a new queue is being created, and will create an entry in the pool catalog for the given queue. After using this method to register the queue in the catalog, the caller should call `lookup()` to get a reference to a storage driver which will allow interacting with the queue's assigned backend pool.
def register(self, queue, project=None, flavor=None): # NOTE(gengchc): if exist, get queue's pool.flavor: # if queue's pool.flavor is different, first delete it and add it. # Otherwise, if the flavor in the meteredata of the queue is # modified, the catalog will be inconsistent. if self._catalogue_ctrl.exists(project, queue): catalogue = self._catalogue_ctrl.get(project, queue) oldpoolids = catalogue['pool'] oldpool = self._pools_ctrl.get(oldpoolids) oldflavor = oldpool['flavor'] msgtmpl = _(u'register queue to pool: old flavor: %(oldflavor)s ' ', new flavor: %(flavor)s') LOG.info(msgtmpl, {'oldflavor': oldflavor, 'flavor': flavor}) if oldpool['flavor'] != flavor: self._catalogue_ctrl.delete(project, queue) if not self._catalogue_ctrl.exists(project, queue): if flavor is not None: flavor = self._flavor_ctrl.get(flavor, project=project) pools = self._pools_ctrl.get_pools_by_flavor( flavor=flavor, detailed=True) pool = select.weighted(pools) pool = pool and pool['name'] or None msgtmpl = _(u'register queue to pool: new flavor:%(flavor)s') LOG.info(msgtmpl, {'flavor': flavor.get('name', None)}) else: # NOTE(flaper87): Get pools assigned to the default # group `None`. We should consider adding a `default_group` # option in the future. pools = self._pools_ctrl.get_pools_by_flavor(detailed=True) pool = select.weighted(pools) pool = pool and pool['name'] or None if not pool: # NOTE(flaper87): We used to raise NoPoolFound in this # case but we've decided to support automatic pool # creation. Note that we're now returning and the queue # is not being registered in the catalogue. 
This is done # on purpose since no pool exists and the "dummy" pool # doesn't exist in the storage if self.lookup(queue, project) is not None: return raise errors.NoPoolFound() msgtmpl = _(u'register queue to pool: new flavor: None') LOG.info(msgtmpl) msgtmpl = _(u'register queue: project:%(project)s' ' queue:%(queue)s pool:%(pool)s') LOG.info(msgtmpl, {'project': project, 'queue': queue, 'pool': pool}) self._catalogue_ctrl.insert(project, queue, pool)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register(self, queue, project=None):\n # NOTE(cpp-cabrera): only register a queue if the entry\n # doesn't exist\n if not self._catalogue_ctrl.exists(project, queue):\n # NOTE(cpp-cabrera): limit=0 implies unlimited - select from\n # all shards\n shard = se...
[ "0.71361166", "0.6695945", "0.64565563", "0.6338736", "0.6160316", "0.6083422", "0.5962883", "0.59370244", "0.5895258", "0.5773519", "0.5767887", "0.5727844", "0.5669611", "0.5621137", "0.55773187", "0.5558326", "0.5552847", "0.55482846", "0.55192804", "0.5471164", "0.5442383...
0.7536504
0
Removes a queue from the pool catalog. Call this method after successfully deleting it from a backend pool.
def deregister(self, queue, project=None): self._catalogue_ctrl.delete(project, queue)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_queue(self):\n self.work_queue_client.delete_queue()", "def _queue_delete(self, queue):\n\n queue.delete()", "def remove_queue(self, queue) -> None:\r\n self.receive_queues.remove(queue)", "def remove_queue(self, queue):\n with self.mutex:\n self.queues.remov...
[ "0.72310483", "0.7100992", "0.70777076", "0.70770454", "0.6994525", "0.699254", "0.6895938", "0.6883569", "0.68662935", "0.6845864", "0.6806441", "0.68028337", "0.67998916", "0.6713241", "0.67060405", "0.66622037", "0.64299446", "0.6404603", "0.63555455", "0.6346353", "0.6262...
0.70685136
4
Lookup the queue controller for the given queue and project.
def get_queue_controller(self, queue, project=None): target = self.lookup(queue, project) return target and target.queue_controller
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_claim_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.claim_controller", "def get_message_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.message_controller", "d...
[ "0.77063626", "0.7698841", "0.76416427", "0.7288941", "0.6930836", "0.6912205", "0.60668814", "0.606344", "0.60406834", "0.594424", "0.5849223", "0.5683665", "0.5642507", "0.560627", "0.559546", "0.55402505", "0.55328107", "0.5492972", "0.54287374", "0.5398488", "0.539153", ...
0.86451554
0
Lookup the message controller for the given queue and project.
def get_message_controller(self, queue, project=None): target = self.lookup(queue, project) return target and target.message_controller
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_queue_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.queue_controller", "def get_claim_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.claim_controller", "def g...
[ "0.80495954", "0.73924774", "0.73809624", "0.6577108", "0.6438663", "0.6041202", "0.5956915", "0.5211523", "0.5192648", "0.5166102", "0.5160857", "0.5156492", "0.5145304", "0.51198655", "0.51061904", "0.50889635", "0.49702215", "0.49656522", "0.49377242", "0.49108028", "0.487...
0.8593036
0
Lookup the claim controller for the given queue and project.
def get_claim_controller(self, queue, project=None): target = self.lookup(queue, project) return target and target.claim_controller
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_queue_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.queue_controller", "def get_subscription_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.subscription_control...
[ "0.7787301", "0.7567917", "0.7170647", "0.62005764", "0.6165711", "0.5723963", "0.5608801", "0.5049111", "0.5029624", "0.48735002", "0.48166627", "0.4790605", "0.47787577", "0.47474897", "0.47414377", "0.4711115", "0.47098687", "0.47030538", "0.4683094", "0.46805832", "0.4673...
0.8776693
0
Lookup the subscription controller for the given queue and project.
def get_subscription_controller(self, queue, project=None): target = self.lookup(queue, project) return target and target.subscription_controller
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_queue_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.queue_controller", "def get_claim_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.claim_controller", "def g...
[ "0.76404476", "0.7447651", "0.69627404", "0.63039684", "0.62465614", "0.6126285", "0.60098344", "0.5344913", "0.52650946", "0.5261455", "0.5224008", "0.51507324", "0.51497537", "0.5039444", "0.5002221", "0.4991361", "0.49808443", "0.49703738", "0.4967121", "0.4910888", "0.487...
0.8589521
0
Lookup the topic controller for the given queue and project.
def get_topic_controller(self, topic, project=None): target = self.lookup(topic, project) return target and target.topic_controller
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_queue_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.queue_controller", "def get_subscription_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.subscription_control...
[ "0.7733818", "0.7603851", "0.74324155", "0.728382", "0.63744485", "0.6344827", "0.57456493", "0.5728621", "0.56051105", "0.55933976", "0.55091506", "0.5342035", "0.5326094", "0.53004175", "0.5222844", "0.520292", "0.5191208", "0.51723534", "0.5128026", "0.5117455", "0.508792"...
0.7806749
0
Lookup a pool driver for the given queue and project.
def lookup(self, queue, project=None): try: pool_id = self._pool_id(queue, project) except errors.QueueNotMapped as ex: LOG.debug(ex) return self.get_default_pool(use_listing=False) return self.get_driver(pool_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lookup(self, queue, project=None):\n\n try:\n shard_id = self._shard_id(queue, project)\n except errors.QueueNotMapped as ex:\n LOG.debug(ex)\n\n # NOTE(kgriffs): Return `None`, rather than letting the\n # exception bubble up, so that the higher layer d...
[ "0.7773466", "0.6421427", "0.61614996", "0.58672553", "0.58434623", "0.5762848", "0.56201243", "0.5590634", "0.5587302", "0.5487426", "0.54723954", "0.5447157", "0.5354828", "0.5348546", "0.53464115", "0.52886975", "0.5276325", "0.5234317", "0.521486", "0.518824", "0.51820177...
0.8907264
0
Get storage driver, preferably cached, from a pool name.
def get_driver(self, pool_id, pool_conf=None): try: return self._drivers[pool_id] except KeyError: # NOTE(cpp-cabrera): cache storage driver connection self._drivers[pool_id] = self._init_driver(pool_id, pool_conf) return self._drivers[pool_id]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_storage_backend(self):\n return self.client.info()['Driver']", "def storage_backend_get_by_name(context, name, inactive=False):\n return _find_storage_backend(context, dict(name = name), True, None, inactive=inactive)", "def _get_driver(self, driver_name):\n driver = lb_const.SERVICE_T...
[ "0.6834399", "0.68316096", "0.6733757", "0.67019266", "0.66133344", "0.6580576", "0.65020525", "0.63906044", "0.6277041", "0.6253371", "0.6227523", "0.6190082", "0.6147456", "0.61158353", "0.60346544", "0.60328054", "0.60253716", "0.6005569", "0.5980574", "0.5942194", "0.5914...
0.7380291
0
Open the URL for the given DOI in the default browser
def open_doi(doi): webbrowser.open_new_tab(DOI_URL % doi)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_in_browser(self):\n webbrowser.open(self.url)", "def open(url):\r\n webbrowser.open(url)", "def browser_open(story_id, arguments):\r\n\r\n story = load_story(story_id, arguments)\r\n\r\n webbrowser.open(story.url)", "def open_url(name):\n url = localReadConfig.get_webServer(name)\...
[ "0.7175303", "0.70528334", "0.6925984", "0.68447846", "0.66982275", "0.6694972", "0.654901", "0.65459543", "0.64876926", "0.6484498", "0.6445093", "0.64057773", "0.63870794", "0.6381969", "0.63739616", "0.6357972", "0.6353263", "0.62887174", "0.62686133", "0.6265236", "0.6234...
0.82702404
0
Given a FireBrowser object will then check to see if there are notifications then will go through the list and respond appropriately to them.
def run(browser: FireBrowser): midline("STATUS UPDATING") if browser.find_tab("Portal") is not -1: browser.switch_tab("Portal") elif not browser.tab_names: login.run(browser) else: login.run(browser) file = FileHandle("status") data = file.read_to_data() browser.remind_me_later() if browser.check_selector(data['notify_check']): write("Looking at Notifications...") while delete_notifications(browser, data): browser.click(data['notify_dropdown_click']) while __find_updateable_notification(browser, data): """Check to see if asurion or if appointment.""" browser.click(data['lead_textarea']) headings = browser.get_elements_text(data['lead_type_heading']) if contain_list("Appointment", headings): browser.click(data['status_select']) browser.click(data['status_select_awaiting']) browser.send_text(data['appointment_text'], 'lead_textarea') elif contain_list("Asurion", headings): browser.send_text(data['asurion_text'], 'lead_textarea')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def notifications(self):\r\n return notifications.Notifications(self)", "def notifications(self):\r\n return notifications.Notifications(self)", "def get_user_notifications(self, login):", "async def update_cache_from_notification(self) -> List[Notification]:\n new_notifications = []\n ...
[ "0.5953895", "0.5953895", "0.5901647", "0.584553", "0.5815878", "0.58042645", "0.5801946", "0.5765564", "0.57637656", "0.5749498", "0.5722716", "0.5661563", "0.5660522", "0.56066173", "0.5565932", "0.55315346", "0.55289805", "0.5522268", "0.5481762", "0.54630154", "0.5443189"...
0.6035755
0
figure out filename (or eventually URI) of pregenerated NEMSformat recording for a given cell/batch/loader string very baphyspecific. Needs to be coordinated with loader processing in nems0.xform_helper
def generate_recording_uri(cellid, batch, loader): options = {} if loader in ["ozgf100ch18", "ozgf100ch18n"]: options = {'rasterfs': 100, 'includeprestim': True, 'stimfmt': 'ozgf', 'chancount': 18} elif loader in ["ozgf100ch18pup", "ozgf100ch18npup"]: options = {'rasterfs': 100, 'stimfmt': 'ozgf', 'chancount': 18, 'pupil': True, 'stim': True, 'pupil_deblink': True, 'pupil_median': 2} elif (loader.startswith("nostim200pup") or loader.startswith("psth200pup") or loader.startswith("psths200pup")): options = {'rasterfs': 200, 'stimfmt': 'parm', 'chancount': 0, 'pupil': True, 'stim': False, 'pupil_deblink': 1, 'pupil_median': 0.5} elif loader.startswith("nostim10pup") or loader.startswith("psth10pup"): options = {'rasterfs': 10, 'stimfmt': 'parm', 'chancount': 0, 'pupil': True, 'stim': False, 'pupil_deblink': True, 'pupil_median': 2} elif (loader.startswith("nostim20pup") or loader.startswith("psth20pup") or loader.startswith("psths20pup") or loader.startswith("evt20pup")): options = {'rasterfs': 20, 'stimfmt': 'parm', 'chancount': 0, 'pupil': True, 'stim': False, 'pupil_deblink': 1, 'pupil_median': 0.5} elif (loader.startswith("nostim20") or loader.startswith("psth20") or loader.startswith("psthm20") or loader.startswith("psths20")): options = {'rasterfs': 20, 'stimfmt': 'parm', 'chancount': 0, 'pupil': False, 'stim': False} elif (loader.startswith("env100") or loader.startswith("envm100")): options = {'rasterfs': 100, 'stimfmt': 'envelope', 'chancount': 0} elif loader.startswith("env200"): options = {'rasterfs': 200, 'stimfmt': 'envelope', 'chancount': 0} else: raise ValueError('unknown loader string') recording_uri = get_recording_file(cellid, batch, options) return recording_uri
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getWaveformFileName(self):\n return self.waveform_info.split(\":\")[1][:20]", "def _get_output_filename(dataset_dir, split_name):\n return '%s/fer_%s.tfrecord' % (dataset_dir, split_name)", "def parse_rarefaction_fname(name_string):\r\n\r\n root, ext = os.path.splitext(name_string)\r\n root_l...
[ "0.58643293", "0.57457685", "0.5731537", "0.5680883", "0.5600236", "0.55516845", "0.55441797", "0.55333227", "0.5529272", "0.55155426", "0.54681367", "0.5426146", "0.541352", "0.5407", "0.54030865", "0.537808", "0.5370207", "0.53593916", "0.53360903", "0.5330803", "0.5321981"...
0.6297846
0
Pipeline for process of scraping new data Each step in the pipeline has corresponding directory of plugins. Plugins are dynamically loaded based on files in the corresponding dir.
def scrape_pipeline(args): kickoff = args.kickoff fname = args.fname d = DbHelper() s = Scraper() c = Crawler(20) if fname is not None: app_names = pd.read_csv(fname)['packageName'].tolist() apps = [list(a) for a in zip(app_names, d.app_names_to_uuids(app_names))] else: apps = None # start by updating top apps if not args.skip_top: logger.info("getting top apps...") new_top_list = c.get_top_apps_list() logger.info("scraping top apps not in DB...") s.scrape_missing(new_top_list, compare_top=True) logger.info("updating top apps...") d.update_top_apps(new_top_list) if kickoff == True: s = None if fname is None: # use crawler to get list of package names logger.error("Crawler for package names not implemented yet") return else: # use specified file of package names s = Scraper(input_file=fname) # use scraper logger.info("Starting efficient scrape...") s.efficient_scrape() logger.info("...efficient scrape done") else: # use updater logger.info("Starting updater...") if fname is None: u = Updater() else: u = Updater(input_file=fname) u.update_apps() logger.info("...update done") # crawl privacy policies c.crawl_app_privacy_policies(app_list=apps) if args.no_decompile: # download only logger.info("Starting download...") downloader = Downloader() if apps is None: downloader.download_all_from_db(top=True) else: downloader.download(apps) logger.info("...done") else: # download/decompile logger.info("Starting download and decompile...") download_decompile_all() logger.info("...download and decompile done") logger.info("run analysis pipeline now")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collectPlugins(self):\n\t\tself.locatePlugins()\n\t\tself.loadPlugins()", "async def load_plugins(self):\n for plug in os.listdir('plugins'):\n if plug.startswith('.'):\n continue\n if not os.path.isdir('plugins/%s' % plug) or not os.path.isfile('plugins/%s/hook.py...
[ "0.6368807", "0.63078946", "0.5957224", "0.59520274", "0.5943972", "0.59165645", "0.58626246", "0.5809751", "0.5809528", "0.57702124", "0.57576746", "0.5738377", "0.5659071", "0.5647176", "0.56273735", "0.55972546", "0.55384547", "0.55326164", "0.5529749", "0.55208814", "0.55...
0.5151529
55
Test if the add operation returns the correct result for a test case
def test_add_int(self): self.assertEqual(operations.add(3,4), 7)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_returns_correct_result(self):\n result = self.calc.add(2, 2)\n self.assertEqual(4, result)", "def test_add(self):\n self.assertEqual(add(1, 1), 2, \"Wrong answer\")\n self.assertEqual(add(10, 1), 11, \"Wrong answer\")\n self.assertEqual(add(15, 15), 30, \"Wrong ans...
[ "0.87641716", "0.8728581", "0.85901284", "0.83460325", "0.8322828", "0.82488525", "0.82488525", "0.82488525", "0.8183821", "0.8175811", "0.80814475", "0.8047177", "0.8006275", "0.79732543", "0.79165095", "0.7913617", "0.7900404", "0.77927405", "0.7747016", "0.774245", "0.7690...
0.8132406
10
Test if the devide operation returns the correct result for a test case
def test_devide_int(self): self.assertEqual(operations.devide(8,4), 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_four_divided_by_two():\n assert divide(4, 2) == 2", "def test_call_decompose(self):\n dec = TwoQubitDecomposeUpToDiagonal()\n u4 = scipy.stats.unitary_group.rvs(4, random_state=47)\n dmat, circ2cx = dec(u4)\n dec_diag = dmat @ Operator(circ2cx).data\n self.assertTru...
[ "0.6033728", "0.60241795", "0.5930758", "0.5923345", "0.59231955", "0.5863153", "0.5844059", "0.58171606", "0.5786536", "0.575913", "0.5739477", "0.5738475", "0.5738475", "0.57308537", "0.5692235", "0.56907034", "0.5671279", "0.56524765", "0.56412613", "0.5602296", "0.5571406...
0.56738025
16
Test if the equation 1 + 2 is parsed and calculated correctly
def test_parse_add(self): self.assertEqual(parse_input.parse(["1", "+", "2"]), 3)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def autosolve(equation):\n\n try:\n # Try to set a variable to an integer\n num1 = int(equation.split(\" \")[0])\n\n except ValueError:\n # Try to set a variable to a decimal\n num1 = float(equation.split(\" \")[0])\n\n try:\n # Try to set a variable to an integer\n ...
[ "0.69371355", "0.69028455", "0.68847096", "0.68847096", "0.6814599", "0.6751683", "0.6709436", "0.6692912", "0.66831404", "0.66694516", "0.6641257", "0.66409284", "0.6588774", "0.6569552", "0.6498368", "0.649347", "0.64901173", "0.6489638", "0.64841825", "0.6454213", "0.64497...
0.59757686
62
Test if the equation 3 / 2 is parsed and calculated correctly
def test_parse_devide(self): self.assertEqual(parse_input.parse(["8", "/", "4"]), 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_calculate_three_operations_in_bracket(self):\n result = self.calcuate.calcuate('(2x2+1+7)x3-2')\n expected_result = \"34\"\n self.assertEqual(expected_result, result)", "def is_equation(self): \n return False", "def is_equation(self):\n return True", "def is...
[ "0.6916226", "0.68524635", "0.67907655", "0.67907655", "0.66762483", "0.66166973", "0.6573047", "0.6543683", "0.6413765", "0.62079144", "0.61546904", "0.60421985", "0.603584", "0.60189974", "0.6009543", "0.6008097", "0.5994459", "0.5980641", "0.5955912", "0.59496975", "0.5947...
0.0
-1
Load PTB raw data from data directory "data_path". Reads PTB text files, converts strings to integer ids, and performs minibatching of the inputs.
def ptb_raw_data(data_path=None): train_path = os.path.join(data_path, "ptb.train.txt") valid_path = os.path.join(data_path, "ptb.valid.txt") test_path = os.path.join(data_path, "ptb.test.txt") word_to_id = _build_vocab(train_path) train_data = _file_to_word_ids(train_path, word_to_id) valid_data = _file_to_word_ids(valid_path, word_to_id) test_data = _file_to_word_ids(test_path, word_to_id) vocabulary = len(word_to_id) return train_data, valid_data, test_data, vocabulary
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ptb_raw_data(data_path=None):\n\n\t# train_path = os.path.join(data_path, \"ptb.train.txt\")\n\t# valid_path = os.path.join(data_path, \"ptb.valid.txt\")\n\t# test_path = os.path.join(data_path, \"ptb.test.txt\")\n\n\tdata = np.load(data_path)\n\t# data = np.load(data_path).item()\n\t# f = open(data_path)\n\t#...
[ "0.67146957", "0.6536038", "0.6505355", "0.6482044", "0.62835264", "0.62004197", "0.61886907", "0.6138994", "0.6122752", "0.6041423", "0.6030121", "0.5910836", "0.58546567", "0.5850749", "0.5726605", "0.5721663", "0.5707934", "0.5683992", "0.5656485", "0.5635938", "0.56157595...
0.6501877
3
Load raw data from data directory "data_path".
def europarl_raw_data( data_path='bigdata/training', lang1='de-en-german.txt', lang2='de-en-english.txt', max_train_len=32, train_size=1600000, val_size=160000, ): lang1_path = os.path.join(data_path, lang1) lang2_path = os.path.join(data_path, lang2) split_data = _train_val_test_split( [_read_lines(lang1_path), _read_lines(lang2_path)], train_size, val_size ) lang1_train, lang1_val, lang1_test = split_data[0] lang2_train, lang2_val, lang2_test = split_data[1] lang1_idx2word, lang1_word2idx = _build_vocab_from_sentences(lang1_train) lang2_idx2word, lang2_word2idx = _build_vocab_from_sentences(lang2_train) lang1_train_vectorized = _convert_sentences_to_ids( lang1_train, lang1_word2idx ) lang1_val_vectorized = _convert_sentences_to_ids( lang1_val, lang1_word2idx ) lang1_test_vectorized = _convert_sentences_to_ids( lang1_test, lang1_word2idx ) lang2_train_vectorized = _convert_sentences_to_ids( lang2_train, lang2_word2idx ) X_train, y_train = _convert_to_numpy_by_length( lang1_train_vectorized, lang2_train_vectorized, max_train_len, ) X_val = _convert_to_numpy(lang1_val_vectorized) X_test = _convert_to_numpy(lang1_test_vectorized) return { 'vocab': { 'lang1_idx2word': lang1_idx2word, 'lang1_word2idx': lang1_word2idx, 'lang2_idx2word': lang2_idx2word, 'lang2_word2idx': lang2_word2idx, }, 'train': { 'X': X_train, 'y': y_train, }, 'val': { 'X': X_val, 'y': lang2_val, }, 'test': { 'X': X_test, 'y': lang2_test, }, }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_data(self, path):\n with open(self.path_to_file, \"r\") as f:\n data = f.read()\n\n return data", "def load_data(path):\n input_file = os.path.join(path)\n with open(input_file, 'r', encoding='utf-8') as f:\n return f.read()", "def load_data(path):\n with open...
[ "0.7591733", "0.7125923", "0.70399314", "0.70078355", "0.69444233", "0.6751344", "0.67399246", "0.6689886", "0.6673148", "0.66665107", "0.66659683", "0.6657956", "0.6638113", "0.654055", "0.64056915", "0.63831306", "0.6358966", "0.6358966", "0.63364625", "0.6292081", "0.62728...
0.0
-1
Iterate on the raw PTB data. This chunks up raw_data into batches of examples and returns Tensors that are drawn from these batches.
def ptb_producer(raw_data, batch_size, num_steps, name=None):
    """Iterate on the raw PTB data.

    Chunks ``raw_data`` into batches of examples and returns graph Tensors
    that are drawn from these batches.

    Args:
        raw_data: a flat sequence of word ids (one of the PTB reader outputs).
        batch_size: int, the batch size.
        num_steps: int, the number of unrolls (sequence length per example).
        name: optional name for the op scope.

    Returns:
        A pair of Tensors, each shaped ``[batch_size, num_steps]``. The
        second element is the same data time-shifted right by one (targets).

    Raises:
        tf.errors.InvalidArgumentError: at run time, if ``batch_size`` or
            ``num_steps`` are too large for the data (epoch_size would be 0).
    """
    with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
        raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)

        data_len = tf.size(raw_data)
        batch_len = data_len // batch_size
        # Drop the trailing remainder so the data reshapes evenly into
        # batch_size rows of batch_len ids each.
        data = tf.reshape(raw_data[0 : batch_size * batch_len],
                          [batch_size, batch_len])

        # One position is reserved because targets are inputs shifted by one.
        epoch_size = (batch_len - 1) // num_steps
        assertion = tf.assert_positive(
            epoch_size,
            message="epoch_size == 0, decrease batch_size or num_steps")
        with tf.control_dependencies([assertion]):
            epoch_size = tf.identity(epoch_size, name="epoch_size")

        # Queue-backed counter yielding the current slice index per session step.
        i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
        x = tf.strided_slice(data, [0, i * num_steps],
                             [batch_size, (i + 1) * num_steps])
        x.set_shape([batch_size, num_steps])
        # Targets: the same window shifted one timestep to the right.
        y = tf.strided_slice(data, [0, i * num_steps + 1],
                             [batch_size, (i + 1) * num_steps + 1])
        y.set_shape([batch_size, num_steps])
        return x, y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ptb_iterator(raw_data, batch_size, num_steps):\n raw_data = np.array(raw_data, dtype=np.int32)\n\n data_len = len(raw_data)\n batch_len = data_len // batch_size\n data = np.zeros([batch_size, batch_len], dtype=np.int32)\n for i in range(batch_size):\n data[i] = raw_data[batch_len * i:batch_len * (i + 1...
[ "0.6978561", "0.6902418", "0.63087994", "0.61956096", "0.61338556", "0.6096684", "0.60834736", "0.60573965", "0.6056362", "0.6045705", "0.60360193", "0.6022146", "0.601624", "0.6003096", "0.59680426", "0.5925938", "0.59207135", "0.5913195", "0.59102833", "0.5889849", "0.58750...
0.54554814
77
Return the notify service.
def get_service(
    hass: HomeAssistant,
    config: ConfigType,
    discovery_info: DiscoveryInfoType | None = None,
) -> RocketChatNotificationService | None:
    """Return the Rocket.Chat notify service, or None if setup fails."""
    url = config.get(CONF_URL)
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    room = config.get(CONF_ROOM)

    try:
        service = RocketChatNotificationService(url, username, password, room)
    except RocketConnectionException:
        _LOGGER.warning("Unable to connect to Rocket.Chat server at %s", url)
    except RocketAuthenticationException:
        _LOGGER.warning("Rocket.Chat authentication failed for user %s", username)
        _LOGGER.info("Please check your username/password")
    else:
        return service
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_service(hass, config, discovery_info=None):\n\n sender_nr = config[CONF_SENDER_NR]\n recp_nrs = config[CONF_RECP_NR]\n signal_cli_rest_api_url = config[CONF_SIGNAL_CLI_REST_API]\n\n signal_cli_rest_api = SignalCliRestApi(signal_cli_rest_api_url, sender_nr)\n\n return SignalNotificationServic...
[ "0.6690221", "0.6181973", "0.6181973", "0.6181973", "0.6116812", "0.60631627", "0.6032626", "0.6008227", "0.5919631", "0.5903544", "0.5897824", "0.5842087", "0.5824085", "0.5796516", "0.5742724", "0.5742724", "0.57323897", "0.5706646", "0.56921333", "0.5666572", "0.56374615",...
0.5747234
14
Send a message to Rocket.Chat.
def send_message(self, message="", **kwargs):
    """Post *message* to the configured Rocket.Chat room.

    Extra payload options may be supplied under the ``data`` key of kwargs
    and are forwarded to the Rocket.Chat API call.
    """
    extra = kwargs.get(ATTR_DATA) or {}
    response = self._server.chat_post_message(message, channel=self._room, **extra)
    if response.status_code != HTTPStatus.OK:
        _LOGGER.error(
            "Incorrect status code when posting message: %d", response.status_code
        )
    elif not response.json()["success"]:
        # HTTP 200 but the API itself reported a failure.
        _LOGGER.error("Unable to post Rocket.Chat message")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sendChatMessage(self, msg):\n self.transport.write(msg)", "def send_message(self, message):\n \n msgPacket = serverbound.play.ChatPacket()\n msgPacket.message = message\n self.connection.write_packet(msgPacket)", "def send(self, msg):\n self.message('Me', msg)", "def...
[ "0.79602695", "0.7867005", "0.7461435", "0.74155366", "0.73181975", "0.72749156", "0.72387594", "0.72059983", "0.71983445", "0.71811515", "0.7162376", "0.71472645", "0.7142168", "0.7140404", "0.70665747", "0.7054692", "0.704193", "0.70210105", "0.69757414", "0.69357747", "0.6...
0.6836778
31
Unit test for Roybal_Student_Analytics constructor.
def test_init(self):
    """The constructor should load all 89 rows of the data set."""
    analytics = Student_Analytics()
    self.assertEqual(len(analytics.data), 89)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, student):\n pass", "def __init__(self, student, start_date, day_periods):\n self.student = student\n self.start_date = start_date\n self.day_periods = day_periods\n self.student_name = student.full_name_lastname_first(\n show_middle_name=False)\n ...
[ "0.6371787", "0.6314157", "0.6181478", "0.6096545", "0.6089736", "0.60254514", "0.5956355", "0.59550136", "0.594131", "0.58837605", "0.58226454", "0.5809876", "0.57946306", "0.5790085", "0.578611", "0.57836026", "0.57833344", "0.57766145", "0.5764777", "0.5755531", "0.574614"...
0.75536776
0
Unit test for Roybal_Student_Analytics classify_grade method.
def test_classify_grade(self):
    """A perfect 5.00 average maps to the letter grade A+."""
    analytics = Student_Analytics()
    self.assertEqual(analytics.classify_grade(5.00), "A+")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_avg_grade(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.classify_grade(s.avg_grade(3)),\"B\")", "def classification_score(self, x, y):\t\n\t\tpass", "def test_classify(self):\n classifiers, estimates =\\\n ada_boost.train_dataset(self.larger_matrix,\n ...
[ "0.8039397", "0.65274847", "0.640173", "0.6289307", "0.6219212", "0.5914755", "0.5887433", "0.5776925", "0.5719986", "0.56444573", "0.56377053", "0.56352776", "0.56190753", "0.56097853", "0.5556611", "0.55541223", "0.55449235", "0.5517405", "0.55008376", "0.54830605", "0.5482...
0.8334534
0
Unit test for Roybal_Student_Analytics element_count method.
def test_element_count(self):
    """Column 2 should contain exactly six occurrences of "F"."""
    analytics = Student_Analytics()
    self.assertEqual(analytics.element_count(2, "F"), 6)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_elements_count():\n # GIVEN\n bboxes = [8.67066,49.41423,8.68177,49.4204]\n time = \"2010-01-01/2011-01-01/P1Y\"\n keys = [\"building\"]\n values = [\"\"]\n\n timestamps = [\"2010-01-01T00:00:00Z\", \"2011-01-01T00:00:00Z\"]\n counts = [53.0, 256.0]\n expected = pd.DataFrame({\"tim...
[ "0.68616205", "0.6832444", "0.67097497", "0.66983885", "0.667081", "0.66494507", "0.66494507", "0.66494507", "0.66494507", "0.6644853", "0.66385466", "0.6608439", "0.6572097", "0.64722455", "0.64489216", "0.6442627", "0.6383579", "0.63805336", "0.6367451", "0.63651866", "0.63...
0.8672462
0
Unit test for Roybal_Student_Analytics avg_grade method.
def test_avg_grade(self):
    """The average grade for column 3 should classify as a "B"."""
    analytics = Student_Analytics()
    self.assertEqual(analytics.classify_grade(analytics.avg_grade(3)), "B")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_classify_grade(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.classify_grade(5.00),\"A+\")", "def test_multiple_averages(self):\n user = self.make_user()\n enrollment = EnrollmentFactory(grade_level__school_year__school=user.school)\n GradeFactory(\n score=50...
[ "0.7095265", "0.6956697", "0.68770933", "0.68014836", "0.68014836", "0.6484994", "0.6410727", "0.6400051", "0.63546544", "0.63439816", "0.627917", "0.62735564", "0.62159175", "0.6123687", "0.6123593", "0.6098858", "0.6067414", "0.60544163", "0.5998449", "0.59840584", "0.59782...
0.8476935
0
Unit test for Roybal_Student_Analytics grade_change method.
def test_grade_change(self):
    """The overall grade change, truncated to an int, should be zero."""
    analytics = Student_Analytics()
    self.assertEqual(int(analytics.grade_change()), 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_avg_grade(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.classify_grade(s.avg_grade(3)),\"B\")", "def test_grade(self, grade):\n self.client.login(username=self.student.username, password=self.password)\n with patch('lms.djangoapps.grades.course_grade_factory.CourseGradeFactor...
[ "0.6894253", "0.67889714", "0.6747671", "0.6735463", "0.65195036", "0.63752735", "0.63277483", "0.6276599", "0.62044054", "0.6194973", "0.6135887", "0.6101513", "0.6052034", "0.5957101", "0.5942507", "0.59255475", "0.58964777", "0.5884683", "0.58781224", "0.58647764", "0.5830...
0.8471558
0
Return true if the socket managed by this connection is connected
def is_connected(self):
    """Return True if the managed websocket exists, reports itself
    connected, and the base transport also considers itself connected."""
    sock = self.socket
    if sock is None:
        return False
    return sock.connected and super(WebsocketTransport, self).is_connected()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_connected(self):\n if self._socket:\n return True\n else:\n return False", "def is_connected(self):\r\n return self.__socket is not None", "def is_connected(self):\n return self._socket is not None", "def getIsConnected(self):\n if self._socket ...
[ "0.9105765", "0.8836678", "0.8816751", "0.8545325", "0.8359238", "0.82961154", "0.82776904", "0.8272576", "0.82721364", "0.82631487", "0.82311773", "0.8210377", "0.8137813", "0.80617905", "0.80345", "0.80271804", "0.79471153", "0.79411083", "0.7917166", "0.7908697", "0.790361...
0.82416207
10
Disconnect the underlying socket connection
def disconnect_socket(self):
    """Disconnect the underlying websocket and mark the transport stopped.

    Closes the socket if one exists, clears the connection bookkeeping,
    and notifies listeners of the disconnect.
    """
    self.running = False
    sock = self.socket
    if sock is not None:
        sock.close()
    self.current_host_and_port = None
    self.socket = None
    self.notify('disconnected')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def disconnect(self):\n self.connected = False\n self.socket.close()", "def disconnect(self):\n self.connected = False\n try:\n self.protocol.send_message(self.sock, '__!goodbye__')\n data = self.protocol.recover_message(self.sock)\n except:\n p...
[ "0.8091306", "0.8073663", "0.8069597", "0.7900187", "0.7873242", "0.7846093", "0.7717508", "0.7700298", "0.7663556", "0.76379234", "0.75356364", "0.7519973", "0.7475285", "0.74681747", "0.74621254", "0.7447179", "0.74322003", "0.74192196", "0.7402269", "0.7398234", "0.7384707...
0.81088334
0
Close the socket and clear the current host and port details.
def cleanup(self):
    """Close the socket and clear the current host and port details.

    Errors raised while closing are deliberately ignored: the socket may
    already be dead (or be None), and cleanup must always leave the
    transport in a fully disconnected state.
    """
    try:
        self.socket.close()
    except Exception:
        # Ignore errors when attempting to close the socket. A bare
        # `except:` would also swallow KeyboardInterrupt/SystemExit,
        # which must be allowed to propagate.
        pass
    self.socket = None
    self.current_host_and_port = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close(self):\n print('Closing server socket (host {}, port {})'.format(self.host, self.port))\n if self.sock:\n self.sock.close()\n self.sock = None", "def close(self):\n self.socket.close()", "def close(self):\n self.socket.close()", "def close(self):\n ...
[ "0.75493366", "0.747122", "0.747122", "0.747122", "0.747122", "0.747122", "0.747122", "0.7457175", "0.7457175", "0.7422063", "0.7360431", "0.73594326", "0.7347399", "0.7302408", "0.72775865", "0.7206107", "0.71936274", "0.7185615", "0.7109103", "0.7105997", "0.70846534", "0...
0.779209
0
Try connecting to the (host, port) tuples specified at construction time.
def attempt_connection(self):
    """Try connecting to the (host, port) tuples specified at construction time.

    Cycles through the configured endpoints until one accepts a websocket
    connection, sleeping with truncated exponential backoff (plus random
    jitter) between full passes. Gives up after the configured maximum
    number of failed attempts.

    Raises:
        exception.ConnectFailedException: if no endpoint could be reached.
    """
    self.connection_error = False
    sleep_exp = 1          # exponent for the backoff growth between passes
    connect_count = 0      # total failed connection attempts so far

    # Keep trying while the transport is running, nothing is connected yet,
    # and we have attempts left.
    while self.running and self.socket is None and connect_count < self.__reconnect_attempts_max:
        for host_and_port in self.__hosts_and_ports:
            try:
                log.info("Attempting connection to websocket %s", host_and_port)
                self.socket = websocket.WebSocket()
                # Tuple layout: (host, port, path, proto).
                proto, host, port, path = host_and_port[3], host_and_port[0], host_and_port[1], host_and_port[2]
                if port:
                    ws_uri = '{}://{}:{}/{}'.format(proto, host, port, path)
                else:
                    # No port given: let the scheme's default apply.
                    ws_uri = '{}://{}/{}'.format(proto, host, path)
                self.socket.connect(ws_uri, timeout=self.__timeout)
                self.current_host_and_port = host_and_port
                log.info("Established connection to %s", ws_uri)
                break
            except WebSocketException:
                # Reset and count the failure; try the next endpoint.
                self.socket = None
                connect_count += 1
                log.warning("Could not connect to host %s, port %s", host_and_port[0], host_and_port[1], exc_info=1)

        if self.socket is None:
            # Truncated exponential backoff with jitter before the next pass.
            sleep_duration = (min(self.__reconnect_sleep_max,
                                  ((self.__reconnect_sleep_initial / (1.0 + self.__reconnect_sleep_increase))
                                   * math.pow(1.0 + self.__reconnect_sleep_increase, sleep_exp)))
                              * (1.0 + random.random() * self.__reconnect_sleep_jitter))
            sleep_end = monotonic() + sleep_duration
            log.debug("Sleeping for %.1f seconds before attempting reconnect", sleep_duration)
            # Sleep in short slices so a stop request is noticed promptly.
            while self.running and monotonic() < sleep_end:
                time.sleep(0.2)

            # Stop growing the exponent once the cap has been reached.
            if sleep_duration < self.__reconnect_sleep_max:
                sleep_exp += 1

    if not self.socket:
        raise exception.ConnectFailedException()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect(self, host, port):\n pass", "def connect(self):\n \n try:\n self.__sock.connect((self.__host, self.__port))\n\n except socket.error,e:\n print 'Oops, unable to connect. Try again!',e\n sys.exit(1)", "async def _connect(self, host_loc):\n ...
[ "0.73735076", "0.69012636", "0.6894325", "0.6791065", "0.67294514", "0.6698651", "0.66936195", "0.665516", "0.6614224", "0.66082466", "0.6591837", "0.65764606", "0.65412575", "0.64659244", "0.64588004", "0.642933", "0.6411092", "0.64094645", "0.63941145", "0.6381039", "0.6369...
0.0
-1
Call the protocol disconnection, and then stop the transport itself.
def disconnect(self, receipt=None, headers=None, **keyword_headers):
    """Call the protocol disconnection, and then stop the transport itself.

    :param receipt: optional receipt id to confirm the disconnect with
    :param headers: optional additional headers for the DISCONNECT frame
    :param keyword_headers: further headers, given as keyword arguments
    """
    # Order matters: perform the protocol-level disconnect first, then
    # tear down the underlying transport.
    Protocol11.disconnect(self, receipt, headers, **keyword_headers)
    self.transport.stop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop(self):\n self._transport = None\n self._cleanup()\n self._disconnected_callback = None", "def transportProtocolDisconnected(self, obj):\n if obj:\n log.msg(\"Protocol disconnected, losing connection..\")\n self.connection.loseConnection()\n tr...
[ "0.76196885", "0.73054206", "0.7146738", "0.7062836", "0.70271856", "0.6989695", "0.6907226", "0.6885972", "0.6851431", "0.67708176", "0.6740143", "0.6737467", "0.6714717", "0.6714717", "0.66998696", "0.6699246", "0.6678658", "0.66435677", "0.66178954", "0.66106606", "0.66057...
0.633989
47
Returns the network connected to the tenant router. Assumes a single router with a single tenant network connected.
def _tenant_network(self):
    """Return the network connected to the tenant router.

    Assumes a single router with a single tenant network connected.

    Raises:
        errors.ImproperlyConfiguredError: if no router-interface port exists.
    """
    port = self._connection.network.ports.find_by_device_owner('network:router_interface')
    if not port:
        raise errors.ImproperlyConfiguredError('Could not find tenancy network')
    return self._connection.network.networks.get(port.network_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _external_network(self):\n try:\n router = next(self._connection.network.routers.all())\n except StopIteration:\n raise errors.ImproperlyConfiguredError('Could not find tenancy router.')\n return self._connection.network.networks.get(router.external_gateway_info['netw...
[ "0.77449316", "0.7281764", "0.7092121", "0.70122546", "0.6769052", "0.6733666", "0.6733666", "0.6733666", "0.6582788", "0.65117043", "0.6201502", "0.61733115", "0.6108502", "0.60738486", "0.60403067", "0.6020271", "0.600645", "0.5939682", "0.5937701", "0.5896336", "0.58795285...
0.796581
0
Returns the external network that connects the tenant router to the outside world.
def _external_network(self):
    """Return the external network that connects the tenant router to
    the outside world.

    Raises:
        errors.ImproperlyConfiguredError: if the tenancy has no router.
    """
    router = next(iter(self._connection.network.routers.all()), None)
    if router is None:
        raise errors.ImproperlyConfiguredError('Could not find tenancy router.')
    gateway_info = router.external_gateway_info
    return self._connection.network.networks.get(gateway_info['network_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _tenant_network(self):\n port = self._connection.network.ports.find_by_device_owner('network:router_interface')\n if port:\n return self._connection.network.networks.get(port.network_id)\n else:\n raise errors.ImproperlyConfiguredError('Could not find tenancy network'...
[ "0.7765178", "0.7262986", "0.6995677", "0.6902639", "0.6894767", "0.67648536", "0.6673926", "0.66279644", "0.6617179", "0.6617179", "0.6617179", "0.64150774", "0.64139897", "0.6407208", "0.63487035", "0.6331511", "0.6266315", "0.6229477", "0.6182302", "0.6143386", "0.61390054...
0.8163727
0
Returns the cluster manager for the tenancy.
def cluster_manager(self):
    """Return the cluster manager for the tenancy.

    The manager is created lazily on first access from the configured
    cluster engine. If no engine is configured (or it yields no manager),
    clusters are unsupported for this tenancy.

    Raises:
        errors.UnsupportedOperationError: if clusters are not supported.
    """
    try:
        manager = self._cluster_manager
    except AttributeError:
        # First access: build the manager from the engine, if one exists.
        if self._cluster_engine:
            manager = self._cluster_engine.create_manager(
                self._username,
                self._tenancy
            )
        else:
            manager = None
        self._cluster_manager = manager
    if not manager:
        raise errors.UnsupportedOperationError(
            'Clusters are not supported for this tenancy.'
        )
    return manager
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetManager(self):\r\n\r\n return self.manager", "def cluster(self):\n return self._cluster", "def cluster(self):\n return self._cluster", "def get_manager(api_version=None):\n from manager import get_keystone_manager\n return get_keystone_manager(get_local_endpoint(), get_admin...
[ "0.66724825", "0.665754", "0.665754", "0.66434646", "0.663374", "0.6481853", "0.6345078", "0.6344062", "0.62795883", "0.6278968", "0.62144953", "0.60009164", "0.59885573", "0.5977996", "0.59189504", "0.5905458", "0.5904256", "0.5867709", "0.5859221", "0.58005214", "0.5737495"...
0.85531485
0
Fix up the cluster with any OpenStackspecific changes.
def _fixup_cluster(self, cluster):
    """Fix up the cluster with any OpenStack-specific changes.

    Strips parameters this layer injected, merges in tags from the backing
    Heat stack, and rewrites raw OpenStack error messages into user-facing
    text. Returns a new cluster record (the input is not mutated).
    """
    # Remove injected parameters from the cluster params
    params = {
        k: v
        for k, v in cluster.parameter_values.items()
        if k != 'cluster_network'
    }
    # Add any tags attached to the stack
    try:
        stack = self._connection.orchestration.stacks.find_by_stack_name(cluster.name)
    except rackit.NotFound:
        stack = None
    # We use this format because tags might exist on the stack but be None
    stack_tags = tuple(getattr(stack, 'tags', None) or [])
    original_error = (cluster.error_message or '').lower()
    # Convert quota-related error messages based on known OpenStack errors
    if any(m in original_error for m in {'quota exceeded', 'exceedsavailablequota'}):
        if 'floatingip' in original_error:
            error_message = (
                'Could not find an external IP for deployment. '
                'Please ensure an external IP is available and try again.'
            )
        else:
            error_message = (
                'Requested resources exceed at least one quota. '
                'Please check your tenancy quotas and try again.'
            )
    elif cluster.error_message:
        # Any other backend error is hidden behind a generic message.
        error_message = (
            'Error during cluster configuration. '
            'Please contact support.'
        )
    else:
        error_message = None
    return cluster._replace(
        parameter_values = params,
        tags = cluster.tags + stack_tags,
        error_message = error_message
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def two_clusters_reconfiguration(self):\n\n self.show_step(1)\n self.env.revert_snapshot(\"ready_with_5_slaves\")\n\n self.show_step(2)\n cluster_id_1 = self.fuel_web.create_cluster(\n name=\"env1\",\n mode=settings.DEPLOYMENT_MODE,\n settings={\n ...
[ "0.656177", "0.6422964", "0.64025843", "0.61629826", "0.61626065", "0.60646015", "0.6063085", "0.6026249", "0.59341043", "0.59069663", "0.5800315", "0.57841843", "0.57784903", "0.5644694", "0.5628729", "0.5616941", "0.56099975", "0.56072444", "0.55292267", "0.5507146", "0.549...
0.5728941
13
Route for front end to obtain the data for the Location of choice.
async def location_data(location: LocationDataRequest):
    """Route for the front end to obtain the data for the Location of choice.

    Normalises the request parameter into the plain "City, State" string
    stored in the table, looks up each metric, and returns them as a dict.
    """
    # Strip the query-string artefacts so only "City, State" remains.
    location = str(location).replace('location=', "").replace("'", "")

    def _fetch(column):
        # One scalar lookup per metric. The column name is a hard-coded
        # constant (never user input), so interpolating it is safe; the
        # location value itself stays parameterized.
        cursor.execute(
            'SELECT "{}" FROM cityspire WHERE "Location" = %s;'.format(column),
            [location],
        )
        # fetchone() returns a 1-tuple; take the scalar.
        return cursor.fetchone()[0]

    # NOTE(review): the cursor/connection are module-level and deliberately
    # left open — closing them per-request broke the API previously.
    return {
        "city_name": str(location),
        "population": int(_fetch("2019 Population")),
        "rent_per_month": int(_fetch("2019 Rental Rates")),
        "walk_score": int(_fetch("Walk Score")),
        "livability_score": int(_fetch("Livability Score")),
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_location(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/location\"\n })", "def carslocation():\n # Check if user is loggedin\n if 'loggedin' in session:\n\n response = requests.get(\"http://localhost:8080/api/carslocation\")\n print(response.text...
[ "0.6643356", "0.6501124", "0.62959623", "0.6232448", "0.62138563", "0.6093613", "0.60217726", "0.59801704", "0.59584683", "0.58436835", "0.5829568", "0.57859164", "0.577833", "0.5771708", "0.56856674", "0.56406236", "0.5606987", "0.5575436", "0.55724984", "0.5565517", "0.5565...
0.52397954
64
Computes the forward pass for a fully-connected layer. The input x has shape (N, D_in) and contains a minibatch of N examples, where each example x[i] has shape (D_in,).
def fc_forward(x, w, b):
    """Forward pass for a fully-connected (affine) layer.

    Inputs:
    - x: input minibatch of shape (N, ...); each example is flattened
      to a row vector of length D before the matrix multiply
    - w: weights of shape (D, M)
    - b: biases of shape (M,)

    Returns a tuple of:
    - out: output of shape (N, M)
    - cache: (x, w, b), saved for the backward pass
    """
    num_samples = x.shape[0]
    flattened = x.reshape(num_samples, -1)
    out = flattened @ w + b
    cache = (x, w, b)
    return out, cache
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, x): \n # Layer 1\n x = F.elu(self.conv1(x)) # bsize x l1_channels x 1 x Nsamples\n x = self.batchnorm1(x)\n x = F.dropout(x, 0.25)\n x = x.permute(0, 2, 1, 3) # bsize x 1 x l1_channels x Nsamples\n\n # Layer 2\n x = ...
[ "0.7153318", "0.6853486", "0.682324", "0.67808783", "0.6726576", "0.67157876", "0.66740847", "0.66715527", "0.6665411", "0.6663272", "0.6636702", "0.66286105", "0.6605179", "0.65966856", "0.6590063", "0.65451944", "0.6540172", "0.6539711", "0.6511885", "0.64758414", "0.645289...
0.0
-1
Computes the backward pass for a fully-connected layer.
def fc_backward(dout, cache):
    """Backward pass for a fully-connected layer.

    Inputs:
    - dout: upstream derivatives, shape (N, M)
    - cache: the (x, w, b) tuple saved by fc_forward

    Returns a tuple of:
    - dx: gradient with respect to x, same shape as x
    - dw: gradient with respect to w, shape (D, M)
    - db: gradient with respect to b, shape (M,)
    """
    x, w, b = cache
    num_samples = x.shape[0]
    flattened = x.reshape(num_samples, -1)
    # Route the upstream gradient back through the affine transform.
    dx = (dout @ w.T).reshape(x.shape)
    dw = flattened.T @ dout
    # Bias gradient: sum the upstream gradient over the batch dimension.
    db = dout.sum(axis=0)
    return dx, dw, db
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def backward_pass(self):\r\n # the gradient of cross-entropy on top of softmax is (t-y)\r\n back_output = (self.targets - self.y) / self.y.shape[0]\r\n\r\n for layer in reversed(self.layers):\r\n back_output = layer.backward_pass(back_output)", "def backward_pass(self, loss):\n\n ...
[ "0.75506145", "0.74210715", "0.708932", "0.7074831", "0.7008279", "0.69728214", "0.6948082", "0.69344884", "0.69324756", "0.6914606", "0.6914606", "0.687406", "0.68707025", "0.68107206", "0.67995775", "0.67714584", "0.67714584", "0.67669773", "0.67323184", "0.67095137", "0.67...
0.6278595
73
Computes the forward pass for a layer of rectified linear units (ReLUs).
def relu_forward(x):
    """Forward pass for a layer of rectified linear units (ReLUs).

    Input:
    - x: inputs, of any shape

    Returns a tuple of:
    - out: output of the same shape as x, with negative entries clamped to 0
    - cache: x, saved for the backward pass
    """
    # np.maximum is the idiomatic elementwise max(x, 0); it allocates the
    # result directly and replaces the clip-into-a-preallocated-buffer
    # version (and the dead commented-out alternatives).
    out = np.maximum(x, 0)
    cache = x
    return out, cache
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_relus(self):\n\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n ...
[ "0.6775336", "0.6629373", "0.6602302", "0.6523369", "0.6508466", "0.64465874", "0.63842785", "0.626676", "0.620465", "0.6178559", "0.61525744", "0.613782", "0.60611176", "0.6054329", "0.6047199", "0.6046748", "0.59962803", "0.5989819", "0.596913", "0.5962442", "0.5948833", ...
0.60177517
16
Computes the backward pass for a layer of rectified linear units (ReLUs).
def relu_backward(dout, cache):
    """Backward pass for a layer of rectified linear units (ReLUs).

    Input:
    - dout: upstream derivatives, of any shape
    - cache: the input x saved by relu_forward, same shape as dout

    Returns:
    - dx: gradient with respect to x
    """
    x = cache
    # Gradient flows through wherever the input was not negative. Note
    # that x == 0 passes the gradient, matching the original masking
    # (dx[x < 0] = 0). np.where replaces the explicit copy-then-zero.
    dx = np.where(x < 0, 0, dout)
    return dx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def relu_backward(self, dUpper, cache):\n x = cache\n #############################################################################\n # TODO: Implement the ReLU backward pass. #\n #########################################################################...
[ "0.70412636", "0.694716", "0.6897693", "0.6847592", "0.6847592", "0.6846935", "0.6841099", "0.68178034", "0.6766849", "0.6754919", "0.67536426", "0.67530656", "0.67419785", "0.6739244", "0.6713423", "0.6708748", "0.6699276", "0.6696307", "0.6696307", "0.66860896", "0.6665928"...
0.66615254
25
Forward pass for batch normalization. During training the sample mean and (uncorrected) sample variance are computed from minibatch statistics and used to normalize the incoming data. During training we also keep an exponentially decaying running mean of the mean and variance of each feature, and these averages are used to normalize data at test time. At each timestep we update the running averages for mean and variance using an exponential decay based on the momentum parameter: running_mean = momentum * running_mean + (1 - momentum) * sample_mean.
def batchnorm_forward(x, gamma, beta, bn_param):
    """Forward pass for batch normalization.

    During training, minibatch statistics normalize the incoming data and
    exponentially decaying running averages of the mean and variance are
    maintained; at test time those running averages are used instead.

    Inputs:
    - x: data of shape (N, D)
    - gamma: per-feature scale parameter, shape (D,)
    - beta: per-feature shift parameter, shape (D,)
    - bn_param: dict with keys:
      - mode: 'train' or 'test' (required)
      - eps: numerical-stability constant (default 1e-5)
      - momentum: decay rate for the running statistics (default 0.9)
      - running_mean / running_var: shape (D,); updated in place in this dict

    Returns a tuple of:
    - out: normalized, scaled and shifted data, shape (N, D)
    - cache: (x, mu, sigma, gamma, beta) for the backward pass

    NOTE(review): running_var actually accumulates (var + eps) — see the
    train branch — which is why the test branch takes sqrt(running_var)
    without re-adding eps. Internally consistent, but confirm against the
    matching backward pass before changing either side.
    """
    mode = bn_param['mode']
    eps = bn_param.get('eps', 1e-5)
    momentum = bn_param.get('momentum', 0.9)

    N, D = x.shape
    running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))
    running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))

    out, cache = None, None
    # Batch statistics are computed unconditionally; only the train branch
    # uses mu/sigma for normalization, but sigma also goes into the cache.
    mu = np.mean(x, axis=0)
    var = np.var(x, axis=0)
    sigma = np.sqrt(var+eps)
    if mode == 'train':
        # Normalize with the minibatch statistics, then scale and shift.
        out = gamma * (x - mu)/sigma + beta
        # Exponentially decaying running averages for use at test time.
        running_mean = momentum * running_mean + (1 - momentum) * mu
        running_var = momentum * running_var + (1 - momentum) * (var+eps)
    elif mode == 'test':
        # Normalize with the running statistics gathered during training.
        out = (x - running_mean) / np.sqrt(running_var) * gamma + beta
    else:
        raise ValueError('Invalid forward batchnorm mode "%s"' % mode)

    # Store the updated running means back into bn_param
    bn_param['running_mean'] = running_mean
    bn_param['running_var'] = running_var

    cache = (x, mu, sigma, gamma, beta)

    return out, cache
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize(self, x, train=True):\n if train is not None:\n mean, variance = tf.nn.moments(x, [0,1,2])\n assign_mean = self.mean.assign(mean)\n assign_variance = self.variance.assign(variance)\n with tf.control_dependencies([assign_mean, assign_variance]):\n return tf.nn.batch_norm_...
[ "0.73375744", "0.7016016", "0.69402426", "0.6930308", "0.6890035", "0.6870713", "0.6845096", "0.6766321", "0.67575425", "0.6714171", "0.67051464", "0.6647627", "0.65995526", "0.65970504", "0.65763676", "0.6518246", "0.6511042", "0.6459731", "0.64524907", "0.64098454", "0.6387...
0.6935189
3
Backward pass for batch normalization. For this implementation, you should write out a computation graph for batch normalization on paper and propagate gradients backward through intermediate nodes.
def batchnorm_backward(dout, cache): dx, dgamma, dbeta = None, None, None ########################################################################### # TODO: Implement the backward pass for batch normalization. Store the # # results in the dx, dgamma, and dbeta variables. # # Referencing the original paper (https://arxiv.org/abs/1502.03167) # # might prove to be helpful. # ########################################################################### x, mu, sigma, gamma, beta = cache N = dout.shape[0] X_mu = x - mu var_inv = 1./sigma dX_norm = dout * gamma dvar = np.sum(dX_norm * X_mu,axis=0) * -0.5 * sigma**(-3) dmu = np.sum(dX_norm * -var_inv ,axis=0) + dvar * 1/N * np.sum(-2.* X_mu, axis=0) dx = (dX_norm * var_inv) + (dmu / N) + (dvar * 2/N * X_mu) dbeta = np.sum(dout, axis=0) dgamma = np.sum(dout * X_mu/sigma, axis=0) ########################################################################### # END OF YOUR CODE # ########################################################################### return dx, dgamma, dbeta
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. ...
[ "0.71418196", "0.6997701", "0.6994699", "0.69764066", "0.69122756", "0.6845697", "0.68414086", "0.67535686", "0.6739037", "0.65906703", "0.6587759", "0.6582729", "0.65654385", "0.65509117", "0.65476483", "0.6520883", "0.65095323", "0.65002865", "0.647348", "0.6422956", "0.641...
0.6634307
9
Performs the forward pass for dropout.
def dropout_forward(x, dropout_param): p, mode = dropout_param['p'], dropout_param['mode'] if 'seed' in dropout_param: np.random.seed(dropout_param['seed']) mask = None out = None if mode == 'train': ####################################################################### # TODO: Implement training phase forward pass for inverted dropout. # # Store the dropout mask in the mask variable. # ####################################################################### mask = np.random.random_sample(x.shape) mask = mask < p out = x * mask ####################################################################### # END OF YOUR CODE # ####################################################################### elif mode == 'test': ####################################################################### # TODO: Implement the test phase forward pass for inverted dropout. # ####################################################################### out = np.empty_like(x) np.copyto(out,x) ####################################################################### # END OF YOUR CODE # ####################################################################### cache = (dropout_param, mask) out = out.astype(x.dtype, copy=False) return out, cache
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward( self, x ):\n x = x + self.pe[ :x.size(0), : ]\n return self.dropout( x )", "def forward(self, x):\n x = x + self.pe[: x.size(0), :]\n return self.dropout(x)", "def forward(self, x):\n\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)", "d...
[ "0.6779948", "0.66298467", "0.6543176", "0.6543176", "0.6543176", "0.6543176", "0.65413773", "0.64989567", "0.64989567", "0.6470937", "0.64692926", "0.64219373", "0.6366126", "0.635455", "0.6328682", "0.6312325", "0.6288312", "0.623391", "0.623391", "0.623391", "0.6223676", ...
0.6543027
6
Perform the backward pass for dropout.
def dropout_backward(dout, cache): dropout_param, mask = cache mode = dropout_param['mode'] dx = None if mode == 'train': ####################################################################### # TODO: Implement training phase backward pass for inverted dropout # ####################################################################### dx = dout * mask ####################################################################### # END OF YOUR CODE # ####################################################################### elif mode == 'test': dx = dout return dx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dropout_backward(dout, cache):\n dropout_param, mask = cache\n mode = dropout_param['mode']\n p = dropout_param['p']\n dx = None\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase backward pass for inve...
[ "0.7518181", "0.7460461", "0.74584544", "0.7332788", "0.7288746", "0.71464086", "0.7143232", "0.69664955", "0.69397676", "0.69232845", "0.69232845", "0.6907345", "0.6907345", "0.6907345", "0.6907345", "0.6907345", "0.6907345", "0.6907345", "0.6907345", "0.6907345", "0.6907345...
0.7471565
2
The input consists of N data points, each with C channels, height H and width W. We convolve each input with F different filters, where each filter spans all C channels and has height HH and width WW. Assume that stride=1 and there is no padding. You can ignore the bias term in your implementation.
def conv_forward(x, w): out = None ########################################################################### # TODO: Implement the convolutional forward pass. # # Hint: you can use the function np.pad for padding. # ########################################################################### N, C, H, W = x.shape F, C, HH, WW = w.shape H_prime = H - (HH - 1) W_prime = W - (WW - 1) out = np.zeros((N, F, H_prime, W_prime)) for n in range(N): for f in range(F): for i in range(H_prime): for j in range(W_prime): out[n, f, i, j] = np.sum(x[n, :, i:i+HH, j:j+WW] * w[f]) ########################################################################### # END OF YOUR CODE # ########################################################################### cache = (x, w) return out, cache
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loop_conv(X, W):\n # Go over all five dimensions \n # (#batches x #channels x #height x #width x #dur/length )\n # with filter that has\n # #filters x #channels x #height x #width x #dur/length \n num_filters = W.shape[0]\n filt_channels = W.shape[1]\n filt_height = W.shape[2]\n filt_wi...
[ "0.77974457", "0.68532217", "0.6843606", "0.68252665", "0.6792097", "0.6787238", "0.6778164", "0.6764888", "0.67277294", "0.66646516", "0.6578862", "0.65779644", "0.6572897", "0.65086555", "0.64898103", "0.6478974", "0.6441421", "0.64312285", "0.6427597", "0.6360412", "0.6359...
0.62824273
27
A naive implementation of the forward pass for a maxpooling layer.
def max_pool_forward(x, pool_param): out = None ########################################################################### # TODO: Implement the max-pooling forward pass # ########################################################################### N, C, H, W = x.shape pool_height = pool_param['pool_height'] pool_width = pool_param['pool_width'] stride = pool_param['stride'] H_prime = int(1 + (H - pool_height) / stride) W_prime = int(1 + (W - pool_width) / stride) #python 3 / is just float number division out = np.zeros((N,C,H_prime,W_prime)) for n in range(N): for i in range(H_prime): for j in range(W_prime): h_start = i * stride h_end = h_start + pool_height w_start = j * stride w_end = w_start + pool_width pool_window = x[n, :, h_start:h_end, w_start:w_end] pool_window = pool_window.reshape((C,-1)) out[n,:,i,j] = np.max(pool_window, axis=1) ########################################################################### # END OF YOUR CODE # ########################################################################### cache = (x, pool_param) return out, cache
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_pool_forward_naive(x, pool_param):\n out = None\n ###########################################################################\n # TODO: Implement the max pooling forward pass #\n ###########################################################################\n N,C,H,W ...
[ "0.78478473", "0.77464086", "0.7588915", "0.75831836", "0.75474185", "0.74184185", "0.7378611", "0.7318571", "0.72232896", "0.71326613", "0.7098723", "0.70758176", "0.6990659", "0.69255495", "0.69221073", "0.68792313", "0.68130654", "0.68058133", "0.6792034", "0.6792034", "0....
0.74050933
6
A naive implementation of the backward pass for a maxpooling layer.
def max_pool_backward(dout, cache): dx = None ########################################################################### # TODO: Implement the max-pooling backward pass # ########################################################################### x, pool_param = cache N, C, H, W = x.shape pool_height = pool_param['pool_height'] pool_width = pool_param['pool_width'] stride = pool_param['stride'] H_prime = int(1 + (H - pool_height) / stride) W_prime = int(1 + (W - pool_width) / stride) #python 3 / is just float number division dx = np.zeros((N, C, H, W)) for n in range(N): for c in range(C): for i in range(H_prime): for j in range(W_prime): h_start = i * stride h_end = h_start + pool_height w_start = j * stride w_end = w_start + pool_width pool_window = x[n, c, h_start:h_end, w_start:w_end] maxValue = np.max(pool_window) dx[n,c,h_start:h_end,w_start:w_end] += dout[n,c,i,j] * (pool_window == maxValue) ########################################################################### # END OF YOUR CODE # ########################################################################### return dx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_pool_backward_naive(dout, cache):\n dx = None\n #############################################################################\n # TODO: Implement the max pooling backward pass #\n #############################################################################\n pass\n ######...
[ "0.78717184", "0.7527569", "0.74360836", "0.742322", "0.74153745", "0.74050575", "0.736715", "0.73291767", "0.71188295", "0.71188295", "0.7104715", "0.7095607", "0.7016401", "0.69276273", "0.6922222", "0.68295985", "0.67844594", "0.67377377", "0.6726962", "0.6720057", "0.6710...
0.73897725
6
Computes the loss and gradient for binary SVM classification.
def svm_loss(x, y): N = x.shape[0] x = np.squeeze(x) loss = np.sum(((1-x*y)>0)*(1-x*y))/N dx = ((1-x*y)>0)*(-y)/N return loss, dx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def svm_loss(x, y):\n loss, dx = None, None\n ###########################################################################\n # TODO: Implement loss and gradient for multiclass SVM classification. #\n # This will be similar to the svm loss vectorized implementation in #\n # cs231n/classifiers...
[ "0.7909253", "0.7767066", "0.7405515", "0.7281215", "0.7261097", "0.7241102", "0.7210721", "0.7159015", "0.7136917", "0.7069623", "0.70671463", "0.7060711", "0.7044991", "0.69557554", "0.6928465", "0.6869657", "0.68506974", "0.6844779", "0.6830456", "0.6820079", "0.67838556",...
0.6714259
21
Computes the loss and gradient for binary classification with logistic regression.
def logistic_loss(x, y): N = x.shape[0] x = np.squeeze(x) y_prime = (y + 1)/2 h = 1 /(1 + np.exp(-x)) loss = np.sum(-np.log( (h**y_prime) * ((1-h)**(1-y_prime)) ))/N dx = np.exp(-y*x)*(-y)/(1+np.exp(-y*x))/N return loss, dx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logistic(weights, data, targets, hyperparameters):\n y = logistic_predict(weights, data)\n\n #####################################################################\n # TODO: #\n # Given weights and data, return the averaged loss over all da...
[ "0.70904523", "0.7072484", "0.6969767", "0.6957916", "0.6951513", "0.6896685", "0.68876517", "0.683848", "0.6820088", "0.6813277", "0.68107325", "0.6786551", "0.6774134", "0.6744257", "0.6732447", "0.673168", "0.6722383", "0.67222416", "0.6706524", "0.66843295", "0.66585207",...
0.6678023
20
Computes the loss and gradient for softmax classification.
def softmax_loss(x, y): N, C = x.shape loss, dx = 0, np.zeros(x.shape) for i in range(N): loss += -np.log(np.exp(x[i,y[i]])/np.sum(np.exp(x[i,:]))) dx[i,:] = np.exp(x[i,:])/np.sum(np.exp(x[i,:])) dx[i,y[i]] += (-1) loss /= N dx /= N return loss, dx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loss_and_grad(self, X, y):\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n grad = np.zeros_like(self.W)\n grad_tmp = np.zeros_like(self.W)\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n # ==================================================...
[ "0.8163197", "0.7849804", "0.7754222", "0.7720902", "0.7686208", "0.7658823", "0.7655289", "0.75943136", "0.75897485", "0.75817245", "0.758024", "0.75620025", "0.753615", "0.7510059", "0.7506491", "0.75061125", "0.7504011", "0.74645996", "0.73846763", "0.7381628", "0.7378212"...
0.6929496
54
When cache is enabled, records the current request response json content in the cache file.
def set_cached_response(self) -> None: if self.get_caching_duration() > 0: # if caching is enabled for this request json_response = self._request_result.json() with open(self.cache_file_name, 'w') as json_file: json.dump(json_response, json_file, indent=4)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cache():\n if request.method == 'GET':\n cache_info = in_water.cache_info()\n return json.dumps({\n 'hits': cache_info.hits,\n 'misses': cache_info.misses,\n 'maxsize': cache_info.maxsize,\n 'currsize': cache_info.currsize,\n })", "def saveC...
[ "0.7289211", "0.71144605", "0.7006052", "0.68958193", "0.688203", "0.68352675", "0.66610754", "0.6637904", "0.6591506", "0.6493748", "0.64900154", "0.6430256", "0.6393354", "0.62931097", "0.6260045", "0.62439954", "0.62336665", "0.6196039", "0.6179128", "0.615851", "0.6150054...
0.83422124
0
Method processing the json content of a request, and returning a valid RestoRequestResult.
def process_json_result(self, json_result: dict) -> RestoRequestResult: try: resto_response = self.resto_response_cls(self, json_result) except RestoResponseError: msg = 'Response to {} from {} resto server cannot be understood.' # TOOD: move elsewhere ? raise IncomprehensibleResponse(msg.format(self.get_server_name())) return resto_response.as_resto_object()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _process(self, request, processor, operation):\n \n params = request.REQUEST.get('json', None)\n if params is None :\n params = ApiFacade._convertMergeDict(request.REQUEST)\n else :\n params=params\n params = json.loads(params)\n \n resp_bo...
[ "0.60520667", "0.5982503", "0.585078", "0.5832222", "0.5702895", "0.5620679", "0.55594456", "0.55570936", "0.555036", "0.547908", "0.54710627", "0.54656655", "0.542057", "0.54201967", "0.54093033", "0.5404972", "0.53861696", "0.534103", "0.534082", "0.5264856", "0.5264153", ...
0.6597384
0
Recommended maximum number of datapoints Returns int
def recommended_max_num_datapoints(self) -> int: # very large number, essentially no limit by default return 1e9
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def graph_data_size_max(self) -> int:\n return int(self.graph_tuple_stats.graph_data_size_max or 0)", "def get_minimum_number_of_data_points(cls):\n return cls._MINIMUM_NUMBER_OF_DATA_POINTS", "def numberOfPoints(self):\n return 20000", "def data_edge_count_max(self) -> int:\n return int(...
[ "0.74292016", "0.74018806", "0.7281152", "0.7237732", "0.7090521", "0.6958116", "0.6913622", "0.68758655", "0.6854837", "0.68302363", "0.67935854", "0.6772748", "0.6769792", "0.67452073", "0.67428017", "0.67161715", "0.66959566", "0.668196", "0.66803277", "0.6658686", "0.6651...
0.87322336
0
Fits function y=f(x) given training pairs (x_train, y_train).
def _fit(self, x_train, y_train, x_valid, y_valid, regressor_callback=None):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self, X, y):", "def train(self, x_train, y_train, x_val, y_val):\n pass", "def train(self, X, y):\n pass", "def train(self, X, y):\n lagrange_multipliers = self._compute_multipliers(X, y)\n return self._construct_predictor(X, y, lagrange_multipliers)", "def fit(self, X...
[ "0.7322144", "0.71384645", "0.71244806", "0.69531596", "0.6825773", "0.6776183", "0.6776183", "0.67015135", "0.6648316", "0.6648316", "0.6648316", "0.66165537", "0.6569884", "0.65607", "0.64630246", "0.6440666", "0.6422532", "0.6422532", "0.63722926", "0.6364112", "0.6359813"...
0.6362209
20
Fits function y=f(x) given training pairs (x_train, y_train).
def fit( self, x_train, y_train, x_valid=None, y_valid=None, regressor_callback=None ): has_more_than_one_channel = len(y_train.shape) > 1 if x_valid is None: x_valid = x_train y_valid = y_train # to the multi-channel form, but with just one chanel; if not has_more_than_one_channel: y_train = y_train[numpy.newaxis, ...] y_valid = y_valid[numpy.newaxis, ...] self.models = [] self._stop_fit = False for y_train_channel, y_valid_channel in zip(y_train, y_valid): gc.collect() model_channel = self._fit( x_train, y_train_channel, x_valid, y_valid_channel, regressor_callback ) self.models.append(model_channel) self.loss_history.append(model_channel.loss_history) self.num_channels = len(self.models)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self, X, y):", "def train(self, x_train, y_train, x_val, y_val):\n pass", "def train(self, X, y):\n pass", "def train(self, X, y):\n lagrange_multipliers = self._compute_multipliers(X, y)\n return self._construct_predictor(X, y, lagrange_multipliers)", "def fit(self, X...
[ "0.73206455", "0.7135874", "0.7122955", "0.6952598", "0.682414", "0.6773915", "0.6773915", "0.6700229", "0.6648014", "0.6648014", "0.6648014", "0.66138375", "0.65697306", "0.6558614", "0.64612556", "0.6438456", "0.6420302", "0.6420302", "0.6370733", "0.6363991", "0.6362005", ...
0.0
-1
Stops training (can be called by another thread)
def stop_fit(self): self._stop_fit = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def end_training(self):\n self.training = False", "def stop_training_job(TrainingJobName=None):\n pass", "def stop(self):\n self.requested_state = 'Stopped'\n self.ml_interface.stop()", "def stop(self):\n self._state.transit(sitcpy.THREAD_STOPPING)", "def stop(self):\n ...
[ "0.8000698", "0.759568", "0.73393655", "0.72586185", "0.72586185", "0.71302235", "0.7112129", "0.70853776", "0.7046241", "0.70452595", "0.70376647", "0.70376647", "0.70227903", "0.70227903", "0.70227903", "0.70227903", "0.7018452", "0.70008683", "0.70008683", "0.69937384", "0...
0.0
-1
Saves an 'allbatteriesincluded' regressor at a given path (folder).
def save(self, path: str): os.makedirs(path, exist_ok=True) frozen = encode_indent(self) lprint(f"Saving regressor to: {path}") with open(join(path, "regressor.json"), "w") as json_file: json_file.write(frozen) for i, model in enumerate(self.models): channel_path = join(path, f"channel{i}") os.makedirs(channel_path, exist_ok=True) frozen_model = encode_indent(model) with open(join(channel_path, "regressor_model.json"), "w") as json_file: json_file.write(frozen_model) model._save_internals(channel_path) return frozen
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, folder):\n self.generator.save_weights('%s/generator.h5'%folder)\n self.critic.save_weights('%s/critic.h5'%folder)", "def save(self, path):\n individual = self.population.fittest_individual()\n order = [int(l) for l in individual.label_order]\n fitness = individu...
[ "0.56438905", "0.5583958", "0.5536748", "0.5505436", "0.5465258", "0.5457156", "0.5457156", "0.5457156", "0.5444318", "0.54246444", "0.54233265", "0.5392205", "0.53767943", "0.53724295", "0.5370799", "0.5340823", "0.5336824", "0.53367984", "0.5329921", "0.5313599", "0.530012"...
0.59676516
0
Returns an 'allbatteriesincluded' regressor from a given path (folder).
def load(path: str): lprint(f"Loading regressor from: {path}") with open(join(path, "regressor.json"), "r") as json_file: frozen = json_file.read() thawed = jsonpickle.decode(frozen) thawed.models = [] for i in range(thawed.num_channels): channel_path = join(path, f"channel{i}") lprint(f"Loading regressor model for channel {i} from: {path}") with open(join(channel_path, "regressor_model.json"), "r") as json_file: frozen_model = json_file.read() thawed_model = jsonpickle.decode(frozen_model) thawed_model._load_internals(channel_path) thawed.models.append(thawed_model) return thawed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_drivers(dirpath):\n\n return all_drivers", "def get_all(folder, filter_funs = []) :\n\n def apply_funs(x, funs) :\n \"\"\"Applies the filter functions.\"\"\"\n res = True\n for f in funs :\n res = f(x)\n if not res :\n break\n return res\n \n final = {}\n files = listd...
[ "0.5379837", "0.5044664", "0.49726188", "0.49551898", "0.4940935", "0.4874986", "0.48747087", "0.47927487", "0.47669888", "0.47309345", "0.46937668", "0.46731636", "0.4670935", "0.46418554", "0.46351552", "0.45951054", "0.45918998", "0.45842183", "0.4548384", "0.45425668", "0...
0.5162948
1
simulation of a single game
def mc_trial(board, player): if len(board.get_empty_squares()) > 0: gra_w_toku = True else: gra_w_toku = False while gra_w_toku: tupka = random.choice(board.get_empty_squares()) board.move(tupka[0], tupka[1], player) status = board.check_win() if status == player or status == provided.DRAW: gra_w_toku = not gra_w_toku player = provided.switch_player(player) return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_simulation(self, state):\n \"*** YOUR CODE HERE ***\"\n player = 0\n visited_states = [(player, state)]\n depth_limited = self.depth != -1\n depth = self.depth\n expand = True\n while not visited_states[-1][1].isWin() and not visited_states[-1][1].isLose():\...
[ "0.76476705", "0.7423586", "0.7248727", "0.69607615", "0.6947179", "0.6916251", "0.69077504", "0.6889119", "0.68533415", "0.6840715", "0.68284386", "0.6805519", "0.6801347", "0.6782698", "0.67796993", "0.6775046", "0.66829413", "0.6681817", "0.6652152", "0.66504765", "0.66499...
0.0
-1
return nothing but updating a score
def mc_update_scores(scores, board, player): status = board.check_win() if status == provided.DRAW: pass if status == player: for row in range(board.get_dim()): for col in range(board.get_dim()): znak = board.square(row, col) helper(True, znak, player, scores, row, col) if status == provided.switch_player(player): for row in range(board.get_dim()): for col in range(board.get_dim()): znak = board.square(row, col) helper(False, znak, player, scores, row, col) return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_score():\n pass", "def updateScore(score):\n return score + 1", "def update_score(self, score: int) -> int:\n self.score += score\n return self.score", "def score(self):", "def update_score(self):\n self.score = TurboMQ.calculate_fitness(self.result, self.g...
[ "0.88064134", "0.8144664", "0.77495396", "0.76195884", "0.7563196", "0.74926865", "0.7427741", "0.73870414", "0.7202365", "0.7133871", "0.710654", "0.7085516", "0.70635676", "0.706069", "0.7026134", "0.7020511", "0.70177984", "0.7006921", "0.70053643", "0.6973539", "0.6971714...
0.63917595
82
return a move for the machine player in the form of a (row, column) tuple.
def mc_move(board, player, trials): grid_of_scores = [[0 for dummy_i in range(board.get_dim())] for dummy_j in range(board.get_dim())] test_board = board.clone() for dummy_i in range(trials): mc_trial(test_board, player) mc_update_scores(grid_of_scores, test_board, player) test_board = board.clone() best_move = get_best_move(board, grid_of_scores) return best_move
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_move(board, player):\r\n row, col = 0, 0\r\n return row, col", "def get_move(board, player):\n row, col = 0, 0\n return row, col", "def get_ai_move(board, player):\r\n row, col = 0, 0\r\n return row, col", "def get_ai_move(board, player):\n row, col = 0, 0\n return row, col", ...
[ "0.84373444", "0.8416715", "0.7964209", "0.79255193", "0.72848445", "0.7280993", "0.7246106", "0.7221404", "0.7153344", "0.7151742", "0.70832056", "0.7001192", "0.689266", "0.6870447", "0.6862894", "0.6862894", "0.6854077", "0.684734", "0.684734", "0.6843987", "0.68329155", ...
0.0
-1
Train the crf tagger based on the training data.
def _train_model(self, df_train): # type: (List[List[Tuple[Text, Text, Text, Text]]]) -> None import sklearn_crfsuite X_train = [self._sentence_to_features(sent) for sent in df_train] y_train = [self._sentence_to_labels(sent) for sent in df_train] from itertools import chain import nltk import sklearn import scipy.stats from sklearn.metrics import make_scorer from sklearn.model_selection import cross_val_score from sklearn.model_selection import RandomizedSearchCV import sklearn_crfsuite from sklearn_crfsuite import scorers from sklearn_crfsuite import metrics X_train = [self._sentence_to_features(sent) for sent in df_train] y_train = [self._sentence_to_labels(sent) for sent in df_train] if self.component_config["grid_search"]: self.ent_tagger = sklearn_crfsuite.CRF( algorithm='lbfgs', # stop earlier max_iterations=self.component_config["max_iterations"], # include transitions that are possible, but not observed all_possible_transitions=True ) self.ent_tagger.fit(X_train, y_train) params_space = { 'c1': scipy.stats.expon(scale=0.5), 'c2': scipy.stats.expon(scale=0.5), } labels = self.ent_tagger.classes_ # use the same metric for evaluation f1_scorer = make_scorer(metrics.flat_f1_score, average='weighted', labels=labels) # search rs = RandomizedSearchCV(self.ent_tagger, params_space, cv=10, verbose=1, n_jobs=-1, n_iter=100, scoring=f1_scorer) rs.fit(X_train, y_train) print('best params:', rs.best_params_) print('best CV score:', rs.best_score_) print('model size: {:0.2f}M'.format(rs.best_estimator_.size_ / 1000000)) try: import json with open("tunning_score.json", "w") as f: json.dump(rs.best_params_, f, sort_keys=True, indent=4) except Exception: pass self.ent_tagger = sklearn_crfsuite.CRF( algorithm='lbfgs', c1=rs.best_params_["c1"], c2=rs.best_params_["c2"], # stop earlier max_iterations=self.component_config["max_iterations"], # include transitions that are possible, but not observed all_possible_transitions=True ) else: print("L1_c", 
self.component_config["L1_c"]) print("L2_c", self.component_config["L2_c"]) self.ent_tagger = sklearn_crfsuite.CRF( algorithm='lbfgs', # coefficient for L1 penalty c1=self.component_config["L1_c"], # coefficient for L2 penalty c2=self.component_config["L2_c"], # stop earlier max_iterations=self.component_config["max_iterations"], # include transitions that are possible, but not observed all_possible_transitions=True ) self.ent_tagger.fit(X_train, y_train)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_crf(ctx, input, output, clusters):\n click.echo('chemdataextractor.crf.train')\n sentences = []\n for line in input:\n sentence = []\n for t in line.split():\n token, tag, iob = t.rsplit('/', 2)\n sentence.append(((token, tag), iob))\n if sentence:\n ...
[ "0.70876074", "0.7059196", "0.6855058", "0.6802832", "0.6785829", "0.67681587", "0.67512184", "0.6747457", "0.67143035", "0.6711", "0.66792077", "0.66118145", "0.660746", "0.66058165", "0.66058165", "0.66058165", "0.66058165", "0.66058165", "0.65927374", "0.6588397", "0.65883...
0.6713298
9
Excludes where learners played against learners
def get_reward_lists(f): with open(f, 'r',newline='') as f: lines = f.readlines() return lines
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exclude_words(self, words):\n idcs = []\n for i in range(len(self)):\n if not self.transcript(i) in words:\n idcs.append(i)\n subset = self.sub_set(idcs)\n return subset", "def prune_teachers(self):\n self.teacher_policies = self.teacher_policies[:...
[ "0.65722376", "0.5929489", "0.58905196", "0.5826653", "0.58197314", "0.5794661", "0.5784924", "0.5743494", "0.5724692", "0.56861764", "0.5636551", "0.56264764", "0.5577885", "0.5564499", "0.5544913", "0.5514717", "0.55069286", "0.55069286", "0.5471333", "0.5468246", "0.545329...
0.0
-1
loads the next N images from the binary mraw file into a numpy array.
def load_images(mraw, h, w, N, bit=16, roll_axis=True): if int(bit) == 16: images = np.memmap(mraw, dtype=np.uint16, mode='r', shape=(N, h, w)) elif int(bit) == 8: images = np.memmap(mraw, dtype=np.uint8, mode='r', shape=(N, h, w)) elif int(bit) == 12: warnings.warn("12bit images will be loaded into memory!") #images = _read_uint12_video(mraw, (N, h, w)) images = _read_uint12_video_prec(mraw, (N, h, w)) else: raise Exception(f"Unsupported bit depth: {bit}") #images=np.fromfile(mraw, dtype=np.uint16, count=h * w * N).reshape(N, h, w) # about a 1/3 slower than memmap when loading to RAM. Also memmap doesn't need to read to RAM but can read from disc when needed. if roll_axis: return np.rollaxis(images, 0, 3) else: return images
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readData():\n\tN = 800\n\tD = 28*28\n\tX = np.zeros((N, D), dtype=np.uint8)\n\n\tf = open(\"data/a012_images.dat\", 'rb')\n\n\tfor i in range(0, N):\n\t\tX[i, :] = np.fromstring(f.read(D), dtype='uint8')\n\n\tf.close()\n\n\treturn X", "def le_binario_mgbq(filebin,nt,nc):\n return np.fromfile(fileb...
[ "0.6541903", "0.63630474", "0.6321544", "0.63167053", "0.6276605", "0.6192698", "0.6192698", "0.6186343", "0.61686295", "0.61686295", "0.61686295", "0.61686295", "0.61686295", "0.61686295", "0.61289364", "0.61266387", "0.61094004", "0.6033004", "0.60216844", "0.59940916", "0....
0.68674904
0
Loads and returns images and a cih info dict.
def load_video(cih_file): cih = get_cih(cih_file) mraw_file = path.splitext(cih_file)[0] + '.mraw' N = cih['Total Frame'] h = cih['Image Height'] w = cih['Image Width'] bit = cih['Color Bit'] images = load_images(mraw_file, h, w, N, bit, roll_axis=False) return images, cih
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_metadata(self):\n\n cub_dir = self.root / \"CUB_200_2011\"\n images_list: Dict[int, List] = OrderedDict()\n\n with open(str(cub_dir / \"train_test_split.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n ...
[ "0.69167143", "0.67644393", "0.65785575", "0.64346397", "0.64260566", "0.6405014", "0.6335255", "0.630171", "0.63005936", "0.62530017", "0.6173102", "0.61003965", "0.6039576", "0.60270804", "0.6020482", "0.6004541", "0.59933895", "0.59829694", "0.59729624", "0.5952639", "0.59...
0.0
-1
Saves given sequence of images into .mraw file.
def save_mraw(images, save_path, bit_depth=16, ext='mraw', info_dict=None):
    """Saves given sequence of images into .mraw file (plus a .cih header).

    :param images: iterable of 2D frames (all the same shape).
    :param save_path: target path; its extension is replaced by ``ext``.
    :param bit_depth: output bit depth, 8 or 16.
    :param ext: extension for the raw file (default 'mraw').
    :param info_dict: optional extra/override entries for the .cih header.
    :return: tuple ``(mraw_path, cih_path)`` of the written files.
    :raises ValueError: on unsupported bit depth or out-of-range pixel values.
    """
    # Fix: avoid a mutable default argument (was ``info_dict={}``).
    if info_dict is None:
        info_dict = {}

    filename, extension = path.splitext(save_path)
    mraw_path = '{:s}.{:s}'.format(filename, ext)

    directory_path = path.split(save_path)[0]
    # Fix: a bare file name yields directory_path == '' and os.makedirs('')
    # raises FileNotFoundError; only create a directory when one is given.
    if directory_path and not path.exists(directory_path):
        os.makedirs(directory_path)

    bit_depth_dtype_map = {
        8: np.uint8,
        16: np.uint16
    }
    if bit_depth not in bit_depth_dtype_map.keys():
        raise ValueError('Currently supported bit depths are 8 and 16.')

    # NOTE(review): 16-bit output is declared with an effective depth of 12
    # in the header — presumably matching the camera's sensor depth; confirm.
    if bit_depth < 16:
        effective_bit = bit_depth
    else:
        effective_bit = 12

    if np.max(images) > 2**bit_depth-1:
        raise ValueError(
            'The input image data does not match the selected bit depth. ' +
            'Consider normalizing the image data before saving.')

    # Generate .mraw file: frames are written back-to-back, row-major.
    with open(mraw_path, 'wb') as file:
        for image in images:
            image = image.astype(bit_depth_dtype_map[bit_depth])
            image.tofile(file)

    file_shape = (int(len(images)), image.shape[0], image.shape[1])
    file_format = 'MRaw'

    image_info = {'Record Rate(fps)': '{:d}'.format(1),
                  'Shutter Speed(s)': '{:.6f}'.format(1),
                  'Total Frame': '{:d}'.format(file_shape[0]),
                  'Original Total Frame': '{:d}'.format(file_shape[0]),
                  'Start Frame': '{:d}'.format(0),
                  'Image Width': '{:d}'.format(file_shape[2]),
                  'Image Height': '{:d}'.format(file_shape[1]),
                  'Color Type': 'Mono',
                  'Color Bit': bit_depth,
                  'File Format' : file_format,
                  'EffectiveBit Depth': effective_bit,
                  'Comment Text': 'Generated sequence. Modify measurement info in created .cih file if necessary.',
                  'EffectiveBit Side': 'Lower'}
    # Caller-supplied entries override the defaults above.
    image_info.update(info_dict)

    # Fix: removed the dead, wrong ``'{:s}.{:s}'.format(filename, '.cih')``
    # assignment (it produced a double-dot name and was always overwritten).
    cih_path = '{:s}.{:s}'.format(filename, 'cih')
    with open(cih_path, 'w') as file:
        file.write('#Camera Information Header\n')
        for key in image_info.keys():
            file.write('{:s} : {:s}\n'.format(key, str(image_info[key])))

    return mraw_path, cih_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(images, output):\n for image, frame in images:\n image.save(output(frame))", "def save_step_1(imgs, output_path='./output/step1'):\n # ... your code here ...\n i=0\n for each in imgs:\n i+=1\n cv2.imwrite(output_path+\"/output\"+str(i)+\".jpg\", each)", "def saveFrames...
[ "0.711835", "0.70112777", "0.6918421", "0.6822654", "0.6764862", "0.6758189", "0.67128646", "0.67098147", "0.6670487", "0.665418", "0.66379416", "0.6584189", "0.6560411", "0.651001", "0.6488647", "0.6457336", "0.6429019", "0.6422496", "0.6414986", "0.638881", "0.6349937", "...
0.67297393
6
Utility function to read 12-bit packed mraw files into a uint16 array. Will store the entire array in memory!
def _read_uint12_video(data, shape): data = np.memmap(data, dtype=np.uint8, mode="r") fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T fst_uint12 = (fst_uint8 << 4) + (mid_uint8 >> 4) snd_uint12 = ((mid_uint8 % 16) << 8) + lst_uint8 return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), shape)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unpack_mraw_frame_12bit(file,n_pixels,start_frame=0):\n \n start_byte = start_frame*n_pixels*12/8\n file.seek(start_byte)\n image = []\n \n n_bytes = n_pixels*12/8\n \n int_array = np.fromfile(file,count=n_bytes,dtype=np.uint8)\n \n bytes_1 = int_array[::3]\n bytes_2 = int_arra...
[ "0.68789697", "0.68381524", "0.66513246", "0.6528", "0.6359484", "0.6214181", "0.6206337", "0.6118009", "0.60529387", "0.60003304", "0.5936918", "0.593421", "0.59340906", "0.5930919", "0.58940655", "0.5846712", "0.58181554", "0.5809074", "0.5799255", "0.5763211", "0.5753879",...
0.70496243
0
Utility function to read 12-bit packed mraw files into a uint16 array. Will store the entire array in memory!
def _read_uint12_video_prec(data, shape):
    """Read a 12-bit packed mraw file into a uint16 array of ``shape``.

    Uses the precompiled ``nb_read_uint12`` unpacker; the whole unpacked
    array is held in memory.
    """
    packed = np.memmap(data, dtype=np.uint8, mode="r")
    unpacked = nb_read_uint12(packed)
    return unpacked.reshape(shape)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_uint12_video(data, shape):\n data = np.memmap(data, dtype=np.uint8, mode=\"r\")\n fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n fst_uint12 = (fst_uint8 << 4) + (mid_uint8 >> 4)\n snd_uint12 = ((mid_uint8 % 16) << 8) + lst_uint8\n return ...
[ "0.7049365", "0.6879431", "0.6836875", "0.65290755", "0.6360394", "0.621499", "0.6207839", "0.61189526", "0.60546416", "0.60031", "0.59370583", "0.5934907", "0.5933406", "0.5932318", "0.5895518", "0.5847376", "0.5819832", "0.5810755", "0.5799669", "0.5764724", "0.5755548", ...
0.66510236
3
Precompiled function to efficiently convert 12-bit packed video to 16-bit video. It splits 3 bytes into two 16-bit words. data_chunk is a contiguous 1D array of uint8 data, e.g. the 12-bit video loaded as an 8-bit array.
def nb_read_uint12(data_chunk):
    """Precompiled (numba) conversion of 12-bit packed video to uint16.

    Every 3 input bytes are split into two 16-bit words holding 12-bit
    values. ``data_chunk`` is a contiguous 1D uint8 array, e.g. the 12-bit
    video file loaded as raw bytes.

    :param data_chunk: 1D uint8 array; its length must be divisible by 3.
    :return: uint16 array with ``len(data_chunk) // 3 * 2`` samples.
    """
    #ensure that the data_chunk has the right length
    assert np.mod(data_chunk.shape[0],3)==0
    out = np.empty(data_chunk.size//3*2, dtype=np.uint16)
    # nb.prange lets numba parallelize this loop across byte triples.
    for i in nb.prange(data_chunk.shape[0]//3):
        fst_uint8=np.uint16(data_chunk[i*3])
        mid_uint8=np.uint16(data_chunk[i*3+1])
        lst_uint8=np.uint16(data_chunk[i*3+2])
        # First sample: 8 bits from byte 0 + high nibble of byte 1.
        out[i*2] = (fst_uint8 << 4) + (mid_uint8 >> 4)
        # Second sample: low nibble of byte 1 + all 8 bits of byte 2.
        out[i*2+1] = ((mid_uint8 % 16) << 8) + lst_uint8
    return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_uint12_video(data, shape):\n data = np.memmap(data, dtype=np.uint8, mode=\"r\")\n fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n fst_uint12 = (fst_uint8 << 4) + (mid_uint8 >> 4)\n snd_uint12 = ((mid_uint8 % 16) << 8) + lst_uint8\n return ...
[ "0.6967046", "0.61986065", "0.6189992", "0.61811805", "0.6017552", "0.60136306", "0.59860003", "0.5980767", "0.58180374", "0.5771538", "0.5734991", "0.5357844", "0.53522146", "0.53145593", "0.52839905", "0.52355176", "0.5201002", "0.5169548", "0.51465833", "0.5143027", "0.513...
0.6670928
1
Get item from self.human_data.
def prepare_raw_data(self, idx: int):
    """Get item from self.human_data, filling in SMPL-X parameters.

    For every SMPL-X parameter present in the annotation the value for
    ``idx`` is copied into ``info`` with a ``has_*`` flag of 1; missing
    parameters get a zero array of the canonical shape and a flag of 0.
    """
    info = super().prepare_raw_data(idx)
    if self.cache_reader is not None:
        self.human_data = self.cache_reader.get_item(idx)
        idx = idx % self.cache_reader.slice_size

    if 'smplx' in self.human_data:
        smplx_dict = self.human_data['smplx']
        info['has_smplx'] = 1
    else:
        smplx_dict = {}
        info['has_smplx'] = 0

    # (parameter name, zero-fill shape); None means the shape is resolved
    # from self (num_betas / num_expression) only when needed.
    param_defaults = (
        ('global_orient', (3,)),
        ('body_pose', (21, 3)),
        ('right_hand_pose', (15, 3)),
        ('left_hand_pose', (15, 3)),
        ('jaw_pose', (3,)),
        ('betas', None),
        ('expression', None),
    )
    for param, default_shape in param_defaults:
        key = f'smplx_{param}'
        if param in smplx_dict:
            info[key] = smplx_dict[param][idx]
            info[f'has_{key}'] = 1
        else:
            if default_shape is None:
                default_shape = ((self.num_betas,) if param == 'betas'
                                 else (self.num_expression,))
            info[key] = np.zeros(default_shape, dtype=np.float32)
            info[f'has_{key}'] = 0
    return info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_item(self):\n return self.item", "def get_item(self):\n return self.item", "def __getitem__(self, item):\n return self._data[item]", "def __getitem__(self, item):\n return self.data[item]", "def __getitem__(self, item):\n return self.data[item]", "def __getitem_...
[ "0.7406864", "0.7406864", "0.73144776", "0.7309078", "0.7309078", "0.7309078", "0.7219982", "0.71420383", "0.71420383", "0.71420383", "0.70934916", "0.70712644", "0.7018385", "0.7018385", "0.70036954", "0.70014024", "0.7000123", "0.6842417", "0.6842417", "0.6842417", "0.67811...
0.0
-1
compute the 3DRMSE between a predicted 3D face shape and the 3D ground truth scan.
def _report_3d_rmse(self, res_file):
    """Compute the mean 3DRMSE between predicted 3D face shapes and scans.

    :param res_file: results file understood by ``self._parse_result``.
    :return: list with a single ``('3DRMSE', mean_error)`` tuple.
    """
    pred_vertices, gt_vertices, _ = self._parse_result(
        res_file, mode='vertice')
    pred_keypoints3d, gt_keypoints3d, _ = self._parse_result(
        res_file, mode='keypoint')
    per_sample_errors = []
    for pred_verts, gt_verts, pred_kpts, gt_kpts in zip(
            pred_vertices, gt_vertices, pred_keypoints3d, gt_keypoints3d):
        # Keypoints are used to rigidly align prediction and scan before
        # measuring vertex-to-mesh distance.
        per_sample_errors.append(
            fg_vertices_to_mesh_distance(gt_verts, gt_kpts, pred_verts,
                                         self.body_model.faces, pred_kpts))
    mean_error = np.array(per_sample_errors).mean()
    return [('3DRMSE', mean_error)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_3d(self, box_point_3d, instance):\n azimuth_error, polar_error = self.evaluate_viewpoint(box_point_3d, instance)\n iou = self.evaluate_iou(box_point_3d, instance)\n return azimuth_error, polar_error, iou", "def get_3d_points(preds_3d):\n for i,p in enumerate(preds_3d):\n preds_3d[...
[ "0.57921743", "0.5499897", "0.5472451", "0.52560514", "0.5251211", "0.52303135", "0.51892173", "0.518625", "0.5166288", "0.5151563", "0.514771", "0.511312", "0.5067872", "0.50428575", "0.5011078", "0.49873042", "0.49712166", "0.49459213", "0.49200428", "0.4919663", "0.4918488...
0.60341465
0
Evaluate 3D keypoint results.
def evaluate(self,
             outputs: list,
             res_folder: str,
             metric: Optional[Union[str, List[str]]] = 'pa-mpjpe',
             **kwargs: dict):
    """Evaluate 3D keypoint results.

    :param outputs: per-batch result dicts with 'image_idx',
        'keypoints_3d' and 'vertices' entries.
    :param res_folder: result folder path.
        NOTE(review): unused in this implementation — possibly kept for
        API compatibility; confirm.
    :param metric: one metric name or a list of names; each must be in
        ``self.ALLOWED_METRICS``.
    :param kwargs: may contain 'body_part', a per-metric list of body
        parts (only supported for 'pa-mpjpe' and 'pa-pve').
    :return: OrderedDict mapping metric names to values.
    :raises KeyError: if a requested metric is not supported.
    """
    metrics = metric if isinstance(metric, list) else [metric]
    for metric in metrics:
        if metric not in self.ALLOWED_METRICS:
            raise KeyError(f'metric {metric} is not supported')
    # for keeping correctness during multi-gpu test, we sort all results
    res_dict = {}
    for out in outputs:
        target_id = out['image_idx']
        batch_size = len(out['keypoints_3d'])
        for i in range(batch_size):
            res_dict[int(target_id[i])] = dict(
                keypoints=out['keypoints_3d'][i],
                vertices=out['vertices'][i],
            )
    # Re-assemble results in dataset order (0 .. num_data-1).
    keypoints, vertices = [], []
    for i in range(self.num_data):
        keypoints.append(res_dict[i]['keypoints'])
        vertices.append(res_dict[i]['vertices'])
    keypoints = np.stack(keypoints)
    vertices = np.stack(vertices)
    res = dict(keypoints=keypoints, vertices=vertices)

    name_value_tuples = []
    for index, _metric in enumerate(metrics):
        if 'body_part' in kwargs:
            # Per-body-part evaluation; only PA metrics support this path.
            body_parts = kwargs['body_part'][index]
            for body_part in body_parts:
                if _metric == 'pa-mpjpe':
                    _nv_tuples = self._report_mpjpe(
                        res, metric='pa-mpjpe', body_part=body_part)
                elif _metric == 'pa-pve':
                    _nv_tuples = self._report_pve(
                        res, metric='pa-pve', body_part=body_part)
                else:
                    raise NotImplementedError
                name_value_tuples.extend(_nv_tuples)
        else:
            # Whole-body evaluation: dispatch on the metric name.
            if _metric == 'mpjpe':
                _nv_tuples = self._report_mpjpe(res)
            elif _metric == 'pa-mpjpe':
                _nv_tuples = self._report_mpjpe(res, metric='pa-mpjpe')
            elif _metric == '3dpck':
                _nv_tuples = self._report_3d_pck(res)
            elif _metric == 'pa-3dpck':
                _nv_tuples = self._report_3d_pck(res, metric='pa-3dpck')
            elif _metric == '3dauc':
                _nv_tuples = self._report_3d_auc(res)
            elif _metric == 'pa-3dauc':
                _nv_tuples = self._report_3d_auc(res, metric='pa-3dauc')
            elif _metric == 'pve':
                _nv_tuples = self._report_pve(res)
            elif _metric == 'pa-pve':
                _nv_tuples = self._report_pve(res, metric='pa-pve')
            elif _metric == '3DRMSE':
                _nv_tuples = self._report_3d_rmse(res)
            else:
                raise NotImplementedError
            name_value_tuples.extend(_nv_tuples)
    name_value = OrderedDict(name_value_tuples)
    return name_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_3d(self, box_point_3d, instance):\n azimuth_error, polar_error = self.evaluate_viewpoint(box_point_3d, instance)\n iou = self.evaluate_iou(box_point_3d, instance)\n return azimuth_error, polar_error, iou", "def _get_data_on_3d_points(self, varname, record, points):\n if self.get_mesh...
[ "0.7018151", "0.6458969", "0.6291701", "0.618679", "0.6173702", "0.6046015", "0.59891486", "0.59870636", "0.59073186", "0.5893742", "0.58693314", "0.5850453", "0.57705677", "0.57656455", "0.57517314", "0.5728884", "0.5630404", "0.561461", "0.5612338", "0.5607166", "0.5588802"...
0.0
-1
Print lots of basic information about the given presentation.
def debug_dump(prs:Presentation):
    """Print lots of basic information about the given presentation.

    Lists the slide count, then for every slide its layout name, every
    text-carrying shape and the text of each run.
    """
    print("Presentation has", len(prs.slides), "slides")
    # Walk each slide, dumping its layout name and all contained text.
    for slide_no, slide in enumerate(prs.slides, start=1):
        print("========== slide {} ========== [{}]".format(
            slide_no, slide.slide_layout.name))
        for shape in slide.shapes:
            if not shape.has_text_frame:
                continue
            print(shape.name)
            for paragraph in shape.text_frame.paragraphs:
                for run in paragraph.runs:
                    print(" " + run.text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Print(self):\n\n\t\tif self.verbose:\n\n\t\t print (\"\\033[1m[HEADER]\\033[0m\")\n\t\t print (\"code:\\t\\t%s\" % self.kod)\n\t \tprint (\"version:\\t%s\" % self.ver)\n\t\t print (\"date and time:\\t%s\" % self.probid)\n\t\t print (\"dump number:\\t%s\" % self.knod)\n\t ...
[ "0.6639509", "0.6288107", "0.6238363", "0.62220323", "0.6204376", "0.6180137", "0.6151986", "0.61489", "0.6128701", "0.6115834", "0.6007176", "0.59872526", "0.59838665", "0.596641", "0.59656215", "0.59383917", "0.59287095", "0.5924039", "0.590953", "0.5906681", "0.5902103", ...
0.7133029
0
Count the amount of text in each slide.
def get_word_counts(slides) -> List[int]:
    """Count the number of words on each slide.

    Only shapes that carry a text frame contribute; words are counted by
    whitespace-splitting every run's text.

    :param slides: iterable of slides.
    :return: one word count per slide, in slide order.
    """
    counts = []
    for slide in slides:
        total = 0
        for shape in slide.shapes:
            if not shape.has_text_frame:
                continue
            for paragraph in shape.text_frame.paragraphs:
                total += sum(len(run.text.split()) for run in paragraph.runs)
        counts.append(total)
    return counts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_slide_analytics_new(slides) -> List[int]:\n word_count = []\n for slide in slides:\n print(slide)\n words = 0\n for shape in slide.shapes:\n if not shape.has_text_frame:\n continue\n print(shape.name)\n for paragraph in shape.text_f...
[ "0.7645021", "0.699523", "0.6631901", "0.65998024", "0.65277624", "0.65032226", "0.63644123", "0.63634205", "0.6349974", "0.6309825", "0.6171061", "0.6156245", "0.6148901", "0.60785764", "0.60774815", "0.6072669", "0.603562", "0.59676707", "0.59530956", "0.59443533", "0.59265...
0.75697744
1
Calculates a one-to-five star ranking for presentations that are not too text-heavy.
def calculate_text_stars(word_counts) -> int:
    """Rate a presentation 0-5 stars by how close it is to ~35 words/slide.

    With no data the neutral rating 3 is returned. The score drops by one
    star for every 8 words/slide away from the ideal of 35, then is rounded
    to the nearest integer and clamped to the 0..5 range.
    """
    if word_counts == []:
        return 3
    average = sum(word_counts) / len(word_counts)
    raw_score = 5 - abs(average - 35) / 8
    rounded = int(raw_score + 0.5)
    return max(0, min(5, rounded))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sentiment(text):\n words = pattern_split.split(text.lower())\n sentiments = map(lambda word: afinn.get(word, 0), words)\n if sentiments:\n # How should you weight the individual word sentiments? \n # You could do N, sqrt(N) or 1 for example. Here I use sqrt(N)\n sentiment = float(...
[ "0.5887334", "0.57918924", "0.57401806", "0.5696041", "0.56602454", "0.56410843", "0.5623116", "0.5621459", "0.55914843", "0.5587887", "0.55824214", "0.55516285", "0.553804", "0.55314225", "0.5495516", "0.54903185", "0.5481853", "0.5480828", "0.54778075", "0.54706895", "0.546...
0.5987914
0
Counts how many times each PPT layout is used. Returns the total number of interactive layouts, plus a dictionary of the layout counts.
def count_layouts(prs:Presentation) -> Tuple[int, Dict[str, int]]:
    """Count how often each PPT layout is used.

    :param prs: the presentation to scan.
    :return: ``(interactive_total, usage)`` — the number of slides whose
        layout name is in ``INTERACTIVE``, and a mapping from layout name
        to its usage count.
    """
    usage = collections.defaultdict(int)
    interactive_total = 0
    for slide in prs.slides:
        layout_name = slide.slide_layout.name
        usage[layout_name] += 1
        if layout_name in INTERACTIVE:
            interactive_total += 1
    return (interactive_total, usage)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getNumLayouts(self):\n return _libsbml.LayoutModelPlugin_getNumLayouts(self)", "def pobj_counts(pcode_obj):\n pcode = (pcode_obj.asDict())['pcode'][0] # no multiple pcode blocks - no delimiter\n counts = {'galleries': 0, 'spreads': 0, 'layouts': 0, 'panelgroups': 0}\n # , 'panels': 0, 'skips...
[ "0.6073172", "0.60642576", "0.5786426", "0.57859993", "0.57683474", "0.5574106", "0.54529566", "0.5449059", "0.539238", "0.53353953", "0.5332541", "0.53282636", "0.53045416", "0.52998465", "0.5298258", "0.52905923", "0.5276604", "0.52639884", "0.5256031", "0.52523476", "0.523...
0.8039856
0
Count the amount of text in each slide.
def get_slide_analytics_new(slides) -> List[int]:
    """Count the number of words on each slide, printing debug output.

    Same counting rule as ``get_word_counts`` but echoes each slide,
    shape name and run text to stdout as it goes.
    """
    counts = []
    for slide in slides:
        print(slide)
        total = 0
        for shape in slide.shapes:
            if not shape.has_text_frame:
                continue
            print(shape.name)
            for paragraph in shape.text_frame.paragraphs:
                for run in paragraph.runs:
                    print(" " + run.text)
                    total += len(run.text.split())
        counts.append(total)
    return counts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_word_counts(slides) -> List[int]:\n word_count = []\n for slide in slides:\n # print(f\"========== slide {len(text_count)+1} ========== [{slide.slide_layout.name}]\")\n words = 0\n # find all text\n for shape in slide.shapes:\n if not shape.has_text_frame:\n ...
[ "0.75697744", "0.699523", "0.6631901", "0.65998024", "0.65277624", "0.65032226", "0.63644123", "0.63634205", "0.6349974", "0.6309825", "0.6171061", "0.6156245", "0.6148901", "0.60785764", "0.60774815", "0.6072669", "0.603562", "0.59676707", "0.59530956", "0.59443533", "0.5926...
0.7645021
0
Analyses a presentation and returns a dictionary of star ratings, plus extra details. Works best on presentations that use the LIFTS template, with known layout names.
def analyse_presentation(pres_name:str, verbose=False) -> Dict[str, Any]:
    """Analyse a presentation and return a dict of star ratings plus details.

    Works best on presentations that use the LIFTS template, with known
    layout names.

    :param pres_name: path of the .pptx file to open.
    :param verbose: if True, dump the whole presentation to stdout first.
    :return: dict with star ratings, layout counts, word statistics,
        text-heavy-slide warnings and per-slide analytics.
    """
    prs = Presentation(pres_name)
    if verbose:
        debug_dump(prs)
    (layouts_interactive, layouts) = count_layouts(prs)
    # One star per interactive slide, capped at 5.
    interaction_stars = min(layouts_interactive, 5)
    # Rate section usage: peak reward at 3-4 "Section Header" slides; the
    # padded tail keeps any larger count at the minimum of 1 star.
    topic_stars = ([1,1,3,5,5,4,3,2,1]+[1]*100)[layouts["Section Header"]]
    # NOTE(review): pres_properties is assigned but never used below —
    # possibly kept for the side effects of the call; confirm before removal.
    pres_properties = get_presentation_properties(prs)
    word_count = get_word_counts(prs.slides)
    words_per_slide = sum(word_count) / len(word_count)
    # ideal words/slide is 30-40 (5 stars)
    text_stars = calculate_text_stars(word_count)
    # print("word counts:", word_count)
    # Create a list of warnings about very text-heavy slides
    heavy_warnings = []
    for slide, words in enumerate(word_count):
        if words > MAX_WORDS_PER_SLIDE:
            heavy_warnings.append(f"WARNING: slide {slide} has {words} words!")
    slides = get_slide_analytics(prs.slides)
    print(slides)
    result = {
        "presentation_rating_stars_interaction": interaction_stars,
        "presentation_rating_stars_section": topic_stars,
        "presentation_rating_stars_accessibility": 3,  # not implemented yet!
        "presentation_rating_stars_text": text_stars,
        "presentation_count_slide": len(prs.slides),
        "presentation_count_layout": layouts,  # dictionary that maps layout name to count
        "presentation_total_words": words_per_slide,  # a float
        "presentation_warning_text_heavy": heavy_warnings,  # a list of warning strings
        "presentation_data_slides": slides,  # a list of slides and analytics
        "filename": pres_name,  # TODO: strip any Path and just return file name?
        "name": "ICT999",
        "description": "Introduction to ICT"
    }
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def analyze_pptx(template_file):\n prs = Presentation(template_file)\n # Each powerpoint file has multiple layouts\n # Loop through them all and see where the various elements are\n slide_masters = prs.slide_masters\n for index, slide_master in enumerate(prs.slide_masters):\n print('--------...
[ "0.57579595", "0.5754335", "0.55619776", "0.545294", "0.5446325", "0.51942104", "0.51507777", "0.51323617", "0.5105886", "0.5052227", "0.5037823", "0.49726292", "0.4881835", "0.4851375", "0.48495308", "0.48304343", "0.48082933", "0.47642702", "0.4755273", "0.47469255", "0.474...
0.74647856
0
Return size of the dataset
def __len__(self): return self.get_num_sequence()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dataset_size(self):\n return self.dataset.size", "def __len__(self):\n return self._dataset.size(dirs=self._dirs)", "def dataset_size(self):\n if not self._dataset_size:\n # pylint: disable=attribute-defined-outside-init\n self._dataset_size = count_file_lines(\n ...
[ "0.8979698", "0.8526949", "0.82585645", "0.81426746", "0.8124066", "0.81118333", "0.8078935", "0.80428666", "0.8037988", "0.8028507", "0.8013156", "0.80074346", "0.80074346", "0.80074346", "0.80074346", "0.7973197", "0.7925562", "0.7925562", "0.7925562", "0.7925562", "0.79255...
0.0
-1
Number of studies in a dataset.
def get_num_sequence(self):
    """Return the number of studies in the dataset."""
    studies = self.study_list
    return len(studies)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_num_datasets(self, data):\n dsets = set()\n for items in data:\n dsetid = items[3]\n dsets.add(dsetid)\n return len(dsets)", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def num_students(self,dd=\"\"):\n\t\tif dd==\"\":\n\t\t\tdd=dat...
[ "0.6843366", "0.67893463", "0.6728229", "0.64195675", "0.62901825", "0.6255575", "0.6206059", "0.6124002", "0.61171126", "0.6110871", "0.6106557", "0.6096237", "0.607423", "0.60537356", "0.6051849", "0.60323846", "0.6030147", "0.60260516", "0.5990028", "0.59870434", "0.596654...
0.55716085
89
Name of the dataset.
def get_name(self):
    """Name of the dataset; subclasses must override this."""
    raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dataset_name(self):\n return self.dataset.name", "def dataset_name(self):\n return self._dataset_name", "def get_dataset_name(self):\n raise NotImplementedError", "def get_dataset_name(self):\n return self.dataset_name", "def __get_dataset_name(self):\n d = gdal.Open(...
[ "0.919469", "0.8976905", "0.87083614", "0.8686752", "0.7850114", "0.7471211", "0.7451666", "0.74262846", "0.7378664", "0.73779565", "0.7199527", "0.70992815", "0.70992815", "0.70992815", "0.6962095", "0.6886322", "0.6757635", "0.6693697", "0.667641", "0.6673114", "0.6660524",...
0.0
-1
Not to be used! Check get_frames() instead.
def __getitem__(self, item): return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def frames():\n raise RuntimeError('Must be implemented by subclasses.')", "def process_frames(self, data):\n pass", "def process_frames(self, data):\n pass", "def process_frames(self, data):\n pass", "def process_frames(self, data):\n pass", "def process_frames(self, d...
[ "0.7640561", "0.69378716", "0.69378716", "0.69378716", "0.69378716", "0.69378716", "0.69195557", "0.67941016", "0.67867607", "0.6755174", "0.6755174", "0.6755174", "0.6755174", "0.6722964", "0.6581867", "0.6483949", "0.6414792", "0.6398254", "0.63598907", "0.63598907", "0.635...
0.0
-1
Return information about a particular sequence.
def get_study_info(self,std_id):
    """Return information about a particular sequence; subclasses override."""
    raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSongTextInfo():\n sids = []\n documents = []\n sFile = open('../txt/two__Lastfm_song_Docs.txt')\n lines = sFile.readlines()\n index = 0\n for line in lines:\n line.strip('\\n')\n line.strip('\\r\\n')\n items = line.split('>>')\n sid = int(items[0])\n text = items[1]\n documents.appen...
[ "0.5863178", "0.5489143", "0.54585916", "0.5428158", "0.536509", "0.53436375", "0.53430116", "0.53390235", "0.532284", "0.529443", "0.52938986", "0.5290507", "0.52477586", "0.5244924", "0.5206089", "0.5205835", "0.5202415", "0.51880074", "0.51695544", "0.513454", "0.51211977"...
0.55018765
1
Get a set of frames from a particular study.
def get_frames(self,std_id, frame_ids, anno=None):
    """Get a set of frames from a particular study; subclasses override.

    :param std_id: study identifier.
    :param frame_ids: frame indices to fetch.
    :param anno: optional annotation data.
    :raises NotImplementedError: always, in this abstract base.
    """
    # Fix: the original used a bare ``raise`` with no active exception,
    # which raises RuntimeError('No active exception to re-raise'). Use
    # NotImplementedError, consistent with get_name()/get_study_info().
    raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_frames_for_sample(sample):\n path = os.path.join('data', sample[0])\n filename = sample[1]\n images = sorted(glob.glob(os.path.join(path, filename + '*jpg')))\n return images", "def getFrames():\n\t\tfor cam in Camera.CAMERAS: cam.getFrame()", "def getQiimeSffSamples(self, s...
[ "0.6049885", "0.59706116", "0.57190984", "0.5599101", "0.5508475", "0.55001473", "0.54696465", "0.5453543", "0.5443962", "0.54344064", "0.5425413", "0.53727776", "0.53502667", "0.53314114", "0.5238596", "0.5207204", "0.51428056", "0.5139112", "0.51072145", "0.509223", "0.5054...
0.58449024
2
Returns definitions of module output ports.
def output_types(self) -> Optional[Dict[str, NeuralType]]:
    """Returns definitions of module output ports.

    The labels port is a regression value for the 'sts-b' task and a
    categorical value for every other task.
    """
    label_type = (RegressionValuesType() if self.task_name == 'sts-b'
                  else CategoricalValuesType())
    return {
        'input_ids': NeuralType(('B', 'T'), ChannelType()),
        'segment_ids': NeuralType(('B', 'T'), ChannelType()),
        'input_mask': NeuralType(('B', 'T'), MaskType()),
        "labels": NeuralType(tuple('B'), label_type),
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getOutputPortsInfo(self):\n return [(gport.parentItem().module, gport.port, gport.controller.get_connections_from(gport.controller.current_pipeline, [gport.parentItem().module.id], gport.port.name), (gport.parentItem().boundingRect().right()-gport.parentItem().boundingRect().left())/2) for gport in self...
[ "0.7231068", "0.7108193", "0.7017519", "0.698227", "0.68667525", "0.6826521", "0.6819943", "0.67742664", "0.6712915", "0.6690715", "0.66691464", "0.6668941", "0.66622776", "0.66509235", "0.6629479", "0.66278684", "0.6618243", "0.6612363", "0.6601604", "0.6582517", "0.65754664...
0.0
-1
Loads a data file into a list of `InputBatch`s.
def convert_examples_to_features(
    self,
    examples: List[str],
    label_list: List[int],
    max_seq_length: int,
    tokenizer: TokenizerSpec,
    output_mode: str,
    bos_token: str = None,
    eos_token: str = '[SEP]',
    pad_token: str = '[PAD]',
    cls_token: str = '[CLS]',
    sep_token_extra: str = None,
    cls_token_at_end: bool = False,
    cls_token_segment_id: int = 0,
    pad_token_segment_id: int = 0,
    pad_on_left: bool = False,
    mask_padding_with_zero: bool = True,
    sequence_a_segment_id: int = 0,
    sequence_b_segment_id: int = 1,
):
    """Loads a data file into a list of `InputBatch`s.

    Each example is tokenized, truncated to fit ``max_seq_length`` after
    special tokens, decorated with BOS/EOS/CLS/SEP tokens, converted to
    ids, masked and padded.

    :param examples: examples with text_a, optional text_b and a label.
    :param label_list: all labels; their order defines the label ids.
    :param max_seq_length: fixed output length of every feature.
    :param tokenizer: tokenizer providing text_to_tokens / tokens_to_ids.
    :param output_mode: 'classification' (label id from label_list) or
        'regression' (label cast to float32).
    :param bos_token/eos_token/pad_token/cls_token/sep_token_extra:
        special tokens; pass None/'' to disable one.
    :param cls_token_at_end: append CLS instead of prepending it.
    :param cls_token_segment_id/pad_token_segment_id: segment ids used for
        CLS and padding positions.
    :param pad_on_left: pad at the start of the sequence instead of the end.
    :param mask_padding_with_zero: real tokens get mask 1 and padding 0
        (inverted when False).
    :param sequence_a_segment_id/sequence_b_segment_id: segment ids for
        the first and second sentence.
    :return: list of InputFeatures.
    :raises ValueError: if a built feature does not match max_seq_length.
    :raises KeyError: on an unknown output_mode.
    """
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for ex_index, example in enumerate(examples):
        if example.label == "-":
            # skip examples without a consensus label (e.g. in SNLI data set)
            continue
        if ex_index % 10000 == 0:
            logging.info("Writing example %d of %d" % (ex_index, len(examples)))

        tokens_a = tokenizer.text_to_tokens(example.text_a)
        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.text_to_tokens(example.text_b)
            # Budget for special tokens in a sentence pair, then shrink the
            # longer of the two sequences until the pair fits.
            special_tokens_count = 2 if eos_token else 0
            special_tokens_count += 1 if sep_token_extra else 0
            special_tokens_count += 2 if bos_token else 0
            special_tokens_count += 1 if cls_token else 0
            self._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - special_tokens_count)
        else:
            # Single sentence: truncate it directly to the available budget.
            special_tokens_count = 1 if eos_token else 0
            special_tokens_count += 1 if sep_token_extra else 0
            special_tokens_count += 1 if bos_token else 0
            if len(tokens_a) > max_seq_length - special_tokens_count:
                tokens_a = tokens_a[: max_seq_length - special_tokens_count]

        # Add special tokens to sequence_a
        tokens = tokens_a
        if bos_token:
            tokens = [bos_token] + tokens
        if eos_token:
            tokens += [eos_token]
        segment_ids = [sequence_a_segment_id] * len(tokens)
        # Add sequence separator between sequences
        if tokens_b and sep_token_extra:
            tokens += [sep_token_extra]
            segment_ids += [sequence_a_segment_id]
        # Add special tokens to sequence_b
        if tokens_b:
            if bos_token:
                tokens += [bos_token]
                segment_ids += [sequence_b_segment_id]
            tokens += tokens_b
            segment_ids += [sequence_b_segment_id] * (len(tokens_b))
            if eos_token:
                tokens += [eos_token]
                segment_ids += [sequence_b_segment_id]
        # Add classification token - for BERT models
        if cls_token:
            if cls_token_at_end:
                tokens += [cls_token]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                segment_ids = [cls_token_segment_id] + segment_ids

        input_ids = tokenizer.tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        pad_token_id = tokenizer.tokens_to_ids([pad_token])[0]
        if pad_on_left:
            input_ids = ([pad_token_id] * padding_length) + input_ids
            input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
            segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
        else:
            input_ids = input_ids + ([pad_token_id] * padding_length)
            input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
            segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
        if len(input_ids) != max_seq_length:
            raise ValueError("input_ids must be of length max_seq_length")
        if len(input_mask) != max_seq_length:
            raise ValueError("input_mask must be of length max_seq_length")
        if len(segment_ids) != max_seq_length:
            raise ValueError("segment_ids must be of length max_seq_length")

        if output_mode == "classification":
            label_id = label_map[example.label]
        elif output_mode == "regression":
            label_id = np.float32(example.label)
        else:
            raise KeyError(output_mode)

        # Log the first few examples for inspection.
        if ex_index < 5:
            logging.info("*** Example ***")
            logging.info("guid: %s" % (example.guid))
            logging.info("tokens: %s" % " ".join(list(map(str, tokens))))
            logging.info("input_ids: %s" % " ".join(list(map(str, input_ids))))
            logging.info("input_mask: %s" % " ".join(list(map(str, input_mask))))
            logging.info("segment_ids: %s" % " ".join(list(map(str, segment_ids))))
            logging.info("label: %s (id = %d)" % (example.label, label_id))

        features.append(
            InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id)
        )
    return features
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def LoadBatch(filename):", "def load_batch(fpath, label_key='labels'):\n f = open(fpath, 'rb')\n if sys.version_info < (3,):\n d = cPickle.load(f)\n else:\n d = cPickle.load(f, encoding='bytes')\n # decode utf8\n d_decoded = {}\n for k, v in d.items():\n d_d...
[ "0.7116717", "0.6971949", "0.6867253", "0.66187465", "0.65909564", "0.6522058", "0.6507134", "0.6502703", "0.6472176", "0.64224887", "0.64197946", "0.64197946", "0.6409885", "0.6405037", "0.638099", "0.6297901", "0.62916416", "0.6273321", "0.6232164", "0.6228044", "0.62100035...
0.0
-1
Truncates a sequence pair in place to the maximum length. This will always truncate the longer sequence one token at a time. This makes more sense than truncating an equal percent of tokens from each, since if one sequence is very short then each token that's truncated likely contains more information than a longer sequence.
def _truncate_seq_pair(self, tokens_a: str, tokens_b: str, max_length: int): while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):\r\n # This is a simple heuristic which will always truncate the longer sequence\r\n # one token at a time. This makes more sense than truncating an equal percent\r\n # of tokens from each, since if one sequence is very short then ea...
[ "0.8350219", "0.8350219", "0.8350219", "0.8334177", "0.83308214", "0.83209616", "0.8302651", "0.83008575", "0.8299758", "0.8299758", "0.8289869", "0.82892597", "0.8288885", "0.8280558", "0.82622397", "0.82547146", "0.8251895", "0.8251895", "0.8251895", "0.8251895", "0.8251895...
0.78222877
41
Converts examples into Text-to-Text batches to be used with a model like T5. Inputs are prefixed with a text prompt that indicates the task to perform.
def convert_examples_to_features(self):
    """Build text-to-text (T5-style) features for every stored example.

    Each example's two text fields are wrapped in a task prompt by the
    processor, tokenized, and paired with a decoder sequence built from the
    stringified label wrapped in BOS/EOS.

    Returns:
        A list with one ``[encoder_ids, decoder_input_ids, label_ids]`` entry
        per example, where decoder input and labels are the BOS/EOS-wrapped
        target shifted by one position (teacher forcing).
    """
    total = len(self.examples)
    batches = []
    for idx, example in enumerate(self.examples):
        if idx % 10000 == 0:
            logging.info(f"Writing example {idx} of {total}")
        prompted = self.processor.get_t5_prompted_query(example.text_a, example.text_b)
        # Unconditional slice: equivalent to truncating only when too long.
        encoder_ids = self.tokenizer.text_to_ids(prompted)[: self.max_seq_length]
        target_ids = self.tokenizer.text_to_ids(self.processor.label2string(example.label))
        decoder_ids = [self.tokenizer.bos_id] + target_ids + [self.tokenizer.eos_id]
        # Decoder input drops the trailing EOS; labels drop the leading BOS.
        batches.append([encoder_ids, decoder_ids[:-1], decoder_ids[1:]])
    return batches
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batch_tokenize_fn(examples):\n sources = examples[config.source_lang]\n targets = examples[config.target_lang]\n model_inputs = config.tokenizer(sources, max_length=config.max_source_length, truncation=True)\n\n # setup the tokenizer for targets,\n # huggingface expects the target tokenized ids ...
[ "0.61118716", "0.592422", "0.58838516", "0.585979", "0.5801184", "0.5781311", "0.57746387", "0.57520574", "0.57209575", "0.56961864", "0.5692046", "0.5686498", "0.5684553", "0.5684334", "0.56032795", "0.5577988", "0.55708474", "0.55665946", "0.55582416", "0.5552899", "0.55346...
0.56218064
14
Converts examples into Text-to-Text batches to be used with a model like T5. Inputs are prefixed with a text prompt that indicates the task to perform.
def convert_xnli_examples_to_features(self):
    """Filter the precomputed features down to the configured XNLI languages.

    The language code is parsed from each example's guid (second '-'-separated
    field) and appended to the kept feature rows.

    Returns:
        A list of ``feature + [language]`` rows for every example whose
        language appears in ``self.lang_list``.
    """
    kept = []
    for idx, example in enumerate(self.examples):
        example_lang = example.guid.split('-')[1]
        if example_lang not in self.lang_list:
            continue
        kept.append(self.features[idx] + [example_lang])
    return kept
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batch_tokenize_fn(examples):\n sources = examples[config.source_lang]\n targets = examples[config.target_lang]\n model_inputs = config.tokenizer(sources, max_length=config.max_source_length, truncation=True)\n\n # setup the tokenizer for targets,\n # huggingface expects the target tokenized ids ...
[ "0.6111713", "0.5924071", "0.58839375", "0.5859931", "0.5800309", "0.5781618", "0.5774486", "0.57522273", "0.57205606", "0.56961256", "0.56917894", "0.5685472", "0.5684877", "0.5684489", "0.5622114", "0.56032103", "0.5577944", "0.55711997", "0.5566548", "0.555741", "0.5552782...
0.0
-1
Export the settings as a DOM node.
def _exportNode(self):
    """Export the Person Catalog settings as a DOM node.

    Delegates the actual serialization to the base ZCatalog adapter and logs
    that the export happened.
    """
    exported = ZCatalogXMLAdapter._exportNode(self)
    self._logger.info('Person Catalog settings exported.')
    return exported
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _exportNode(self):\n node = self._extractProperties()\n self._logger.info('settings exported.')\n return node", "def saveToXml(self) -> org.jdom.Element:\n ...", "def to_xml(self):\r\n element = ET.Element(\"node\")\r\n\r\n element.attrib['name'] = self.name\r\n ...
[ "0.7224327", "0.56396407", "0.5610303", "0.55762726", "0.5313067", "0.5309712", "0.5301959", "0.5281795", "0.52664566", "0.5236637", "0.52181643", "0.5208334", "0.51855683", "0.517911", "0.51764274", "0.51666445", "0.5144142", "0.5129619", "0.5117044", "0.51037055", "0.510011...
0.63378537
1
Import the settings from the DOM node.
def _importNode(self, node):
    """Import the Person Catalog settings from the given DOM node.

    Delegates the actual parsing to the base ZCatalog adapter, then logs
    that the import happened.
    """
    ZCatalogXMLAdapter._importNode(self, node)
    self._logger.info('Person Catalog settings imported.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _importNode(self, node):\n if self.environ.shouldPurge():\n self._purgeProperties()\n self._initProperties(node)\n self._logger.info('settings imported.')", "def load_settings(self):\n\n self.std = settings.settings", "def __init__(self, settings_xml):\n # The list...
[ "0.6894675", "0.54793346", "0.5375706", "0.5349439", "0.5210967", "0.50370497", "0.4952911", "0.49231037", "0.48926273", "0.48800564", "0.48647884", "0.48480543", "0.4817451", "0.48003417", "0.47981542", "0.4741064", "0.47284755", "0.4716894", "0.46843192", "0.46814933", "0.4...
0.58705205
1
Label watersheds based on Barnes' priority flood algorithm
def watersheds(np.ndarray[dtype=float_t, ndim=2, mode="c"] z, missing_value = 0):
    """Label watersheds of the 2-D field ``z``.

    Thin Cython wrapper around the C priority-flood implementation
    (``priority_flood_watersheds_wrapper``).

    Parameters
    ----------
    z : 2-D C-contiguous ndarray of float_t
        Input field; the C routine receives width ``z.shape[1]`` and
        height ``z.shape[0]`` plus a raw pointer to the data.
    missing_value : optional, default 0
        Passed through to the C routine; presumably marks no-data cells —
        TODO confirm against the C wrapper's documentation.

    Returns
    -------
    2-D ndarray of dtype 'i' (C int), same shape as ``z``, filled with
    watershed labels by the C routine.
    """
    cdef np.ndarray[dtype=int_t, ndim=2, mode="c"] output
    # Uninitialized label buffer; the C routine writes every cell via the
    # raw ``output.data`` pointer below.
    output = np.empty_like(z, dtype='i')
    priority_flood_c.priority_flood_watersheds_wrapper(z.shape[1], z.shape[0], <float*>z.data, <int*>output.data, missing_value)
    return output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def watershed_supervised(graph, seeds):\n \n size_data = graph.shape[0]\n \n u, v, w = sp.sparse.find(graph)\n list_edges = list(zip(u,v,w))\n list_edges.sort(key = lambda x : x[2])\n \n UF = unionfind(size_data)\n labels = np.array(seeds, dtype=np.int32, copy=True)\n \n for e in l...
[ "0.7240329", "0.65720975", "0.62097526", "0.6137392", "0.6120207", "0.5910127", "0.5895485", "0.585058", "0.58383656", "0.5824214", "0.57871556", "0.5696335", "0.5649472", "0.5627702", "0.5602631", "0.5593537", "0.5549893", "0.55149436", "0.5510126", "0.54956084", "0.54879755...
0.51125866
66