query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
takes csv files, parses with panda and returns result
def import_data(csv_file): # skips bad lines data = pd.read_csv(csv_file, error_bad_lines=False) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_csv():", "def csv_loader(csv_file):\n df = pd.read_csv(csv_file, sep=';', parse_dates=['Data_Alteraçao'])\n pd.set_option('display.float_format', '{:.0f}'.format)\n\n df = df.fillna(0)\n df = df.drop(columns=['Cod. Pareamento', 'Cod. UF', 'Sigla UF', 'Cod. Subarea',\n ...
[ "0.6957248", "0.6680294", "0.66800976", "0.6676007", "0.6644503", "0.65280634", "0.64709973", "0.6345747", "0.63367206", "0.63359916", "0.6311614", "0.63049614", "0.630401", "0.62790114", "0.62762284", "0.6265179", "0.6254957", "0.6253625", "0.62445545", "0.6242823", "0.62291...
0.61578256
23
cleans data by lowers cases and removing accentuated chars then extracts word tokens of at least 2 chars
def cleanData(s): # extract only word tokens of at least 2 chars re.compile(r"\b\w\w + \b", re.U).findall(s) # xml_dict = {';': '', '&lt': '<', '&amp': '&', '&gt': '>', '&quot': '"', # '&apos': '\''} # for key, value in xml_dict.iteritems(): # s = s.replace(key, value) s.translate(maketrans('?!,.', ' ')) with open('stopwords.txt') as stop_words: stop_words = {line.strip().lower() for line in stop_words if line!='\n'} return s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def processes_and_tokenize(raw_document):\n\ttokenizer = RegexpTokenizer(r'\\w+')\n\ttokens = tokenizer.tokenize(raw_document.lower())\t\t# tokens = nltk.word_tokenize(corpus.lower()) # without removing punctiation\n\n\t#remove stop words\n\tstop_words = set(nltk.corpus.stopwords.words('english'))\n\t#stop_words =...
[ "0.6861259", "0.6852118", "0.67767626", "0.6744147", "0.67203104", "0.6711012", "0.6704925", "0.66891634", "0.66849273", "0.6602998", "0.6546478", "0.65269834", "0.65201867", "0.65182596", "0.64966595", "0.6496125", "0.64865", "0.6471367", "0.6440876", "0.64231676", "0.640726...
0.60811347
61
Support the following DHCP DeviceManager calls.
def create_dhcp_port(self, port): LOG.debug("create_dhcp_port: %s", port) port['port']['id'] = port['port']['network_id'] # The following MAC address will be assigned to the Linux dummy # interface that # networking_calico.agent.linux.interface.RoutedInterfaceDriver # creates. Therefore it will never actually be used or involved in the # sending or receiving of any real data. Hence it should not matter # that we use a hardcoded value here, and the same value on every # networking-calico compute host. The '2' bit of the first byte means # 'locally administered', which makes sense for a hardcoded value like # this and distinguishes it from the space of managed MAC addresses. port['port']['mac_address'] = '02:00:00:00:00:00' port['port']['device_owner'] = constants.DEVICE_OWNER_DHCP return dhcp.DictModel(port['port'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_dhcp_env(device):\n raise NotImplementedError", "def _RunDHCPCD(self, **kwargs):\n del kwargs\n clear_ifconfig_command = 'ifconfig {interface} 0.0.0.0'.format(\n interface=self.interface)\n # -K: Don't receive link messages for carrier status. You should\n # only have to ...
[ "0.6585499", "0.6445782", "0.63871115", "0.62862945", "0.6191916", "0.61801624", "0.61206764", "0.60355914", "0.59601206", "0.5855561", "0.57844305", "0.5700581", "0.56996197", "0.56953347", "0.5656746", "0.5649535", "0.5625262", "0.5623599", "0.5604455", "0.5591835", "0.5564...
0.0
-1
Support the following DHCP DeviceManager calls. self.plugin.release_dhcp_port(network.id, self.get_device_id(network))
def release_dhcp_port(self, network_id, device_id): LOG.debug("release_dhcp_port: %s %s", network_id, device_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def release_dhcp_port(self, network_id, device_id):\n return self.call(self.context,\n self.make_msg('release_dhcp_port',\n network_id=network_id,\n device_id=device_id,\n ...
[ "0.8054723", "0.71377176", "0.6566923", "0.6155076", "0.5985578", "0.59473217", "0.58076227", "0.5672177", "0.5659102", "0.5577378", "0.5557", "0.55073285", "0.5491355", "0.54843926", "0.5454663", "0.5447936", "0.5415151", "0.54091704", "0.5385533", "0.53768826", "0.53333277"...
0.84403765
0
Construct and return an empty network model.
def empty_network(network_id=NETWORK_ID): return make_net_model({"id": network_id, "subnets": [], "ports": [], "tenant_id": "calico", "mtu": neutron_constants.DEFAULT_NETWORK_MTU})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple", "def create_network(self):\n\n print ('Creating network, changing data will have no effect beyond this point.')\n n...
[ "0.703458", "0.6730677", "0.6650045", "0.66165984", "0.65974486", "0.65902346", "0.6558993", "0.645672", "0.6456341", "0.6397473", "0.6397053", "0.63881767", "0.6345971", "0.6332582", "0.63210064", "0.62721264", "0.62575936", "0.6252461", "0.62491995", "0.6246717", "0.6237882...
0.749267
0
Construct and return a copy of an existing network model.
def copy_network(source_net): return make_net_model({"id": source_net.id, "subnets": source_net.subnets, "ports": source_net.ports, "tenant_id": source_net.tenant_id, "mtu": source_net.mtu})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy(self):\n new_model = Model(\n name=self.name,\n functions=copy.deepcopy(self.functions),\n domain=self.domain.copy(),\n density=self.density.copy(),\n )\n new_model.update()\n\n return new_model", "def clone(self):\n return _...
[ "0.7690615", "0.7457661", "0.7384238", "0.7347668", "0.69865394", "0.6961353", "0.6951828", "0.6855561", "0.6832441", "0.6813686", "0.6811424", "0.6808021", "0.66637117", "0.66387904", "0.66021425", "0.6570278", "0.65329057", "0.65179616", "0.6502068", "0.64940655", "0.644854...
0.68326706
8
Handler for endpoint creations and updates.
def on_endpoint_set(self, response, name): try: hostname, orchestrator, workload_id, endpoint_id = \ split_endpoint_name(name) except ValueError: # For some reason this endpoint's name does not have the expected # form. Ignore it. LOG.warning("Unexpected form for endpoint name: %s", name) return if hostname != self.hostname: LOG.info("Endpoint not on this node: %s", name) return # Get the endpoint spec. endpoint = etcdutils.safe_decode_json(response.value, 'endpoint') if not (isinstance(endpoint, dict) and 'spec' in endpoint and isinstance(endpoint['spec'], dict) and 'interfaceName' in endpoint['spec'] and 'ipNetworks' in endpoint['spec'] and 'mac' in endpoint['spec']): # Endpoint data is invalid; treat as deletion. LOG.warning("Invalid endpoint data: %s => %s", response.value, endpoint) self.on_endpoint_delete(None, name) return annotations = endpoint.get('metadata', {}).get('annotations', {}) endpoint = endpoint['spec'] # If the endpoint has no ipNetworks, treat as deletion. This happens # when a resync from the mechanism driver overlaps with a port/VM being # deleted. if not endpoint['ipNetworks']: LOG.info("Endpoint has no ipNetworks: %s", endpoint) self.on_endpoint_delete(None, name) return # Construct NetModel port equivalent of Calico's endpoint data. 
fixed_ips = [] dns_assignments = [] fqdn = annotations.get(datamodel_v3.ANN_KEY_FQDN) network_id = annotations.get(datamodel_v3.ANN_KEY_NETWORK_ID) allowedIps = [e.split('/')[0] for e in endpoint.get('allowedIps', [])] for addrm in endpoint['ipNetworks']: ip_addr = addrm.split('/')[0] if ip_addr in allowedIps: continue subnet_id = self.subnet_watcher.get_subnet_id_for_addr( ip_addr, network_id ) or self.v1_subnet_watcher.get_subnet_id_for_addr( ip_addr, network_id ) if subnet_id is None: LOG.warning("Missing subnet data for one of port's IPs") continue fixed_ips.append({'subnet_id': subnet_id, 'ip_address': ip_addr}) if fqdn: dns_assignments.append({'hostname': fqdn.split('.')[0], 'ip_address': ip_addr, 'fqdn': fqdn}) if not fixed_ips: LOG.warning("Endpoint has no DHCP-served IPs: %s", endpoint) return extra_dhcp_opts = [] mtu = self.mtu_watcher.get_mtu(endpoint['interfaceName']) self.mtu_watcher.watch_port(endpoint_id, endpoint['interfaceName']) if mtu: extra_dhcp_opts.append(self.get_mtu_option(mtu)) port = {'id': endpoint_id, 'device_owner': 'calico', 'device_id': endpoint['interfaceName'], 'fixed_ips': fixed_ips, 'mac_address': endpoint['mac'], # FIXME: Calico currently does not handle extra DHCP # options, other than MTU, but there might be use cases # where it should handle further options. # https://bugs.launchpad.net/networking-calico/+bug/1553348 'extra_dhcp_opts': extra_dhcp_opts} if fqdn: port['dns_assignment'] = dns_assignments # Ensure that the cache includes the network and subnets for this port, # and set the port's network ID correctly. try: port['network_id'] = self._ensure_net_and_subnets(port) except SubnetIDNotFound: LOG.warning("Missing data for one of port's subnets") return # Report this at INFO level if it is a new port. Note, we # come through this code periodically for existing ports also, # because of how we watch the etcd DB for changes. 
if endpoint_id not in self.local_endpoint_ids: LOG.info("New port: %s", port) self.local_endpoint_ids.add(endpoint_id) else: LOG.debug("Refresh already known port: %s", port) # Add this port into the NetModel. self.agent.cache.put_port(dhcp.DictModel(port)) # If we have seen the TAP interface, schedule updating Dnsmasq; # otherwise wait until we do see the TAP interface, whereupon # _update_dnsmasq will be called again. Dnsmasq updates can # take a little time, and they run in series, so it's best to # wait if we don't have the information we need yet, to avoid # delaying the correct Dnsmasq update that we really want. if mtu: self._update_dnsmasq(port['network_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_handler(event, context):\n return update_endpoint(event)", "def endpoint_create(self, endpoint_name=None, config=None):\n if config is None:\n raise Exception(\"Config required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint', 'PUT', body=config)\n...
[ "0.73057497", "0.68444353", "0.633986", "0.6060341", "0.6045354", "0.59219885", "0.5871254", "0.5845486", "0.58109283", "0.5770383", "0.5706339", "0.5677897", "0.5664306", "0.5596975", "0.55959713", "0.5579243", "0.5576328", "0.554373", "0.553638", "0.55146235", "0.5514128", ...
0.6030018
5
Ensure that the cache has a NetModel and subnets for PORT.
def _ensure_net_and_subnets(self, port): # Gather the subnet IDs that we need for this port, and get the # NetModel if we already have it in the cache. needed_subnet_ids = set() net = None for fixed_ip in port['fixed_ips']: subnet_id = fixed_ip.get('subnet_id') if subnet_id: needed_subnet_ids.add(subnet_id) if not net: net = self.agent.cache.get_network_by_subnet_id(subnet_id) LOG.debug("Needed subnet IDs: %s", needed_subnet_ids) LOG.debug("Existing network model by subnet ID: %s", net) # For each subnet that we need, get its data from SubnetWatcher and # hold for adding into the cache. new_subnets = {} for subnet_id in needed_subnet_ids: # Get data for this subnet from the SubnetWatchers. subnet = (self.subnet_watcher.get_subnet(subnet_id) or self.v1_subnet_watcher.get_subnet(subnet_id)) if subnet is None: LOG.warning("No data for subnet %s", subnet_id) raise SubnetIDNotFound() new_subnets[subnet_id] = subnet if not net: # We don't already have a NetModel, so look for a cached NetModel # with the right network ID. (In this case we must have new # subnets to add into the cache, and the cached NetModel must have # subnets other than the ones that we're adding in this iteration; # otherwise we would have already found it when searching by # subnet_id above.) assert new_subnets network_id = list(new_subnets.values())[0]['network_id'] net = self.agent.cache.get_network_by_id(network_id) LOG.debug("Existing network model by network ID: %s", net) if not net: # We still have no NetModel for the relevant network ID, so create # a new one. In this case we _must_ be adding new subnets. assert new_subnets net = empty_network(network_id) LOG.debug("New network %s", net) elif new_subnets: # We have a NetModel that was already in the cache and are about to # modify it. Cache replacement only works if the new NetModel is a # distinct object from the existing one, so make a copy here. 
net = copy_network(net) LOG.debug("Copied network %s", net) if new_subnets: # Add the new subnets into the NetModel. assert net net.subnets = [s for s in net.subnets if s.id not in new_subnets] net.subnets += list(new_subnets.values()) # Add (or update) the NetModel in the cache. LOG.debug("Net: %s", net) _fix_network_cache_port_lookup(self.agent, net.id) self.agent.cache.put(net) return net.id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fix_network_cache_port_lookup(agent, network_id):\n\n # If there is an existing NetModel for this network ID, ensure that all\n # its ports are in the port_lookup dict.\n if network_id in agent.cache.cache:\n for port in agent.cache.cache[network_id].ports:\n agent.cache.port_lookup...
[ "0.5711651", "0.5685266", "0.5486034", "0.5470297", "0.53793174", "0.53505814", "0.52206916", "0.520188", "0.50624466", "0.50617975", "0.5060805", "0.5031961", "0.5029634", "0.50271976", "0.5011506", "0.5004499", "0.5002426", "0.49931684", "0.49833202", "0.4974392", "0.496706...
0.7904054
0
Start/stop/restart Dnsmasq for NETWORK_ID.
def _update_dnsmasq(self, network_id): # Check whether we should really do the following processing. if self.suppress_dnsmasq_updates: LOG.debug("Don't update dnsmasq yet;" " must be processing a snapshot") self.dirty_networks.add(network_id) return self.dnsmasq_updater.update_network(network_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def startServices():\n # dnsmasq\n out_dnsmasq = subprocess.run([\"systemctl\", \"restart\", \"dnsmasq\"], stdout=subprocess.PIPE)\n if out_dnsmasq.returncode == 0:\n logging.info(\"dnsmasq service started/restarted successfully\")\n else:\n logging.error(\"dnsmasq service start restart e...
[ "0.57506925", "0.5736145", "0.5705236", "0.5628683", "0.56053996", "0.55890465", "0.5588804", "0.5274913", "0.5227108", "0.5199843", "0.5196082", "0.5161446", "0.51592255", "0.51375973", "0.51361805", "0.51020324", "0.50229216", "0.49852252", "0.49827528", "0.49735984", "0.49...
0.6410348
0
Handler for endpoint deletion.
def on_endpoint_delete(self, response_ignored, name): try: hostname, orchestrator, workload_id, endpoint_id = \ split_endpoint_name(name) except ValueError: # For some reason this endpoint's name does not have the expected # form. Ignore it. LOG.warning("Unexpected form for endpoint name: %s", name) return # Remove endpoint ID from our cache. Note, it might not be # there because we haven't checked whether the endpoint just # deleted is a local one; hence 'discard' instead of 'remove'. self.local_endpoint_ids.discard(endpoint_id) # Find the corresponding port in the DHCP agent's cache. port = self.agent.cache.get_port_by_id(endpoint_id) if port: LOG.debug("deleted port: %s", port) self.mtu_watcher.unwatch_port(endpoint_id, port.device_id) self.agent.cache.remove_port(port) self._update_dnsmasq(port.network_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_handler(event, context):\n delete_endpoint_config(event)", "def delete_endpoint(self):\n logger.warning(f\"Deleting hosting endpoint '{self.endpoint_name}'...\")\n self._realtime_predictor.delete_endpoint()", "def delete_endpoint(EndpointName=None):\n pass", "def delete_endpoin...
[ "0.8157214", "0.76216006", "0.74637145", "0.7360159", "0.73364997", "0.7239041", "0.70955694", "0.69749475", "0.6959527", "0.69571376", "0.6916459", "0.68548656", "0.67948025", "0.6789727", "0.6783907", "0.6775918", "0.6722421", "0.6706916", "0.6706916", "0.66402143", "0.6639...
0.76158285
2
Called when a new snapshot is about to be read from etcdv3.
def _pre_snapshot_hook(self): # Add all current networks to the dirty set, so that we will stop their # Dnsmasqs if no longer needed. Also remove all port and subnet # information. LOG.debug("Reset cache for new snapshot") for network_id in list(self.agent.cache.get_network_ids()): self.dirty_networks.add(network_id) _fix_network_cache_port_lookup(self.agent, network_id) self.agent.cache.put(empty_network(network_id)) # Suppress Dnsmasq updates until we've processed the whole snapshot. self.suppress_dnsmasq_updates = True return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_snapshot(self):\r\n assert self.snapshot is not None\r\n self.name = self.snapshot[0]\r\n self.size = self.snapshot[1]\r\n self.cells = self.snapshot[2]\r\n self.bucket_array.load_snapshot()", "def load_snapshot(self):\r\n assert self.snapshot is not None\r\n ...
[ "0.61946166", "0.6155114", "0.57125986", "0.56532395", "0.5642027", "0.5526328", "0.55234843", "0.552134", "0.5452202", "0.54247737", "0.5324958", "0.52862036", "0.52843034", "0.5257525", "0.5253173", "0.5240661", "0.52345574", "0.520435", "0.5192152", "0.51732963", "0.508970...
0.49107116
29
Fix NetworkCache before removing or replacing a network. neutron.agent.dhcp.agent is bugged in that it adds the DHCP port into the cache without updating the cache's port_lookup dict, but then NetworkCache.remove() barfs if there is a port in network.ports but not in that dict... NetworkCache.put() implicitly does a remove() first if there is already a NetModel in the cache with the same ID. So a put() to update or replace a network also hits this problem. This method avoids that problem by ensuring that all of a network's ports are in the port_lookup dict. A caller should call this immediately before a remove() or a put().
def _fix_network_cache_port_lookup(agent, network_id): # If there is an existing NetModel for this network ID, ensure that all # its ports are in the port_lookup dict. if network_id in agent.cache.cache: for port in agent.cache.cache[network_id].ports: agent.cache.port_lookup[port.id] = network_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _ensure_net_and_subnets(self, port):\n\n # Gather the subnet IDs that we need for this port, and get the\n # NetModel if we already have it in the cache.\n needed_subnet_ids = set()\n net = None\n for fixed_ip in port['fixed_ips']:\n subnet_id = fixed_ip.get('subne...
[ "0.6243899", "0.5750837", "0.55582297", "0.5493871", "0.54649335", "0.53545606", "0.5274179", "0.52181244", "0.51568764", "0.51559347", "0.5134361", "0.51047474", "0.5044345", "0.5022658", "0.50088584", "0.50081223", "0.50078607", "0.49672914", "0.4941016", "0.49056178", "0.4...
0.7541457
0
Handler for subnet creations and updates.
def on_subnet_set(self, response, subnet_id): LOG.debug("Subnet %s created or updated", subnet_id) subnet_data = etcdutils.safe_decode_json(response.value, 'subnet') if subnet_data is None: LOG.warning("Invalid subnet data %s", response.value) return if not (isinstance(subnet_data, dict) and 'cidr' in subnet_data and 'gateway_ip' in subnet_data): LOG.warning("Invalid subnet data: %s", subnet_data) return self.subnets_by_id[subnet_id] = subnet_data return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subnet_create_event(self, subnet_info):\n\n subnet = subnet_info.get('subnet')\n if subnet:\n self.create_subnet(subnet)\n else:\n # Check whether request is for subnets.\n subnets = subnet_info.get('subnets')\n if subnets:\n for s...
[ "0.7963921", "0.7852703", "0.77121514", "0.74027985", "0.72448415", "0.7210939", "0.7181357", "0.70052475", "0.68840075", "0.6855804", "0.6780139", "0.6724793", "0.66803986", "0.6633644", "0.65841454", "0.6573212", "0.65678436", "0.6544619", "0.6485936", "0.6472504", "0.64677...
0.68601817
9
Handler for subnet deletions.
def on_subnet_del(self, response, subnet_id): LOG.info("Subnet %s deleted", subnet_id) if subnet_id in self.subnets_by_id: del self.subnets_by_id[subnet_id] return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post_delete_subnet(self, sender, instance, **kwargs):\n RecurseNetworks.delete_entries(subnet=str(instance.ip_network), net_name=instance.name)", "def subnet_delete_end(self, payload):\n subnet_id = payload['subnet_id']\n network = self.cache.get_network_by_subnet_id(subnet_id)\n ...
[ "0.7702621", "0.75508094", "0.7533499", "0.7260703", "0.72326857", "0.70582956", "0.6918789", "0.6917834", "0.6906911", "0.66798496", "0.6626956", "0.6558707", "0.6398431", "0.63931185", "0.6186472", "0.60645396", "0.6064328", "0.5995911", "0.59661895", "0.59549713", "0.59370...
0.73326564
3
Get data for the specified subnet.
def get_subnet(self, subnet_id): LOG.debug("Get subnet %s", subnet_id) if subnet_id not in self.subnets_by_id: return None data = self.subnets_by_id[subnet_id] LOG.debug("Subnet data: %s", data) # Convert to form expected by NetModel. ip_version = 6 if ':' in data['cidr'] else 4 subnet = {'enable_dhcp': True, 'ip_version': ip_version, 'cidr': data['cidr'], 'dns_nameservers': data.get('dns_servers') or [], 'id': subnet_id, 'gateway_ip': data['gateway_ip'], 'host_routes': data.get('host_routes', []), 'network_id': data.get('network_id', NETWORK_ID)} if ip_version == 6: subnet['ipv6_address_mode'] = DHCPV6_STATEFUL subnet['ipv6_ra_mode'] = DHCPV6_STATEFUL return dhcp.DictModel(subnet)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_subnet(self, subnet, **_params):\r\n return self.get(self.subnet_path % (subnet), params=_params)", "def get_subnet_details(self, subnet_name=\"dummy_subnet\", subnet_id=None):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/subnets\"\n _headers = {'x-auth-token': self.project_in...
[ "0.65944624", "0.6546326", "0.64507025", "0.61743736", "0.6011078", "0.6010956", "0.58433086", "0.58370644", "0.57784945", "0.57641536", "0.5734289", "0.57341164", "0.5653118", "0.56335646", "0.556625", "0.556153", "0.5557205", "0.54509723", "0.54425985", "0.5385783", "0.5367...
0.6255019
3
Run the EtcdWatcher loop.
def run(self): self.etcd.start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n watcher = self._watcher(self.on_recv)\n watcher.loop()", "def run(self):\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n\n event_handler = PatternMat...
[ "0.60920537", "0.60423386", "0.6004358", "0.59458005", "0.5935371", "0.5914737", "0.5896926", "0.58915985", "0.58893985", "0.5884968", "0.58668745", "0.5844063", "0.5844063", "0.58165354", "0.58041525", "0.58041525", "0.5799248", "0.57745546", "0.57614356", "0.5740617", "0.57...
0.6893604
0
starting_position (x,y) tuple representing current_position
def init(starting_position, steering_noise, distance_noise, sonar_noise, measurement_noise, speed, turning_speed, gps_delay, execution_cpu_time_limit): raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_start_loc(self) -> Tuple[int, int]:\n assert self.pos_marker\n return self.pos_marker.working_loc", "def set_starting_pos(self):\n if self.start and self.is_unoccupied(*self.start):\n self.current_pos = self.start[:]\n else:\n self.set_random_pos('startin...
[ "0.7147887", "0.7056669", "0.69363606", "0.6935041", "0.69009805", "0.69009805", "0.69009805", "0.6888912", "0.6874065", "0.68649364", "0.6857859", "0.67927134", "0.67808557", "0.6730022", "0.67153525", "0.67080766", "0.6668199", "0.66649705", "0.66494197", "0.6641967", "0.66...
0.0
-1
React to sensory data
def on_sense_sonar(self, dist): raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_data():\n pass", "def data_changed(self):\n return", "def update_data(self):\n # Just set data_changed, the component should do the rest.\n self.data_changed = True", "def update(self):", "def update(self):", "def update(self):", "def manage_info():", "def updat...
[ "0.5340235", "0.5297468", "0.5195286", "0.51405054", "0.51405054", "0.51405054", "0.50970787", "0.497346", "0.497207", "0.49673796", "0.49305034", "0.48768204", "0.48723578", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0....
0.0
-1
React to sensory data
def on_sense_field(self, field_type, field_parameter): raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_data():\n pass", "def data_changed(self):\n return", "def update_data(self):\n # Just set data_changed, the component should do the rest.\n self.data_changed = True", "def update(self):", "def update(self):", "def update(self):", "def manage_info():", "def updat...
[ "0.5340235", "0.5297468", "0.5195286", "0.51405054", "0.51405054", "0.51405054", "0.50970787", "0.497346", "0.497207", "0.49673796", "0.49305034", "0.48768204", "0.48723578", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0....
0.0
-1
React to sensory data
def on_sense_gps(self, x, y): raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_data():\n pass", "def data_changed(self):\n return", "def update_data(self):\n # Just set data_changed, the component should do the rest.\n self.data_changed = True", "def update(self):", "def update(self):", "def update(self):", "def manage_info():", "def updat...
[ "0.5340235", "0.5297468", "0.5195286", "0.51405054", "0.51405054", "0.51405054", "0.50970787", "0.497346", "0.497207", "0.49673796", "0.49305034", "0.48768204", "0.48723578", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0....
0.0
-1
Compiles robot from given file and returns class object
def compile_robot(file_name, module_name = "contestant_module"): global counter_module module_name += str(counter_module) counter_module += 1 mod = importCode(file_name, module_name) compiled_class = None for symbol in dir(mod): if hasattr(getattr(mod, symbol), "act") and getattr(mod, symbol).__name__ != "RobotController": compiled_class = getattr(mod, symbol) print compiled_class globals()[compiled_class.__name__] = compiled_class if compiled_class is None: raise KrakrobotException("Not found class with act() function named different than RobotController in provided .py") return compiled_class, mod
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepareRobot(self):\n f = StringIO.StringIO()\n f.write(self.zipfile)\n\n zip = zipfile.ZipFile(f)\n \n #modules of the form {\"robot\" : \"from sr import *...\", ...}\n modules = dict([(os.path.splitext(z.filename)[0], zip.open(z.filename).read())\n ...
[ "0.5830989", "0.55374384", "0.55307627", "0.54860145", "0.54444677", "0.5422501", "0.5409071", "0.5406872", "0.5393358", "0.5390054", "0.5387643", "0.52916986", "0.52400696", "0.5232065", "0.522042", "0.5217083", "0.52124226", "0.52053887", "0.5201384", "0.5178366", "0.517518...
0.7441334
0
Returns the associated driver with some custom settings.
def _get_selenium_browser(navigator, fLOG=noLOG): with warnings.catch_warnings(): warnings.simplefilter("ignore", ImportWarning) from selenium import webdriver from selenium.webdriver.common.desired_capabilities import DesiredCapabilities fLOG("[webshot] navigator=", navigator) if navigator == "firefox": firefox_capabilities = DesiredCapabilities.FIREFOX.copy() firefox_capabilities['marionette'] = True firefox_capabilities[ 'binary'] = r"C:\Program Files (x86)\Mozilla Firefox\firefox.exe" browser = webdriver.Firefox(capabilities=firefox_capabilities) elif navigator == "chrome": if sys.platform.startswith("win"): chromed = where_in_path("chromedriver.exe") if chromed is None: install_chromedriver(fLOG=fLOG) chromed = where_in_path("chromedriver.exe") if chromed is None: raise FileNotFoundError( "unable to install 'chromedriver.exe'") else: fLOG("[_get_selenium_browser] found chromedriver:", chromed) else: chromed = 'chromedriver' start_navi = True if start_navi: fLOG("[_get_selenium_browser] start", navigator) chrome_options = webdriver.ChromeOptions() chrome_options.add_argument('--headless') chrome_options.add_argument('--no-sandbox') chrome_options.add_argument('--verbose') browser = webdriver.Chrome(executable_path=chromed, chrome_options=chrome_options) else: with warnings.catch_warnings(): warnings.simplefilter("ignore", ImportWarning) import selenium.webdriver.chrome.service as wservice fLOG("[_get_selenium_browser] create service") service = wservice.Service(chromed) fLOG("[_get_selenium_browser] start service") service.start() fLOG("[_get_selenium_browser] declare remote") capabilities = {'chrome.binary': chromed} browser = webdriver.Remote(service.service_url, capabilities) elif navigator == "ie": browser = webdriver.Ie() elif navigator == "opera": if sys.platform.startswith("win"): chromed = where_in_path("operadriver.exe") if chromed is None: install_operadriver(fLOG=fLOG) chromed = where_in_path("operadriver.exe") if chromed is None: raise 
FileNotFoundError( "unable to install operadriver.exe") else: fLOG("[_get_selenium_browser] found chromedriver:", chromed) else: chromed = 'operadriver' browser = webdriver.Opera(chromed) # pylint: disable=E1101 elif navigator == "edge": browser = webdriver.Edge() else: raise RuntimeError( f"unable to interpret the navigator '{navigator}'") fLOG("[_get_selenium_browser] navigator is started") return browser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_driver(drv):\n return GenericDriver.get_driver(drv)", "def get_driver(self):\n\t\treturn self.driver", "def get_driver(self):\n return self.driver", "def driver(self):\n \n return self.__driver", "def _get_driver(self, driver_name):\n driver = lb_const.SERVICE_TYPE + driv...
[ "0.7748907", "0.7700252", "0.75033057", "0.73619497", "0.6932479", "0.6926911", "0.68707806", "0.6858391", "0.6836347", "0.6701114", "0.65786314", "0.65523374", "0.6457696", "0.6406544", "0.63908494", "0.6353427", "0.6292279", "0.62488717", "0.62426007", "0.6222315", "0.62124...
0.0
-1
>>> isinstance(lab7_q1(), str) True
def lab7_q1(): return """ YOUR EXPLANATION HERE """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def is_str(x):\n return isinstance(x, str)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def test_is_str...
[ "0.69384867", "0.6921196", "0.68661743", "0.6843844", "0.68393165", "0.6797242", "0.6774741", "0.67398", "0.6725461", "0.6714078", "0.6644135", "0.6597622", "0.6534236", "0.6527023", "0.6520744", "0.65187854", "0.65055484", "0.6498066", "0.64673746", "0.6463188", "0.64524394"...
0.0
-1
>>> isinstance(lab7_q2(), str) True
def lab7_q2():
    """Return the written answer for lab 7, question 2 (placeholder text)."""
    answer = """ YOUR EXPLANATION HERE """
    return answer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def _is_string(arg):\n return isi...
[ "0.68337935", "0.67917323", "0.67564535", "0.6700116", "0.6695061", "0.6639795", "0.66282505", "0.659525", "0.6584475", "0.6577788", "0.65511394", "0.64796364", "0.6450336", "0.64198357", "0.63853145", "0.63774383", "0.6376907", "0.63764954", "0.63604254", "0.6353656", "0.635...
0.0
-1
>>> isinstance(lab7_q3(), str) True
def lab7_q3():
    """Return the written answer for lab 7, question 3 (placeholder text)."""
    answer = """ YOUR EXPLANATION HERE """
    return answer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def is_str(x):\n return isinstance(x, str)", "def isString(x):\n if type(x) == str:\n return True\n else:\n return Fal...
[ "0.6728935", "0.6689395", "0.6678108", "0.6677781", "0.66326827", "0.6617523", "0.660961", "0.6559037", "0.6538614", "0.6513645", "0.65115064", "0.6501745", "0.64532465", "0.63854235", "0.63740855", "0.63691497", "0.6352172", "0.6336235", "0.6310395", "0.63009924", "0.6293086...
0.0
-1
>>> isinstance(lab7_q4(), str) True
def lab7_q4():
    """Return the written answer for lab 7, question 4 (placeholder text)."""
    answer = """ YOUR EXPLANATION HERE """
    return answer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_str(x):\n return isinstance(x, str)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat...
[ "0.6849578", "0.6827361", "0.68070114", "0.6767357", "0.6741126", "0.6712229", "0.6685362", "0.6681972", "0.66778773", "0.6619105", "0.6560577", "0.6549617", "0.64978635", "0.64887595", "0.64553833", "0.6445665", "0.64326364", "0.6430888", "0.64252806", "0.64158726", "0.64056...
0.0
-1
>>> isinstance(lab8_q2(), str) True
def lab8_q2():
    """Return the written answer for lab 8, question 2 (placeholder text)."""
    answer = """ YOUR EXPLANATION HERE """
    return answer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_str ( self, s ):\r\n\t\treturn i...
[ "0.69167274", "0.6739174", "0.67315763", "0.6592448", "0.6537371", "0.65342003", "0.64775366", "0.64470917", "0.6442934", "0.6439365", "0.64354664", "0.6411122", "0.63913774", "0.63811743", "0.6379064", "0.6348017", "0.6300297", "0.6297146", "0.6295145", "0.6274249", "0.62652...
0.0
-1
>>> isinstance(lab8_q3(), str) True
def lab8_q3():
    """Return the written answer for lab 8, question 3 (placeholder text)."""
    answer = """ YOUR EXPLANATION HERE """
    return answer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def test_is_str_rep_string(self):\n self.assertIsInstance(cardu...
[ "0.69046277", "0.66528326", "0.6531326", "0.6531229", "0.6520508", "0.6515218", "0.6508465", "0.6506103", "0.64962703", "0.64860326", "0.647134", "0.6461159", "0.64014983", "0.636057", "0.6318982", "0.63128656", "0.6281687", "0.6279039", "0.626913", "0.6251587", "0.6250445", ...
0.0
-1
>>> isinstance(lab8_q4(), str) True
def lab8_q4():
    """Return the written answer for lab 8, question 4 (placeholder text)."""
    answer = """ YOUR EXPLANATION HERE """
    return answer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def is_string(obj):\n return isi...
[ "0.69331145", "0.67646885", "0.669969", "0.66482395", "0.6627652", "0.66142136", "0.6606139", "0.6588437", "0.6559902", "0.65317225", "0.6450003", "0.6448838", "0.6428933", "0.64125264", "0.64066905", "0.63528246", "0.6321325", "0.6315828", "0.63132316", "0.631172", "0.630439...
0.0
-1
>>> isinstance(lab8_q5(), str) True
def lab8_q5():
    """Return the written answer for lab 8, question 5 (placeholder text)."""
    answer = """ YOUR EXPLANATION HERE """
    return answer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def is_string(value):\n return isinstance(value, basestring)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def test_is_str_rep_string(self):\n self.assertIsInstance(card...
[ "0.69726", "0.6854784", "0.6776574", "0.67209005", "0.66947055", "0.66858095", "0.6676679", "0.6650898", "0.66201556", "0.66064316", "0.6565904", "0.65590566", "0.6524552", "0.64751035", "0.6468422", "0.6456738", "0.64451706", "0.6415069", "0.6407343", "0.639586", "0.63950634...
0.0
-1
>>> isinstance(lab9_q2(), str) True
def lab9_q2():
    """Return the written answer for lab 9, question 2 (placeholder text)."""
    answer = """ YOUR EXPLANATION HERE """
    return answer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def is_str(x):\n return isinstance(x, str)", "def _is_...
[ "0.69377685", "0.680619", "0.67842895", "0.67540807", "0.67217183", "0.6663479", "0.6607859", "0.65969336", "0.6555628", "0.6487519", "0.64803255", "0.6448528", "0.64470345", "0.6439503", "0.6410575", "0.6409048", "0.640582", "0.6395083", "0.63912946", "0.63880926", "0.637876...
0.0
-1
>>> isinstance(lab9_q3(), str) True
def lab9_q3():
    """Return the written answer for lab 9, question 3 (placeholder text)."""
    answer = """ YOUR EXPLANATION HERE """
    return answer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_str(x):\n return isinstance(x, str)", "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def _is_string(data):\n return len(data) and isinstanc...
[ "0.67149335", "0.66803926", "0.66799915", "0.66795844", "0.6666466", "0.6657703", "0.662522", "0.6536551", "0.64925563", "0.6467223", "0.64577836", "0.64136744", "0.64020425", "0.6394686", "0.63700676", "0.63617307", "0.63562024", "0.6313202", "0.62957275", "0.6292955", "0.62...
0.0
-1
>>> isinstance(lab9_q4(), str) True
def lab9_q4():
    """Return the written answer for lab 9, question 4 (placeholder text)."""
    answer = """ YOUR EXPLANATION HERE """
    return answer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_str(x):\n return isinstance(x, str)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def isString(x):\n if type(x) == str:\n return True\n else:\n ...
[ "0.6942101", "0.6876074", "0.67846864", "0.67608213", "0.6748942", "0.6748524", "0.6707322", "0.66833335", "0.6645385", "0.65739566", "0.6512699", "0.64795643", "0.6473345", "0.64647806", "0.64464295", "0.64428717", "0.64399874", "0.6432312", "0.64164597", "0.63935167", "0.63...
0.0
-1
>>> isinstance(lab9_q5(), str) True
def lab9_q5():
    """Return the written answer for lab 9, question 5 (placeholder text)."""
    answer = """ YOUR EXPLANATION HERE """
    return answer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_string(value):\n return isinstance(value, basestring)", "def is_str(x):\n return isinstance(x, str)", "def isString(x):\n if type(x) == str:\n return True\n else:\n ...
[ "0.6877922", "0.6834238", "0.68263847", "0.6825494", "0.68185055", "0.6800018", "0.6793659", "0.66846204", "0.66589445", "0.6589347", "0.657317", "0.6551655", "0.65453273", "0.6524285", "0.6505871", "0.6494237", "0.64909095", "0.64687836", "0.64158064", "0.6410573", "0.639751...
0.0
-1
>>> isinstance(lab10_q2(), str) True
# Free-response answer for lab 10 Q1, returned verbatim as a string for the grader.
def lab10_q1(): return """ Since there has to be at least two, len(self.fruits) >=2 and at least one cup, self.cups (number == True) Decrease cups by 1 because we use one to create mixed juice. first pop gives the 0 index fruit, second pop gives the next 0 index which is originally at 1 index. len(item) to show number of letters then -1 because of hyphen. """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def is_str(x):\n return isinstance(x, str)", "def _is_...
[ "0.70104426", "0.6694115", "0.66540843", "0.6631845", "0.65846175", "0.6546801", "0.6527376", "0.6501472", "0.6483732", "0.6467406", "0.64658016", "0.64406013", "0.64081883", "0.6402971", "0.6375239", "0.63414836", "0.6304244", "0.62933546", "0.62872994", "0.6280994", "0.6278...
0.0
-1
>>> isinstance(lab10_q3(), str) True
# Free-response answer for lab 10 Q2, returned verbatim as a string for the grader.
def lab10_q2(): return """ Make a 'total' to add up the total revenue as while loop plays. item = qvm.dispense, makes it so item is the juice. While item: because if there is a juice this while loop will continue. If there aren't two fruits, there wont be a juice so while loop stops. total += qvm.collect_money(item). (this should be += not = right?) to add the revenue from that 'item' revalue item to the next juice. return total """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_str(x):\n return isinstance(x, str)", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, typ...
[ "0.6767156", "0.662008", "0.65915257", "0.65897584", "0.6570212", "0.6540568", "0.6512019", "0.6466866", "0.6387287", "0.6369226", "0.634048", "0.6340179", "0.6338254", "0.633478", "0.6307848", "0.6269958", "0.6269275", "0.6240808", "0.6229523", "0.62286043", "0.62249273", ...
0.0
-1
>>> isinstance(lab10_q4(), str) True
# Free-response answer for lab 10 Q3, returned verbatim as a string for the grader.
def lab10_q3(): return """ Use list comprehension max(lst_of_qvm, key=lambda qvm : total_revenue(qvm)) This makes each element of the list go through the key which gives total_revenue for each one. Then just get the max in that list """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_str(x):\n return isinstance(x, str)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def isString(x):\n if type(x) == str:\n return True\n else:\n ...
[ "0.7007634", "0.6753712", "0.6680644", "0.6641429", "0.66254205", "0.66071725", "0.6594754", "0.65537024", "0.6541653", "0.6496038", "0.6468674", "0.6443504", "0.6440572", "0.6406669", "0.6398117", "0.6318649", "0.63153744", "0.6313979", "0.63125205", "0.6301631", "0.6300007"...
0.0
-1
>>> isinstance(lab10_q5(), str) True
# Free-response answer for lab 10 Q4, returned verbatim as a string for the grader.
def lab10_q4(): return """ Use 'for i in range(len(seq)//2):' to give i be the indexing for the first half of the lst. Then make sure it is equal to the opposite indexing which is [-i-1] or [len(seq)-1-i] if it is not equal return false if the for loop is done without fail it means they are all equal so return True """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_string(value):\n return isinstance(value, basestring)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def isString(x):\n if type(x) == str:\n retur...
[ "0.69396424", "0.6750825", "0.67340475", "0.6716429", "0.6714517", "0.66726655", "0.665966", "0.66213614", "0.6544444", "0.64893436", "0.64751285", "0.64728814", "0.64611757", "0.6450565", "0.6434197", "0.6419371", "0.6414928", "0.638308", "0.6381245", "0.6368124", "0.6322138...
0.0
-1
>>> isinstance(lab10_q5(), str) True
# Free-response answer for lab 10 Q5, returned verbatim as a string for the grader.
def lab10_q5(): return """ 'assert type(c) is int' to make sure c is a number/integer make a helper function to solve this with the same parameters first base case is if the count is 0 which returns Link.empty if true Then is when lst is Link.empty, which should just Link.empty as well recursion for these base cases where you just link the first with the helper(rest, c) :Link(lst.first, helper(lst.rest, count)) """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_string(value):\n return isinstance(value, basestring)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def isString(x):\n if type(x) == str:\n retur...
[ "0.69393235", "0.67513615", "0.6735242", "0.67170364", "0.67146415", "0.6673512", "0.6659964", "0.6621408", "0.6545047", "0.6489594", "0.64740264", "0.64732605", "0.64615583", "0.6448504", "0.64349174", "0.6421704", "0.64157605", "0.63823795", "0.6382036", "0.6368499", "0.632...
0.0
-1
>>> isinstance(lab10_q5(), str) True
# Free-response answer for lab 10 Q6, returned verbatim as a string for the grader.
def lab10_q6(): return """ Use try: return dictionary[key] so that if there is a key and value the value will be the output. make except Keyerror where it prints "Avoid Exception" and the new value for this key will be 'no value' next time it is called. It will output 'no value' """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_string(value):\n return isinstance(value, basestring)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def isString(x):\n if type(x) == str:\n retur...
[ "0.69393235", "0.67513615", "0.6735242", "0.67170364", "0.67146415", "0.6673512", "0.6659964", "0.6621408", "0.6545047", "0.6489594", "0.64740264", "0.64732605", "0.64615583", "0.6448504", "0.64349174", "0.6421704", "0.64157605", "0.63823795", "0.6382036", "0.6368499", "0.632...
0.0
-1
Function to read the file
def read_file(file="input4.txt"):
    """Read a whitespace-separated log file and normalize each record.

    Each line is expected to look like ``<ip> <hh:mm:ss> <weekday>``; the
    time field is reduced to its hour component.

    :param file: path of the UTF-8 input file.
    :return: list of ``[ip, hour, weekday]`` records (all strings).
    """
    records = []
    with open(file, encoding='utf-8') as handle:
        for line in handle:
            records.append(line.split())
    for record in records:
        # Replace the full time stamp with just its hour part.
        time_parts = record.pop(1).split(':')
        record.insert(1, time_parts[0])
    return records
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read(path):", "def read(self, filename):\n pass", "def read(self, filename):\n pass", "def read_from_file(self, filename: str) -> None:", "def read_file(path_to_file):\n 8", "def read(self, filename):\n raise NotImplementedError", "def read_file():\n with open(FILE_NAME) as...
[ "0.8188726", "0.8140984", "0.8140984", "0.7953263", "0.78393626", "0.7722133", "0.7643032", "0.761069", "0.75103563", "0.7457068", "0.735805", "0.73315775", "0.7328963", "0.7327442", "0.73023856", "0.72954583", "0.72721654", "0.72382605", "0.7223386", "0.7222797", "0.7218205"...
0.0
-1
Function to create a dict
def create_dict(info):
    """Aggregate parsed log records per IP address.

    :param info: iterable of ``[ip, hour, weekday]`` records.
    :return: ``{ip: {'counter': int, 'hours': [...], 'weekdays': [...]}}``
    """
    per_ip = {}
    for record in info:
        ip, hour, weekday = record[0], record[1], record[2]
        stats = per_ip.setdefault(
            ip, {'counter': 0, 'hours': [], 'weekdays': []})
        stats['counter'] += 1
        stats['hours'].append(hour)
        stats['weekdays'].append(weekday)
    return per_ip
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_dict(cls, *args: Any, **kwargs: Any) -> Dict[str, Any]:\n return _DictMaker(struct_class=cls, positional_args=args, keyword_args=kwargs).make_dict()", "def create_dict(*args):\n output = {}\n idx = 0\n while idx < len(args):\n output[args[idx + 1]] = args[idx]\n idx += 2\n\...
[ "0.71769756", "0.70463216", "0.69759154", "0.68891", "0.68752956", "0.6867047", "0.67607605", "0.6733505", "0.66315436", "0.6606104", "0.65966946", "0.6580841", "0.6546656", "0.65415686", "0.64865774", "0.64681804", "0.64519143", "0.6376904", "0.63741493", "0.6370084", "0.634...
0.6324139
23
Function to write to file
def write_to_file(info, mode='w', file="output4.txt"):
    """Write records to *file*, one space-joined record per line.

    :param info: iterable of records (each an iterable of printable fields).
    :param mode: file open mode (``'w'`` to overwrite, ``'a'`` to append).
    :param file: path of the UTF-8 output file.
    """
    with open(file, mode, encoding='utf-8') as handle:
        handle.writelines(
            ' '.join(str(field) for field in record) + '\n'
            for record in info)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_to_file(self, filename: str) -> None:", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def write(self, fname):\n pass", "def file_write(stuff, file_path):\n with open(file_path, \"wt\") as fo:\n fo.write(stuff)", "def write_file(*args, **kwar...
[ "0.8413077", "0.80287683", "0.80287683", "0.80265", "0.79803306", "0.7701467", "0.7610143", "0.75384223", "0.7487648", "0.7396836", "0.73585635", "0.730112", "0.72875077", "0.7278125", "0.7264448", "0.72381777", "0.7235967", "0.7231617", "0.7231545", "0.7180415", "0.7160275",...
0.0
-1
Function to create the resulting list
def result_info(file="input4.txt"):
    """Summarize visits per IP and report the most popular hour on the site.

    For every IP the summary contains ``[ip, visit_count,
    most_frequent_weekday]``; a final line reports the hour with the most
    visits overall. The summary is written with ``write_to_file`` (default
    ``output4.txt``).

    Fix: the original accepted a *file* argument but ignored it, always
    calling ``read_file()`` with its default; the argument is now honored
    (behavior is unchanged for the default value).

    :param file: path of the log file to analyze.
    """
    dict_info = create_dict(read_file(file))
    result = []
    dict_of_hours = dict()
    for ip, info in dict_info.items():  # walk the per-IP aggregates
        # Accumulate site-wide visit counts per hour.
        for hour in info['hours']:
            if hour not in dict_of_hours:
                dict_of_hours[hour] = 0
            dict_of_hours[hour] += 1
        # Most frequent weekday for this IP (first one wins on ties).
        most_frequent_day = 0
        most_frequent_word = info['weekdays'][0]
        for day in info['weekdays']:
            if most_frequent_day < info['weekdays'].count(day):
                most_frequent_day = info['weekdays'].count(day)
                most_frequent_word = day
        result.append([ip, info['counter'], most_frequent_word])
    # Hour with the most visits across all IPs (0 if there were no records).
    max_number = 0
    most_frequent_hour = 0
    for hour, number in dict_of_hours.items():
        if number > max_number:
            max_number = number
            most_frequent_hour = hour
    result.append(['Самый популярный час на сайте:', most_frequent_hour])
    write_to_file(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_result_list(self,res):\n res_list = []\n for r in res:\n res_list.append(r)\n\n return res_list", "def X(self)->list:", "def make_list(unused_s, unused_l, toks):\n result = []\n for item in toks:\n result.append(item.asList())\n return r...
[ "0.72328603", "0.6897152", "0.67518175", "0.6660444", "0.6660444", "0.6498758", "0.6368195", "0.63266546", "0.63058394", "0.62481105", "0.62481105", "0.6236731", "0.62217695", "0.6189998", "0.617926", "0.6137528", "0.6121866", "0.61136776", "0.6106905", "0.60869616", "0.60864...
0.0
-1
Decode serialized example into image and segmentation label.
def decode(value):
    """Decode a serialized example into raw image and segmentation fields.

    :param value: a serialized ``tf.Example`` proto.
    :return: dict with 'image/encoded', 'image/height', 'image/width' and
        'image/segmentation/class/encoded' tensors.
    """
    feature_spec = {
        'image/encoded':
            tf.FixedLenFeature((), tf.string, default_value=''),
        'image/height':
            tf.FixedLenFeature((), tf.int64, default_value=0),
        'image/width':
            tf.FixedLenFeature((), tf.int64, default_value=0),
        'image/segmentation/class/encoded':
            tf.FixedLenFeature((), tf.string, default_value=''),
    }
    return tf.parse_single_example(value, feature_spec)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode(self, serialized_example):\n parsed_tensors = tf.io.parse_single_example(\n serialized_example, self.KEYS_TO_FEATURES\n )\n for k in parsed_tensors:\n if isinstance(parsed_tensors[k], tf.SparseTensor):\n if parsed_tensors[k].dtype == tf.string:\n...
[ "0.66928875", "0.65845627", "0.6560088", "0.65355873", "0.64627737", "0.62374485", "0.6098066", "0.6090909", "0.59154075", "0.5877989", "0.58274025", "0.5801115", "0.5764262", "0.574245", "0.5734998", "0.57296777", "0.57066524", "0.56981283", "0.56740636", "0.5664909", "0.565...
0.62265766
6
Initializes parameters for parsing annotations in the dataset.
def __init__(self,
             output_size,
             resize_eval=False,
             ignore_label=255,
             aug_rand_hflip=False,
             aug_scale_min=1.0,
             aug_scale_max=1.0,
             aug_policy='',
             use_bfloat16=True,
             mode=None):
    """Initializes parameters for parsing annotations in the dataset.

    :param output_size: target (height, width) of the parsed image/label.
    :param resize_eval: whether eval data is resized (else padded in place).
    :param ignore_label: label value excluded from the loss/metrics.
    :param aug_rand_hflip: enable random horizontal flip during training.
    :param aug_scale_min: lower bound of random scale jitter.
    :param aug_scale_max: upper bound of random scale jitter.
    :param aug_policy: name of an additional augmentation policy.
    :param use_bfloat16: cast the input image to tf.bfloat16.
    :param mode: a ModeKeys value selecting train/eval/predict parsing.
    :raises ValueError: if *mode* is not a recognized ModeKeys value.
    """
    self._mode = mode
    self._is_training = (mode == ModeKeys.TRAIN)
    self._output_size = output_size
    self._resize_eval = resize_eval
    self._ignore_label = ignore_label

    # Data augmentation settings.
    self._aug_rand_hflip = aug_rand_hflip
    self._aug_scale_min = aug_scale_min
    self._aug_scale_max = aug_scale_max
    self._aug_policy = aug_policy

    # Whether images are cast to bfloat16 before being returned.
    self._use_bfloat16 = use_bfloat16

    # Pick the parse function matching the model ModeKey.
    if mode == ModeKeys.TRAIN:
        self._parse_fn = self._parse_train_data
    elif mode == ModeKeys.EVAL:
        self._parse_fn = self._parse_eval_data
    elif mode in (ModeKeys.PREDICT, ModeKeys.PREDICT_WITH_GT):
        self._parse_fn = self._parse_predict_data
    else:
        raise ValueError('mode is not defined.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_all_params(self):\n self.annotations_timestamp = 0\n # self.annotations_offset = 0\n # self.annotation_offset_text.configure(text='Current: %d' % self.annotations_offset)\n self.annotations_timestamp_text.configure(text='Annotation timestamp:\\n %d' % self.annotations_timestamp...
[ "0.6673975", "0.65937763", "0.6560803", "0.65485716", "0.65461046", "0.64888483", "0.6485457", "0.64349765", "0.63921803", "0.6385465", "0.63169533", "0.62924635", "0.6280703", "0.6174638", "0.61208785", "0.60870314", "0.6060363", "0.6031667", "0.60257536", "0.60226005", "0.6...
0.0
-1
Parses data to an image and associated training labels.
def __call__(self, value):
    """Parses a serialized example into an image and training labels.

    :param value: a serialized ``tf.Example`` proto.
    :return: output of the mode-specific parse function.
    """
    with tf.name_scope('parser'):
        parsed = decode(value)
        return self._parse_fn(parsed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_train_data(self, data):\n image, label = self._prepare_image_and_label(data)\n\n # Flips image randomly during training.\n if self._aug_rand_hflip:\n image, label = input_utils.random_horizontal_flip(image, masks=label)\n\n # Resizes and crops image.\n image, image_info = input_utils...
[ "0.7921838", "0.75767523", "0.7083806", "0.70367414", "0.6984986", "0.6949761", "0.68525404", "0.6838453", "0.68272054", "0.6822626", "0.67811656", "0.67373544", "0.6718452", "0.66978455", "0.6692323", "0.66836244", "0.66790134", "0.66592354", "0.6651176", "0.6622316", "0.661...
0.0
-1
Prepare normalized image and label.
def _prepare_image_and_label(self, data):
    """Decodes and normalizes the image; decodes the segmentation label.

    :param data: decoded feature dict (see ``decode``).
    :return: tuple of (normalized float image [H, W, 3],
        float32 label [1, H, W]).
    """
    image = tf.io.decode_image(data['image/encoded'], channels=3)
    label = tf.io.decode_image(
        data['image/segmentation/class/encoded'], channels=1)
    height = data['image/height']
    width = data['image/width']
    image = tf.reshape(image, (height, width, 3))
    label = tf.cast(tf.reshape(label, (1, height, width)), tf.float32)
    # Normalize the image with mean/std pixel values.
    image = input_utils.normalize_image(image)
    return image, label
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize(image, label):\n image -= settings.DATASET_MEAN\n image /= settings.DATASET_STD\n\n return image, label", "def img_normalize(image, label):\n mean, std = ds_stats\n image -= tf.constant(mean, shape=[1, 1, num_channels], dtype=image.dtype)\n image /= tf.constant(std, shape=[1, 1, n...
[ "0.70676106", "0.68583", "0.66364515", "0.65736663", "0.6464647", "0.64554685", "0.6352765", "0.6297984", "0.625139", "0.62383085", "0.62295324", "0.62290204", "0.62271994", "0.6218579", "0.61705977", "0.6143832", "0.61042327", "0.60809606", "0.60745084", "0.60027677", "0.598...
0.72255284
0
Parses data for training and evaluation.
def _parse_train_data(self, data):
    """Parses data for training: augmentation, resize/crop, label masking.

    :param data: decoded feature dict (see ``decode``).
    :return: tuple of (image, {'masks': ..., 'valid_masks': ...}).
    """
    image, label = self._prepare_image_and_label(data)

    # Random horizontal flip augmentation.
    if self._aug_rand_hflip:
        image, label = input_utils.random_horizontal_flip(image, masks=label)

    # Resize and crop the image with random scale jitter.
    image, image_info = input_utils.resize_and_crop_image(
        image,
        self._output_size,
        self._output_size,
        aug_scale_min=self._aug_scale_min,
        aug_scale_max=self._aug_scale_max)
    image_scale = image_info[2, :]
    offset = image_info[3, :]

    # Offset the label by +1 so that regions padded with 0 during
    # resize/crop can be recognized and mapped to the ignore label below.
    label += 1
    label = tf.expand_dims(label, axis=3)
    label = input_utils.resize_and_crop_masks(
        label, image_scale, self._output_size, offset)
    label -= 1
    label = tf.where(tf.equal(label, -1),
                     self._ignore_label * tf.ones_like(label), label)
    label = tf.squeeze(label, axis=0)
    valid_mask = tf.not_equal(label, self._ignore_label)
    labels = {
        'masks': label,
        'valid_masks': valid_mask
    }

    # Cast the input image to bfloat16 when requested.
    if self._use_bfloat16:
        image = tf.cast(image, dtype=tf.bfloat16)
    return image, labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_data():\n n_train, n_test = 15000, 4996\n n_features = 1355191\n\n print('- parsing train data')\n X_train = sp.lil_matrix((n_train, n_features))\n y_train = np.zeros(n_train)\n with open('/Users/kitazawa/data/news20.train') as f:\n lines = map(lambda l: l.rstrip().split(' '), f....
[ "0.7268114", "0.6815508", "0.6703304", "0.6685302", "0.65728796", "0.64867747", "0.6361533", "0.63356423", "0.6325312", "0.6323176", "0.631201", "0.63008344", "0.6293298", "0.6284289", "0.62585795", "0.62380934", "0.6225368", "0.61996716", "0.6173986", "0.6165433", "0.6161794...
0.60057056
45
Parses data for training and evaluation.
def _parse_eval_data(self, data):
    """Parses data for evaluation: deterministic resize or pad, no jitter.

    :param data: decoded feature dict (see ``decode``).
    :return: tuple of (image, {'masks': ..., 'valid_masks': ...}).
    """
    image, label = self._prepare_image_and_label(data)

    # Offset the label by +1 so that regions padded with 0 below can be
    # recognized and mapped to the ignore label afterwards.
    label += 1
    label = tf.expand_dims(label, axis=3)

    if self._resize_eval:
        # Resize and crop both image and mask to the output size.
        image, image_info = input_utils.resize_and_crop_image(
            image, self._output_size, self._output_size)
        image_scale = image_info[2, :]
        offset = image_info[3, :]
        label = input_utils.resize_and_crop_masks(
            label, image_scale, self._output_size, offset)
    else:
        # Pad image and mask to the output size without rescaling.
        image = tf.image.pad_to_bounding_box(
            image, 0, 0, self._output_size[0], self._output_size[1])
        label = tf.image.pad_to_bounding_box(
            label, 0, 0, self._output_size[0], self._output_size[1])

    label -= 1
    label = tf.where(tf.equal(label, -1),
                     self._ignore_label * tf.ones_like(label), label)
    label = tf.squeeze(label, axis=0)
    valid_mask = tf.not_equal(label, self._ignore_label)
    labels = {
        'masks': label,
        'valid_masks': valid_mask
    }

    # Cast the input image to bfloat16 when requested.
    if self._use_bfloat16:
        image = tf.cast(image, dtype=tf.bfloat16)
    return image, labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_data():\n n_train, n_test = 15000, 4996\n n_features = 1355191\n\n print('- parsing train data')\n X_train = sp.lil_matrix((n_train, n_features))\n y_train = np.zeros(n_train)\n with open('/Users/kitazawa/data/news20.train') as f:\n lines = map(lambda l: l.rstrip().split(' '), f....
[ "0.7268114", "0.6815508", "0.6703304", "0.6685302", "0.65728796", "0.64867747", "0.6361533", "0.63356423", "0.6325312", "0.6323176", "0.631201", "0.63008344", "0.6293298", "0.6284289", "0.62585795", "0.62380934", "0.6225368", "0.61996716", "0.6173986", "0.6165433", "0.6161794...
0.0
-1
Parses data for prediction.
def _parse_predict_data(self, data):
    """Parses data for prediction by reusing the eval path.

    :param data: decoded feature dict (see ``decode``).
    :return: dict with 'images' and 'labels' entries.
    """
    image, labels = self._parse_eval_data(data)
    return {
        'images': image,
        'labels': labels
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_dataset(self, data):\n pass", "def predict(self, datafile):", "def predict(self, data: List):", "def predict(self, data):\n\t\traise NotImplementedError", "def postprocess(self, data):\n all_predictions, all_nbest_json, scores_diff_json = predictions(self._dev_dataset,\n ...
[ "0.70179003", "0.68369985", "0.67741996", "0.6700655", "0.665754", "0.6550395", "0.6539271", "0.6422481", "0.6383678", "0.63100535", "0.6299874", "0.62833273", "0.6269634", "0.6264631", "0.6253759", "0.62460303", "0.62442476", "0.621106", "0.6204033", "0.6197395", "0.61885244...
0.7601515
0
If an iteriter_op is given an iterator as input, no exception should be thrown, and we should return the wrapped function's output.
def test_iteriter_op_1():
    """An iterator input must pass through and the wrapped output be returned."""

    @ops.iteriter_op
    def f(x):
        return iter([4, 5, 6])

    # An iterator input is exactly what the decorator expects.
    result = f(iter([1, 2, 3]))
    assert isinstance(result, collections.abc.Iterator), f"{result}"
    assert list(result) == [4, 5, 6]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n...
[ "0.734651", "0.7288327", "0.6928596", "0.6799886", "0.66857606", "0.6591529", "0.6551025", "0.6511721", "0.63353145", "0.59754914", "0.5973058", "0.59268594", "0.5852119", "0.58467436", "0.5843037", "0.583689", "0.57858115", "0.5740564", "0.5712443", "0.56730723", "0.5633042"...
0.74068475
0
If an iteriter_op is given something besides an iterator as input, raise a ValueError.
def test_iteriter_op_2(): @ops.iteriter_op def f(x): return iter([4, 5, 6]) with pytest.raises(ValueError): f([1, 2, 3]) # Passing in a list instead of an iterator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n...
[ "0.7217243", "0.7193178", "0.6957775", "0.68198645", "0.6498388", "0.6485671", "0.63076574", "0.6105469", "0.58576494", "0.5822482", "0.57532716", "0.57530195", "0.5725821", "0.56677824", "0.5644975", "0.5638798", "0.5572163", "0.5566992", "0.5561028", "0.5558914", "0.5509184...
0.75408363
0
If an iteriter_op returns something besides an iterator as output, raise a ValueError.
def test_iteriter_op_3(): @ops.iteriter_op def f(x): return [4, 5, 6] # Returning a list instead of an iterator with pytest.raises(ValueError): result = f(iter([1, 2, 3]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with...
[ "0.76426315", "0.73116994", "0.7197692", "0.69156325", "0.6845044", "0.6789451", "0.63753885", "0.6361283", "0.62555766", "0.6095181", "0.6085253", "0.59735376", "0.5929287", "0.5857033", "0.58470154", "0.58267", "0.57885355", "0.57856303", "0.57807213", "0.576038", "0.573720...
0.7515847
1
If a listlist_op is given a list as input, no exception should be thrown, and we should return the wrapped function's output.
def test_listlist_op_1(): @ops.listlist_op def f(x): return [4, 5, 6] result = f([1, 2, 3]) # Passing in a list, as expected assert(isinstance(result, list)), f"{result}" assert(result == [4, 5, 6])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n w...
[ "0.7166146", "0.71624446", "0.7014189", "0.6786135", "0.6684333", "0.6671589", "0.6536824", "0.6533272", "0.6519165", "0.64344203", "0.6429467", "0.64098346", "0.6329758", "0.6261592", "0.6214731", "0.61999166", "0.6112252", "0.6028815", "0.6003266", "0.5961093", "0.5949775",...
0.77922696
0
If a listlist_op is given something besides a list as input, raise a ValueError.
def test_listlist_op_2(): @ops.listlist_op def f(x): return [4, 5, 6] with pytest.raises(ValueError): f(iter([1, 2, 3])) # Passing in an iterator instead of an list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def is_listing(op):\n return isinstance(op, (list, tuple))", "def test_neg_operate_list_invalid_...
[ "0.6896146", "0.6843723", "0.6687192", "0.66674805", "0.6584861", "0.650589", "0.64047855", "0.6385669", "0.6318132", "0.62996775", "0.6293289", "0.62533414", "0.6251247", "0.62425464", "0.62239265", "0.62199765", "0.6166311", "0.61608076", "0.6119108", "0.6056942", "0.600924...
0.71428686
0
If a listlist_op returns something besides a list as output, raise a ValueError.
def test_listlist_op_3(): @ops.listlist_op def f(x): return iter([4, 5, 6]) # Returning an iterator instead of an list with pytest.raises(ValueError): result = f([1, 2, 3])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = ...
[ "0.73349303", "0.71511084", "0.6736014", "0.66537863", "0.6652018", "0.6627136", "0.65927994", "0.6493474", "0.6485327", "0.6485104", "0.64288867", "0.6416885", "0.6406958", "0.63908684", "0.63792086", "0.6378879", "0.63132125", "0.6302996", "0.6289423", "0.62221825", "0.6194...
0.72641724
1
If a listiter_op is given a list as input, no exception should be thrown, and we should return the wrapped function's output.
def test_listiter_op_1(): @ops.listiter_op def f(x): return iter([4, 5, 6]) result = f([1, 2, 3]) # Passing in a list, as expected assert(isinstance(result, collections.abc.Iterator)), f"{result}" assert(list(result) == [4, 5, 6])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def test_listiter_op_3():\n\n @ops.listiter_op\n de...
[ "0.7756567", "0.75442225", "0.75318354", "0.75228345", "0.747424", "0.74187565", "0.7351864", "0.71246946", "0.6990763", "0.6881843", "0.68182045", "0.66678816", "0.65725154", "0.65193516", "0.62121695", "0.6203566", "0.59676445", "0.5919752", "0.5807262", "0.5753957", "0.565...
0.76254886
1
If a listiter_op is given something besides a list as input, raise a ValueError.
def test_listiter_op_2(): @ops.listiter_op def f(x): return iter([4, 5, 6]) with pytest.raises(ValueError): f(iter([1, 2, 3])) # Passing in an iterator instead of a list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pyte...
[ "0.76405627", "0.7451966", "0.7407579", "0.73711157", "0.7272287", "0.6842472", "0.67784035", "0.6713179", "0.65655285", "0.6331", "0.62677497", "0.62505025", "0.6126729", "0.6060513", "0.60524344", "0.60413057", "0.5994478", "0.5923151", "0.5910163", "0.5815476", "0.5803793"...
0.75528175
1
If a listiter_op returns something besides an iterator as output, raise a ValueError.
def test_listiter_op_3(): @ops.listiter_op def f(x): return [4, 5, 6] # Returning a list instead of an iterator with pytest.raises(ValueError): result = f([1, 2, 3])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Re...
[ "0.800803", "0.7905361", "0.77610767", "0.77083707", "0.77032125", "0.75699127", "0.7554352", "0.70976245", "0.70190036", "0.67172563", "0.6027455", "0.58875996", "0.5732145", "0.57207274", "0.57120436", "0.57120436", "0.5675813", "0.5638414", "0.5630779", "0.5580102", "0.555...
0.77557844
3
If an iterlist_op is given an iterator as input, no exception should be thrown, and we should return the wrapped function's output.
def test_iterlist_op_1(): @ops.iterlist_op def f(x): return [4, 5, 6] result = f(iter([1, 2, 3])) # Passing in an iterator, as expected assert(isinstance(result, list)), f"{result}" assert(result == [4, 5, 6])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n ...
[ "0.76466507", "0.76008874", "0.7570353", "0.7508546", "0.74234086", "0.7396815", "0.7382202", "0.73407215", "0.7291801", "0.72297966", "0.6824224", "0.666192", "0.6128793", "0.60040385", "0.5923186", "0.58794075", "0.58676404", "0.57020146", "0.56718487", "0.5595165", "0.5567...
0.76173055
1
If an iterlist_op is given something besides an iterator as input, raise a ValueError.
def test_iterlist_op_2(): @ops.iterlist_op def f(x): return [4, 5, 6] with pytest.raises(ValueError): f([1, 2, 3]) # Passing in a list instead of an iterator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Re...
[ "0.78835404", "0.7705365", "0.7613191", "0.7498913", "0.7423901", "0.74038625", "0.73198646", "0.67817676", "0.6730599", "0.6505109", "0.5887717", "0.5857515", "0.58052135", "0.5753452", "0.5708452", "0.5689192", "0.56823623", "0.5681861", "0.56544685", "0.56081665", "0.56028...
0.7661832
2
If an iterlist_op returns something besides a list as output, raise a ValueError.
def test_iterlist_op_3(): @ops.iterlist_op def f(x): return iter([4, 5, 6]) # Returning an iterator instead of a list with pytest.raises(ValueError): result = f(iter([1, 2, 3]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returni...
[ "0.77509636", "0.76935655", "0.76132554", "0.7517712", "0.7484861", "0.7235555", "0.70362914", "0.7030836", "0.6869668", "0.66613793", "0.6354757", "0.630842", "0.6297937", "0.61403257", "0.60431194", "0.6029395", "0.6019079", "0.6019079", "0.59903765", "0.5965533", "0.595312...
0.75992316
3
Constant evaluation should ignore the existing fitness function and set the fitness of all individuals to the same value.
def test_const_evaluate(): pop = test_population pop = ops.const_evaluate(pop, value=123456789.0) for ind in pop: assert(pytest.approx(123456789.0) == ind.fitness)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _recompute_fitness(self):\n for cur in self.population:\n if cur['fitness'] is None:\n cur['fitness'] = self.op.fitness(cur['individual'])", "def fitness(self):\n pass", "def evaluate(self, fitness):\n self.fitness = fitness(self.phenotype)", "def fitness_sh...
[ "0.74931896", "0.7079468", "0.69477665", "0.6825639", "0.67214495", "0.6705714", "0.6655073", "0.6554352", "0.6519272", "0.6481715", "0.6447769", "0.62911665", "0.61991894", "0.6193287", "0.6174193", "0.6151497", "0.6150842", "0.6150842", "0.610781", "0.60786945", "0.607092",...
0.6333001
11
If a pool of size 3 is used, the first 3 individuals in the input iterator should be collected into a list.
def test_pool(): pop = iter([ 'a', 'b', 'c', 'd', 'e' ]) pop = ops.pool(pop, size=3) assert(len(pop) == 3) assert(pop == [ 'a', 'b', 'c' ])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lazy_groups_of(iterator: Iterator[A], group_size: int) -> Iterator[List[A]]:\n return iter(lambda: list(islice(iterator, 0, group_size)), [])", "def construct(self, x):\n results = []\n x = self.pool0(x)\n results.append(x)\n x = self.pool1(x)\n results.append(x)\n ...
[ "0.640328", "0.6058334", "0.58099174", "0.5747291", "0.57104117", "0.5681089", "0.56514186", "0.5616282", "0.56008047", "0.55808794", "0.5538672", "0.5503756", "0.5503756", "0.54849136", "0.5432563", "0.54290795", "0.54290795", "0.54163194", "0.54163116", "0.5399655", "0.5391...
0.7016338
0
Compute the color of a given pixel.
def start_up(t, coord, ii, n_pixels, value): global position x, y, z = coord # print(coord) # print("position") # print(int(position)) if (ii == 0): r = value[int(position)] g = value[int(position)] b = value[int(position)] elif (ii == 1 or ii == 29 or ii == 28): r = value[int(position)] * .7 g = value[int(position)] * .5 b = value[int(position)] * .5 else: r = 0 g = 0 b = 0 position += .01 if (position > 499): position = 0 # padXData = touchOSC.padXData # padYData = int(touchOSC.padYData * .65) # print padYData # print touchOSC.padYData # r,g,b = colorOSC # if x == padXData and z == padYData: # r,g,b = draw.circle(padXData,padYData, x, z,colorOSC) # draw.circle(5,5, x, z) return (r, g, b)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_color(im_obj):\n #im = Image.open(path, 'r')\n x, y = im_obj.size\n\n r, g, b = 0, 0, 0\n for i in xrange(x):\n for j in xrange(y):\n color_px = im_obj.getpixel((i, j))\n #print color_px\n r += color_px[0]\n g += color_px[1]\n b += c...
[ "0.700929", "0.69985324", "0.6966529", "0.68848133", "0.6798888", "0.6789337", "0.6751443", "0.67474806", "0.6740431", "0.65576667", "0.6495866", "0.6494163", "0.6480991", "0.64658093", "0.64402866", "0.64176357", "0.64166456", "0.64017946", "0.63647753", "0.63250977", "0.631...
0.0
-1
Converts a wave to a vector of prosodic features. offset (in ms) determines where the signal will be sampled. window_len is ignored.
def wav_to_prosodic(path, sr=16000, offset=10): sound = parselmouth.Sound(path) pitch = sound.to_pitch() #timestep, pitch_floor, pitch_ceiling intensity = sound.to_intensity() features = [] max_time = sound.get_total_duration() for time in np.arange(0, max_time, 0.001): f0 = pitch.get_value_at_time(time) f0_nan = 0 if np.isnan(f0): f0 = 0 f0_nan = 1 int_db = intensity.get_value(time) if np.isnan(int_db): int_db = 0 features.append([f0, f0_nan, int_db]) array_feats = np.array(features).T print("SHAPE OF THE FEATURES:", array_feats.shape) assert(not np.any(np.isnan(array_feats))) return array_feats, max_time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _choose_wavelength_slice(self, offset):\n if 'WAVE' not in self.axes_wcs.wcs.ctype:\n raise cu.CubeError(2, \"Spectral dimension not present\")\n if self.data.ndim == 4:\n raise cu.CubeError(4, \"Can only work with 3D cubes\")\n\n axis = -2 if self.axes_wcs.wcs.ctype[...
[ "0.5480287", "0.5404212", "0.5282063", "0.50624555", "0.50224143", "0.50093454", "0.4989605", "0.49353826", "0.486388", "0.48465458", "0.4828349", "0.47970074", "0.47777593", "0.47630838", "0.4727671", "0.47226426", "0.4674579", "0.4674468", "0.46724492", "0.46629953", "0.464...
0.6025671
0
Converts a wave to a vector of prosodic features. offset (in ms) determines where the signal will be sampled. window_len is ignored.
def wav_to_intensity(path, sr=16000, offset=10): sound = parselmouth.Sound(path) intensity = sound.to_intensity() features = [] max_time = sound.get_total_duration() for time in np.arange(0, max_time, 0.001): int_db = intensity.get_value(time) if np.isnan(int_db): int_db = 0 features.append([int_db]) array_feats = np.array(features).T print("SHAPE OF THE FEATURES:", array_feats.shape) assert(not np.any(np.isnan(array_feats))) return array_feats, max_time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wav_to_prosodic(path, sr=16000, offset=10):\n sound = parselmouth.Sound(path)\n pitch = sound.to_pitch() #timestep, pitch_floor, pitch_ceiling\n intensity = sound.to_intensity()\n\n features = []\n\n max_time = sound.get_total_duration()\n\n for time in np.arange(0, max_time, 0.001):\n ...
[ "0.6025386", "0.54781526", "0.5399577", "0.5278359", "0.50593245", "0.50171155", "0.50058156", "0.49881226", "0.49332815", "0.4863905", "0.4842912", "0.48232105", "0.47933024", "0.47808012", "0.4761141", "0.47247434", "0.47176567", "0.46723312", "0.46707693", "0.4661695", "0....
0.46706223
19
Load dumped object handled by file_name. If file_name is None, then default file name is used.
def model_load(file_name=None): if file_name is None : file_name = "./data/_oP5_SegmentClassifier.dump" else: pass return p5_util.object_load(file_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadFromFile(file_name=\"saved_object.pickle\"):\n\n try:\n with open(file_name, \"rb\") as inputToLoad:\n loaded_object = pickle.load(inputToLoad)\n return loaded_object\n except IOError:\n raise InvalidFilesPath\n except ImportError as e:\n raise InvalidFile(\n...
[ "0.71736634", "0.7085416", "0.7077506", "0.7077506", "0.6953144", "0.6927678", "0.6907037", "0.68987334", "0.680562", "0.67826605", "0.67519313", "0.6732998", "0.67002213", "0.6668907", "0.66492176", "0.6630586", "0.6611128", "0.6594634", "0.6578726", "0.65453917", "0.6457661...
0.6612186
16
Print percentage of rows that have been processed.
def _print_stat_rows(title,rows_before,rows_after): self.strprint(str(title)+" : Percent of processed rows = %1.2F"\ %(np.abs(rows_before-rows_after)*100/rows_before))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def printProgress(self, percentage):\n #print '%s\\r' % ' '*20, # clean up row\n #print '%3d%% ' % percentage, # ending with comma prevents newline from being appended\n sys.stdout.flush()", "def PrintProgress(self):\n print ' Examined %d nodes, found %d unique...' % (\n ...
[ "0.7295432", "0.690456", "0.6810523", "0.65595317", "0.64016294", "0.63650185", "0.63531333", "0.6352935", "0.6349985", "0.6310579", "0.6283685", "0.6247981", "0.6222485", "0.6173231", "0.61398053", "0.61388904", "0.61365205", "0.6132179", "0.61136687", "0.61033106", "0.60934...
0.74636936
0
Print percentage of rows that have been processed.
def print_stat_rows(title,rows_before,rows_after): _print_stat_rows(title,rows_before,rows_after)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _print_stat_rows(title,rows_before,rows_after):\n self.strprint(str(title)+\" : Percent of processed rows = %1.2F\"\\\n %(np.abs(rows_before-rows_after)*100/rows_before))", "def printProgress(self, percentage):\n #print '%s\\r' % ' '*20, # clean up row\n #print '%3d%% ' % percentage,...
[ "0.74636936", "0.7295432", "0.690456", "0.6810523", "0.65595317", "0.64016294", "0.63650185", "0.63531333", "0.6352935", "0.6349985", "0.6310579", "0.6283685", "0.6247981", "0.6222485", "0.6173231", "0.61398053", "0.61388904", "0.61365205", "0.6132179", "0.61136687", "0.61033...
0.0
-1
Encapsulation of print function. If flag is_verbose is fixed to True, then print takes place.
def strprint(self, mystr): if self.is_verbose is True: print(mystr) else: pass return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _verbose(self,text):\n if self.verbose:\n print(text)", "def _print(self, text):\n\t\tif self.verbose:\n\t\t\tprint text", "def verbose_print(verbose, print_function=None):\n\n if verbose:\n return print_function or print\n else:\n def vprint(*args, **kwars):\n ...
[ "0.7844468", "0.7715159", "0.76063657", "0.75858647", "0.74731535", "0.7468149", "0.74547803", "0.7313309", "0.72769666", "0.72654116", "0.7237131", "0.7225605", "0.7189013", "0.7179139", "0.7146172", "0.70976067", "0.70916754", "0.70261127", "0.6895416", "0.68448365", "0.682...
0.68400043
20
Copy all attributes from a given object P5_SegmentClassifier object into self.
def copy(self, other_object): #------------------------------------------------------------------------- # Debug parameters #------------------------------------------------------------------------- self._path_to_model = other_object.path_to_model #------------------------------------------------------------------------- # Data-model parameters #------------------------------------------------------------------------- self.is_verbose = other_object.is_verbose self._df_invoice_line = other_object._df_invoice_line.copy() self._total_outliers = other_object._total_outliers self._df_invoice_ref = other_object._df_invoice_ref.copy() self._list_quant_feature = other_object._list_quant_feature.copy() self._list_feature_to_drop = other_object._list_feature_to_drop.copy() self._df_invoice_original = other_object._df_invoice_original.copy() if other_object._arr_sample_customerID is not None: self._arr_sample_customerID = other_object._arr_sample_customerID.copy() else : self._arr_sample_customerID = None self._df_invoice_line_out_sample \ = other_object._df_invoice_line_out_sample.copy() #------------------------------------------------------------------------- # RFM features #------------------------------------------------------------------------- self._is_rfm_encode = other_object._is_rfm_encode self._encoder_rfm = other_object._encoder_rfm self._df_customers_rfm = other_object._df_customers_rfm.copy() self.df_customers_rfm_fileName = other_object.df_customers_rfm_fileName self.df_RFM_quantiles = other_object.df_RFM_quantiles self._day_now = other_object._day_now self._is_transform_rfm = other_object._is_transform_rfm #------------------------------------------------------------------------- # Time features #------------------------------------------------------------------------- self._list_new_feature = other_object._list_new_feature self._pca_timeFeature = other_object._pca_timeFeature self._std_scaler_timeFeature = other_object._std_scaler_timeFeature 
self._df_customers_timeFeature_fileName \ = other_object._df_customers_timeFeature_fileName if other_object._dict_timeFeature_encoder is not None: self._dict_timeFeature_encoder \ = other_object._dict_timeFeature_encoder.copy() else: self._dict_timeFeature_encoder = other_object._dict_timeFeature_encoder if other_object._df_customers_timeFeature is not None: self._df_customers_timeFeature \ = other_object._df_customers_timeFeature.copy() else: self._df_customers_timeFeature = other_object._df_customers_timeFeature self._is_transform_timeFeature = other_object._is_transform_timeFeature #------------------------------------------------------------------------- # NLP features #------------------------------------------------------------------------- self._vectorizer_nlp = other_object._vectorizer_nlp self._matrix_weights_nlp = other_object._matrix_weights_nlp self._df_customers_nlp_fileName = other_object._df_customers_nlp_fileName self._pca_nlp = other_object._pca_nlp self._df_customers_pca_nlp = other_object._df_customers_pca_nlp.copy() self._nlp_pca_ndim = other_object._nlp_pca_ndim self._is_transform_nlp = other_object._is_transform_nlp #------------------------------------------------------------------------- # All features #------------------------------------------------------------------------- self._df_customers_fileName = other_object._df_customers_fileName self._df_customers = other_object._df_customers.copy() #------------------------------------------------------------------------- # Classifier #------------------------------------------------------------------------- if other_object._y_clusters is not None: self._y_clusters = other_object._y_clusters.copy() else: self._y_clusters = other_object._y_clusters self._dict_classifier_param = other_object._dict_classifier_param.copy() self._classifier_name = other_object._classifier_name self._classifier_model = other_object._classifier_model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy(self, threshold):\n self.indicator = threshold['indicator']\n self.stage = threshold['stage']\n self.begin = threshold['begin']\n self.end = threshold['end']\n self.quality = threshold['quality']\n self.weight = threshold['weight']\n return self", "def po...
[ "0.5413871", "0.51940006", "0.5167523", "0.5163446", "0.5083279", "0.49784464", "0.49112073", "0.48626807", "0.48342088", "0.48338348", "0.482826", "0.48198396", "0.48187032", "0.4814801", "0.48118797", "0.4783213", "0.4774069", "0.47707164", "0.47706905", "0.47557005", "0.47...
0.47427928
20
Remove raws with countries other then 'United Kingdom' then remove Country feature.
def _feature_country_process(self): if 'Country' not in self._df_invoice_line.columns: return list_countries_keep = ['United Kingdom'] rows_before = self._df_invoice_line.shape[0] df_invoice_line_new = pd.DataFrame() for country in list_countries_keep : df_invoice_line_new = df_invoice_line_new.append(\ self._df_invoice_line[self._df_invoice_line['Country']==country]\ , ignore_index=True) self.df_invoice_line = df_invoice_line_new del(df_invoice_line_new) rows_after = self._df_invoice_line.shape[0] _print_stat_rows("Countries filtering : ",rows_before, rows_after) #------------------------------------------------------------------------- # Due to the fact only one country is used, then this feature is dropped #------------------------------------------------------------------------- list_col_to_keep = [col for col in self._df_invoice_line.columns \ if col not in 'Country'] self._df_invoice_line = self._df_invoice_line[list_col_to_keep] return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply_feature_filter(self):\n self.features = set()\n for language in self.data.values():\n features_in_data = set(language.keys())\n features_to_keep = features_in_data & self.feature_filter\n self.features |= features_to_keep\n features_to_remove = fe...
[ "0.59036386", "0.5863824", "0.5825038", "0.5797917", "0.5704074", "0.5514831", "0.55131525", "0.5411934", "0.5399952", "0.53786814", "0.53708786", "0.53605676", "0.5328933", "0.53094655", "0.51719415", "0.5159425", "0.51442134", "0.5126341", "0.51245403", "0.50806504", "0.506...
0.70100856
0
Builds features issued from InvoiceDate. A dataframe is built per new feature and dumped into a file. Each one of the dataframe have encoded features issues from InvoiceDate.
def data_transform_timeFeature(self): #------------------------------------------------------------------------- # All new features are built into separate dataframes # and each of them are dumped into a separate file. #------------------------------------------------------------------------- self.strprint("self.df_invoice_line : "+str(self.df_invoice_line.shape)) self._dict_timeFeature_encoder, df_customers_timeFeature \ = p5_util.time_list_feature_build(self.df_invoice_line\ , self._list_new_feature, dict_encoder = self._dict_timeFeature_encoder\ ,is_verbose=self.is_verbose) #------------------------------------------------------------------------- # New time features are aggregated into a single dataframe. # Values are scaled. #------------------------------------------------------------------------- df_customers_timeFeature, self._std_scaler_timeFeature \ = p5_util.time_list_feature_restore(self._list_new_feature \ , std_scale = self._std_scaler_timeFeature\ , df_timeFeature = df_customers_timeFeature, is_verbose = self.is_verbose) self.strprint("df_customers_timeFeature : "+str(df_customers_timeFeature.shape)) #------------------------------------------------------------------------- # Dimension reduction thanks to PCA #------------------------------------------------------------------------- n_dim=30 root_name = 'time_pca_' # Column CustomerID is used into df_pca_reduce df_customers_timeFeature['CustomerID'] = df_customers_timeFeature.index df_customers_timeFeature, pca_timeFeature \ = p5_util.df_pca_reduce(df_customers_timeFeature, n_dim, root_name\ , p_is_scale=False, pca = self._pca_timeFeature) self.strprint(df_customers_timeFeature.shape) if self._pca_timeFeature is None: #---------------------------------------------------------------------- # Data-model is in built process with part of data-set. 
#---------------------------------------------------------------------- self._pca_timeFeature = pca_timeFeature p5_util.object_dump(df_customers_timeFeature\ , self._df_customers_timeFeature_fileName) else: #---------------------------------------------------------------------- # Data-model is already built and this method is called # for a customer classification. #---------------------------------------------------------------------- self._df_customers_timeFeature = df_customers_timeFeature.copy() return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_date_features(df = None, date = None):\n #TODO", "def data_transform_rfm(self) :\n \n is_built_step = False\n if self._encoder_rfm is None:\n is_built_step = True \n \n #-------------------------------------------------------------------------\n # RFM feature ...
[ "0.6386668", "0.6208587", "0.62022436", "0.60953605", "0.60796374", "0.60608006", "0.6022764", "0.5993953", "0.5985361", "0.59586924", "0.5820426", "0.5809583", "0.58089864", "0.58070034", "0.5787359", "0.575435", "0.57058054", "0.56917906", "0.56418794", "0.56213087", "0.560...
0.62781596
1
Builds for each customer, RFM scores and encode scores. When this method is called during building data_model step, then dataframe handling new RFM features is dumped into a file.
def data_transform_rfm(self) : is_built_step = False if self._encoder_rfm is None: is_built_step = True #------------------------------------------------------------------------- # RFM feature is built #------------------------------------------------------------------------- ser_invoice_date = self._df_invoice_line.InvoiceDate self.df_invoice_line, df_RFM, self.df_RFM_quantiles, self._day_now \ = p5_util.p5_df_rfm_build(self.df_invoice_line, day_now = self._day_now\ , df_RFM_threshold=self.df_RFM_quantiles) self._df_invoice_line.InvoiceDate = ser_invoice_date #------------------------------------------------------------------------- # RFM score is added to dataframe #------------------------------------------------------------------------- df_merged = pd.merge(self.df_invoice_line\ , df_RFM[['CustomerID','RFM']], how='left', on=['CustomerID']) self._df_invoice_line \ = pd.DataFrame(df_merged.values, index = self._df_invoice_line.index\ , columns=df_merged.columns) #self._df_invoice_line \ #= pd.concat([ self.df_invoice_line,df_RFM[['CustomerID','RFM']] ], axis=1\ #,join='inner') #------------------------------------------------------------------------- # RFM encoding #------------------------------------------------------------------------- self._encoder_rfm, df_RFM_encoded \ = p5_util.df_rfm_one_hot_encode(df_RFM,'RFM', encoder=self._encoder_rfm) #------------------------------------------------------------------------- # Encoded RFM features are renamed #------------------------------------------------------------------------- df_customers_rfm, list_col_unchanged \ = p5_util.df_rename_columns(df_RFM_encoded, df_RFM_encoded.columns\ , 'w_rfm_') self.strprint("df_customers_rfm =" +str(df_customers_rfm.shape)) #------------------------------------------------------------------------- # dataframe with RFM encoded values per customer is dumped #------------------------------------------------------------------------- if is_built_step is True: 
p5_util.object_dump(df_customers_rfm, self.df_customers_rfm_fileName) else : self._df_customers_rfm = df_customers_rfm.copy() return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def df_customers_fileRead(self):\n \n #-------------------------------------------------------------------------\n # RFM features are restored\n #-------------------------------------------------------------------------\n df_customers_rfm \\\n = p5_util.object_load(self.df_customer...
[ "0.70747465", "0.7067084", "0.62919354", "0.6007114", "0.58264214", "0.5718458", "0.560313", "0.56008935", "0.55872756", "0.55600125", "0.5532584", "0.5520967", "0.55154175", "0.5512787", "0.54379237", "0.53890693", "0.53649795", "0.5364341", "0.53604376", "0.53556436", "0.53...
0.724281
0
Creates new features from Description feature thanks to NLTK, a NLP package. NLP features are handled into a dataframe. A PCA reduction is applied on this dataframe. Features from dataframe are renamed with root ane w_nlp. When this method is called during building data_model step, then dataframe handling new NLP feature is dumped into a file.
def data_transform_nlp(self):
    """Create NLP features from the Description column and reduce them
    with PCA.

    When ``self._vectorizer_nlp`` is None the method runs in "building"
    mode: the vectorizer/PCA are fitted and the per-customer PCA-reduced
    dataframe is dumped to ``self._df_customers_nlp_fileName``.  Otherwise
    the fitted transformers are reused and the result is stored as
    ``self._df_customers_pca_nlp``.
    """
    df_invoice_line = None
    # Building mode is detected by the absence of a fitted vectorizer.
    is_build_step = False
    if self._vectorizer_nlp is None:
        is_build_step = True

    # Words excluded from the NLP vocabulary.
    list_no_words=['SET','PACK']

    df_invoice_line, csr_matrix_weights, self._vectorizer_nlp \
    = p5_util.nlp_process(self.df_invoice_line\
    , 'Description' , vectorizer= self._vectorizer_nlp\
    , list_no_words=list_no_words, is_verbose= self.is_verbose)

    # nlp_process returning None signals a failure — abort early.
    if df_invoice_line is None:
        self.strprint("***ERROR : NLP process interrupted!")
        return

    #-------------------------------------------------------------------------
    # NLP weights are cumulated (sumerized) per customer.  When nlp_process
    # returned no weight matrix, a previously dumped one is reloaded —
    # presumably produced during the building step; TODO confirm.
    #-------------------------------------------------------------------------
    if csr_matrix_weights is None:
        csr_matrix_weights \
        = p5_util.object_load('./data/matrix_weights_NLP.dump')
    else:
        pass

    self.strprint("df_invoice_line : "+str(df_invoice_line.shape))
    # Kept for debugging/inspection purposes.
    self.dbg_df = df_invoice_line.copy()

    root_name = 'w_nlp_'
    self._df_w_nlp = p5_util.df_nlp_sum_per_customer(df_invoice_line\
    , csr_matrix_weights, root_name)
    # The sparse matrix can be large; release it as soon as possible.
    del(csr_matrix_weights)

    #-------------------------------------------------------------------------
    # Dimension reduction thanks to PCA.
    #-------------------------------------------------------------------------
    self.strprint("self._df_w_nlp : "+str(self._df_w_nlp.shape))

    root_name_pca = 'nlp_pca_'
    n_dim = self._nlp_pca_ndim

    df_customers_pca_nlp, self._pca_nlp \
    = p5_util.df_pca_reduce(self._df_w_nlp, n_dim, root_name_pca\
    , p_is_scale=False, pca=self._pca_nlp)

    self.strprint("df_customers_pca_nlp : " +str(df_customers_pca_nlp.shape))

    #-------------------------------------------------------------------------
    # Building mode: dump NLP features per customer to file; otherwise
    # keep them as an instance attribute.
    #-------------------------------------------------------------------------
    if is_build_step is True:
        p5_util.object_dump(df_customers_pca_nlp\
        , self._df_customers_nlp_fileName)
    else:
        self._df_customers_pca_nlp = df_customers_pca_nlp.copy()

    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def feature_description_nlp(self):\n \n #-------------------------------------------------------------------------\n # Returned dataframe is aggregated with weights from self.vectorizer\n #-------------------------------------------------------------------------\n list_no_words=['SET','PAC...
[ "0.7509502", "0.63895476", "0.63655084", "0.6190819", "0.61304533", "0.60974437", "0.6039494", "0.6013443", "0.6001541", "0.59564126", "0.59309864", "0.584246", "0.58098847", "0.5809157", "0.5786593", "0.57630605", "0.57536405", "0.57465136", "0.57437116", "0.5743342", "0.574...
0.6648716
1
Proceed to data transformation in order to deliver computable data model.
def data_transform(self, df):
    """Run the enabled feature transformations over the given invoice
    lines and return the transformed dataframe.

    Each transformation (time features, RFM, NLP) is applied only when
    its corresponding ``is_transform_*`` flag is True.
    """
    # The dataframe handed in by the caller becomes the working copy.
    self.df_invoice_line = df

    # (flag, banner, handler-name) triplets; handlers are looked up
    # lazily so that disabled steps are never touched.
    pipeline = (
        (self.is_transform_timeFeature,
         "\n*** Time features transformation ***",
         'data_transform_timeFeature'),
        (self.is_transform_rfm,
         "\n*** RFM transformation ***",
         'data_transform_rfm'),
        (self.is_transform_nlp,
         "\n*** NLP transformation ***",
         'data_transform_nlp'),
    )
    for enabled, banner, handler_name in pipeline:
        if enabled is True:
            self.strprint(banner)
            getattr(self, handler_name)()

    return self.df_invoice_line
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _apply_transform(self):\n pass", "def transform(self, data):", "def _transform(self, dataset):\n raise NotImplementedError()", "def _transform_data(self, *args, **kwargs) -> None:\n raise NotImplementedError", "def _process(self):\n # choose the correct transform model befor...
[ "0.71496886", "0.7044139", "0.7000777", "0.6858309", "0.6785153", "0.6729821", "0.67230743", "0.65904725", "0.65476114", "0.6538246", "0.6538246", "0.6501432", "0.6452522", "0.6446105", "0.6395718", "0.639384", "0.639384", "0.639384", "0.639384", "0.639384", "0.639384", "0....
0.56914866
67
Build dataframe df_customers from transformed data. Transformed data are issued from NLP, Time and RFM features. See data_transform(). These data are stored as dataframes attributes.
def df_customers_features_build(self):
    """Aggregate the RFM, time and NLP per-customer feature dataframes
    (all indexed by customer ID) into ``self._df_customers``.
    """
    rfm_features = self._df_customers_rfm.copy()
    time_features = self._df_customers_timeFeature.copy()
    nlp_features = self._df_customers_pca_nlp.copy()

    # Column-wise aggregation; inner joins keep only the customers that
    # are present in every feature set.
    merged = pd.concat([pd.DataFrame(), rfm_features], axis=1)
    merged = pd.concat([merged, time_features], join='inner', axis=1)
    merged = pd.concat([merged, nlp_features], join='inner', axis=1)

    self.strprint("All features : "+str(merged.shape))

    self._df_customers = merged.copy()
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def features_customers(df_customers):\n for i in PREMIER_VALS:\n k = 'premier_' + str(i)\n df_customers[k] = np.where(df_customers['premier'] == i, 1, 0)\n\n df_customers['age'] = datetime.now().date().year - df_customers['yearOfBirth']\n df_customers['male'] = np.where(df_customers['gender'...
[ "0.70902693", "0.6727287", "0.6064118", "0.60265195", "0.6024846", "0.5994544", "0.596741", "0.58626324", "0.584423", "0.5838772", "0.5787832", "0.5785175", "0.5773647", "0.5766124", "0.5743439", "0.57312465", "0.57225657", "0.5721255", "0.5717279", "0.5693215", "0.5649832", ...
0.80397373
0
Build dataframe df_customers from transformed data. Transformed data are loaded from dumped files issued from NLP, Time and RFM features. See data_transform()
def df_customers_fileRead(self):
    """Rebuild the full customers feature dataframe from the dumped RFM,
    time and NLP feature files, aggregate them, and dump the result to
    ``self._df_customers_fileName``.

    NOTE(review): the ``if False:`` branches below are disabled legacy
    paths kept from a previous version; consider removing them.
    """
    #-------------------------------------------------------------------------
    # RFM features are restored from file
    #-------------------------------------------------------------------------
    df_customers_rfm \
    = p5_util.object_load(self.df_customers_rfm_fileName)
    self.strprint("RFM features : "+str(df_customers_rfm.shape))

    #-------------------------------------------------------------------------
    # Time features are restored from file
    #-------------------------------------------------------------------------
    df_customers_timeFeature \
    = p5_util.object_load(self._df_customers_timeFeature_fileName)
    self.strprint("Time features : "+str(df_customers_timeFeature.shape))

    #-------------------------------------------------------------------------
    # NLP features are restored from file
    #-------------------------------------------------------------------------
    df_customers_nlp = p5_util.object_load(self._df_customers_nlp_fileName)
    self.strprint("NLP features : "+str(df_customers_nlp.shape))

    # Dead code: features used to be read from instance attributes.
    if False:
        df_customers_rfm = self._df_customers_rfm.copy()
        df_customers_timeFeature = self._df_customers_timeFeature.copy()
        df_customers_nlp = self._df_customers_pca_nlp.copy()

    #-------------------------------------------------------------------------
    # Dataframes are aggregated; note that indexes are customerID.
    # Inner joins keep only customers present in every feature set.
    #-------------------------------------------------------------------------
    df_customers = pd.DataFrame()
    df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)
    df_customers = pd.concat([df_customers,df_customers_timeFeature]\
    , join='inner', axis=1)
    df_customers = pd.concat([df_customers,df_customers_nlp]\
    , join='inner', axis=1)

    self.strprint("All features : "+str(df_customers.shape))

    #----------------------------------------------------------------------
    # Dataframe is dumped into a file
    #----------------------------------------------------------------------
    p5_util.object_dump(df_customers, self._df_customers_fileName)

    # Dead code: the aggregated dataframe used to be kept as an attribute.
    if False:
        #----------------------------------------------------------------------
        # Dataframe is copied as an attribute
        #----------------------------------------------------------------------
        self._df_customers = df_customers.copy()

    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def df_customers_features_build(self):\n\n df_customers_rfm = self._df_customers_rfm.copy()\n df_customers_timeFeature = self._df_customers_timeFeature.copy()\n df_customers_nlp = self._df_customers_pca_nlp.copy()\n\n #-------------------------------------------------------------------------\n ...
[ "0.78012073", "0.70509386", "0.6482284", "0.6199711", "0.5975251", "0.5896229", "0.58942175", "0.5884299", "0.5793099", "0.57307404", "0.5701118", "0.568415", "0.56560767", "0.5655341", "0.56509733", "0.5625573", "0.5603968", "0.55976146", "0.55959636", "0.5586056", "0.557069...
0.7298348
1
Creates a new customer identifier from existing dataset.
def createCustomerID(self):
    """Return a fresh customer identifier: one greater than the largest
    CustomerID currently present in the original dataset.
    """
    next_id = self._df_invoice_original.CustomerID.max() + 1
    return int(next_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def customer_id(uncapped_flatprice, uncapped_flatprice_finalizer, team_multisig) -> int:\n customer_id = int(uuid.uuid4().hex, 16) # Customer ids are 128-bit UUID v4\n return customer_id", "def create_customer(cls, api, **data):\n return api.create_customer(**data)", "def create_customer(data):\n...
[ "0.6462454", "0.63013685", "0.6199043", "0.6019253", "0.60129994", "0.60011494", "0.59338015", "0.59171224", "0.5879934", "0.58377534", "0.5820006", "0.5756114", "0.5745354", "0.57360977", "0.57085544", "0.5678208", "0.5672292", "0.56669915", "0.56526893", "0.5649953", "0.564...
0.6483069
0
Drop from df_invoice_line dataframe features in list given as parameter. All elements from list are checked to be into dataframe columns.
def list_feature_drop(self):
    """Drop from ``self.df_invoice_line`` the columns listed in
    ``self._list_feature_to_drop``.

    Columns absent from the dataframe are reported with a warning; when
    none of the requested columns exist, an error is reported and the
    dataframe is left untouched.
    """
    list_to_drop = list()
    list_not_in_df = list()

    #-------------------------------------------------------------------------
    # Only columns that actually belong to df_invoice_line may be dropped.
    #-------------------------------------------------------------------------
    for col in self._list_feature_to_drop:
        if col in self.df_invoice_line.columns:
            list_to_drop.append(col)
        else:
            list_not_in_df.append(col)

    if 0 == len(list_to_drop):
        self.strprint("\n*** ERROR : no element in list belonging to dataframe!")
    else:
        if len(self._list_feature_to_drop) != len(list_to_drop):
            self.strprint("\n*** WARNING : followings features do not belong to \
dataframe : {}".format(list_not_in_df))
        else:
            pass
        # BUG FIX: a stray `s` token preceding this assignment (syntax
        # error in the original) was removed.
        list_col_keep \
        = [col for col in self.df_invoice_line.columns \
        if col not in list_to_drop]
        self.df_invoice_line = self.df_invoice_line[list_col_keep]
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drop_dfcol(self, drop_list):\n self.data = self.df\n for lbl in drop_list:\n self.data = self.data.drop(lbl, axis=1)\n self.n_features = np.shape(self.data)[1]", "def drop(self,df, column_list):\n df.drop(columns = column_list, inplace = True)\n return df", "de...
[ "0.6483683", "0.61852366", "0.61701894", "0.61167115", "0.5998328", "0.5879889", "0.5828969", "0.57773757", "0.57595223", "0.56772375", "0.55653924", "0.55105036", "0.5499361", "0.5488699", "0.5475562", "0.54356617", "0.5433665", "0.5431893", "0.54176056", "0.5401929", "0.537...
0.7785573
0
Process df_invoice_line.Description with NLTK package.
def feature_description_nlp(self):
    """Vectorize the Description column with the NLP pipeline, prefix the
    resulting numeric weight columns with ``w_nlp_`` and drop the raw
    Description column.
    """
    # Words excluded from the vectorizer vocabulary.
    excluded_words = ['SET','PACK']

    self.df_invoice_line, vectorizer, matrix_weights \
    = p5_util.nlp_process(self.df_invoice_line,'Description'\
    , vectorizer=self.vectorizer, list_no_words=excluded_words)

    # Vectorized columns come back with purely numeric names; give each
    # of them the w_nlp_ prefix.
    renaming = {col: "w_nlp_"+str(col)
                for col in self.df_invoice_line.columns
                if str(col).isdigit() is True}
    self.df_invoice_line.rename(columns=renaming, inplace=True)

    # The free-text source column is no longer needed.
    del(self.df_invoice_line['Description'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_process_deprecated(self, CustomerID, InvoiceDate, InvoiceNo, Description, Quantity\\\n , UnitPrice ):\n dict_invoice = {'InvoiceDate':InvoiceDate, 'Description':Description\\\n , 'Quantity':Quantity, 'UnitPrice':UnitPrice}\n dict_invoice['CustomerID'] = CustomerID\n dict_invoice['Inv...
[ "0.5984977", "0.5817351", "0.5476201", "0.5464979", "0.54381603", "0.5437341", "0.539142", "0.5348691", "0.53211063", "0.5294485", "0.5236899", "0.51855206", "0.51376784", "0.51221997", "0.50945246", "0.50891274", "0.5085302", "0.50443596", "0.5012782", "0.5011681", "0.493943...
0.7215489
0
Standardize quantitatives features. Standardizer is stored as object attribute. It will be copied into P5_SegmentClassifier object.
def feature_scale(self):
    """Standardize the Quantity and UnitPrice columns with the fitted
    scaler and replace them by STD_Quantity / STD_UnitPrice.
    """
    # Quantitative features handled by the standardizer.
    quant_features = ['Quantity','UnitPrice']
    self._list_quant_feature = quant_features.copy()

    # Apply the (already fitted) scaler to the quantitative columns.
    scaled_values = self.std_scale.transform(
        self.df_invoice_line[self.list_quant_feature])
    df_scaled = pd.DataFrame(scaled_values, index=self.df_invoice_line.index)

    # The scaler output columns are positional (0, 1); rename them.
    df_scaled.rename(columns={0:'STD_Quantity',1:'STD_UnitPrice'},
                     inplace=True)

    # Swap the raw columns for their standardized counterparts.
    remaining = [col for col in self.df_invoice_line.columns
                 if col not in ('Quantity', 'UnitPrice')]
    self.df_invoice_line = self.df_invoice_line[remaining]
    self.df_invoice_line = pd.concat([self.df_invoice_line, df_scaled],
                                     axis=1)
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def standardiser(self):\n # Select only numeric features first\n\n #self.X = self.data.loc[:, self.data.columns != self.target].values\n numeric_columns = []\n for col in self.X.columns:\n if self.X[col].dtype!='object':\n numeric_columns.append(col)\n s...
[ "0.66549325", "0.5813407", "0.57823", "0.57652164", "0.57482", "0.5628171", "0.56227326", "0.54852945", "0.54692763", "0.5413241", "0.5376666", "0.53717995", "0.5229775", "0.51795113", "0.5171377", "0.51348805", "0.51237977", "0.5088582", "0.5076588", "0.50762415", "0.5056169...
0.64846426
1
Returns market segment ID related to a customer thanks to customer invoices lines given as parameter. Features transformations are applied on data included into invoice lines. Once done, a machine learning algorithm is invocated in order to predict customer market segment.
def get_customer_marketSegment(self, df_invoice_line_customer):
    """Return the market-segment identifier predicted for the customer
    whose invoice lines are given.

    The invoice lines are run through the transformation pipeline, the
    per-customer feature matrix is rebuilt, then the classifier predicts
    the segment.
    """
    # Build the data model from the customer's invoice lines.
    self.data_transform(df_invoice_line_customer)

    # Customer features are built thanks to the fitted transformers.
    self.df_customers_features_build()

    # Predict and return the market segment.
    features = self._df_customers.values
    predictions = self._classifier_model.predict(features)
    return predictions[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict_segment(self, df_invoice_line=None):\n if df_invoice_line is not None:\n self.data_transform(df_invoice_line) \n self.df_customers_features_build() \n else:\n pass\n X_test = self._df_customers.values\n y_pred = self._classifier_model.predict(X_test)\n...
[ "0.7056929", "0.62926954", "0.61490464", "0.6014562", "0.59545195", "0.5511189", "0.54317766", "0.5338872", "0.53378683", "0.53256035", "0.5227046", "0.5198791", "0.5182063", "0.51410466", "0.5138717", "0.51151025", "0.51103526", "0.51066583", "0.5078583", "0.5048637", "0.502...
0.81526893
0
This function creates an invoice compounding invoices lines from data given as parameters. Once done, this function computes market segment customer belongs to. If customerID is None, then a new customer identifier is created before order process to take place.
def order_process(self, customerID, list_stockCode, list_quantity,
                  orderDate=None):
    """Record a new order as invoice lines and classify the customer.

    A new customer identifier is allocated when ``customerID`` is None.
    Returns the ``(segmentID, customerID)`` pair.
    """
    segmentID = -1

    # Unknown customer: allocate a fresh identifier first.
    if customerID is None:
        customerID = int(self.createCustomerID())

    # Build the invoice lines describing this order.
    df_invoice_line = self.create_customer_df_invoice_line(
        customerID, list_stockCode, list_quantity, orderDate)

    # Append the new lines to the reference dataset.
    print("order_process : shape before concat= "+str(self._df_invoice_original.shape))
    self._df_invoice_original = pd.concat(
        [self._df_invoice_original, df_invoice_line], axis=0)
    print("order_process : shape after concat= "+str(self._df_invoice_original.shape))

    # The customer's full purchase history (new order included) drives
    # the market-segment prediction.
    df_invoice_line_customer \
        = self.get_customer_history_df_invoice_line(customerID)
    segmentID = self.get_customer_marketSegment(df_invoice_line_customer)

    return segmentID, customerID
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_customer_df_invoice_line(self, customerID, list_stockCode\\\n , list_quantity, invoiceDate):\n \n dict_invoice = dict()\n\n dict_invoice['Quantity'] = list_quantity\n dict_invoice['StockCode'] = list_stockCode\n\n #--------------------------------------------------------------...
[ "0.65959746", "0.6386151", "0.6226632", "0.5918294", "0.58827716", "0.5865145", "0.5850917", "0.58120507", "0.57048744", "0.57040673", "0.563682", "0.56352204", "0.5545573", "0.55451196", "0.5510552", "0.5419621", "0.54099417", "0.5400348", "0.53597164", "0.5323142", "0.53154...
0.68115205
0
Performs data processing in order data to feed prediction algorithm.
def data_process_deprecated(self, CustomerID, InvoiceDate, InvoiceNo,
                            Description, Quantity, UnitPrice):
    """Deprecated: build a one-row invoice dataframe from scalar values
    and run the whole transformation pipeline on it.
    """
    record = {
        'InvoiceDate': InvoiceDate,
        'Description': Description,
        'Quantity': Quantity,
        'UnitPrice': UnitPrice,
        'CustomerID': CustomerID,
        'InvoiceNo': InvoiceNo,
    }
    df_invoice_line \
        = pd.DataFrame(record, columns=record.keys(), index=[0])

    self.data_transform(df_invoice_line)

    #self.feature_rfm_encode()

    self.feature_scale()

    self.list_feature_drop()

    self.feature_description_nlp()
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self, data):\n return self.estimator.predict(data)", "def process(self, data_batch: Any, predictions: Sequence[dict]) -> None:\n self.results.extend(_to_cpu(predictions))", "def run_prediction(self):\r\n self.get_prediction_indices()\r\n self.walk_forward_prediction()", ...
[ "0.6911158", "0.68740946", "0.6579429", "0.65545535", "0.65444416", "0.6536913", "0.64836574", "0.6466419", "0.6466419", "0.6391505", "0.63673496", "0.6256279", "0.6249069", "0.6240798", "0.62286144", "0.6211028", "0.6206762", "0.61867833", "0.6183594", "0.6156628", "0.614365...
0.0
-1
Return the segment identifier a customer is predicted to belong to.
def predict_segment(self, df_invoice_line=None):
    """Return the market-segment identifier the customer is predicted to
    belong to.

    When fresh invoice lines are supplied, the feature matrix is rebuilt
    from them first; otherwise the existing features are reused.
    """
    if df_invoice_line is not None:
        self.data_transform(df_invoice_line)
        self.df_customers_features_build()

    y_pred = self._classifier_model.predict(self._df_customers.values)
    return y_pred[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_customer_marketSegment(self, df_invoice_line_customer):\n #-------------------------------------------------------------------------\n # Building data model \n #-------------------------------------------------------------------------\n self.data_transform(df_invoice_line_customer)\n\n ...
[ "0.768172", "0.6201237", "0.61484486", "0.5937784", "0.5902539", "0.5902539", "0.58263624", "0.57347125", "0.5691365", "0.56732774", "0.5623136", "0.55613863", "0.5555856", "0.5535719", "0.5528222", "0.55247766", "0.5446849", "0.54316", "0.5398863", "0.5389348", "0.52877736",...
0.63881826
1
Returns list of stock codes from list of items descriptions.
def getStockCodeList(self, list_description=None):
    """Return the stock codes matching the given item descriptions, or
    every known stock code when no description list is given.
    """
    df = self._df_invoice_original

    if list_description is None:
        return list(df.StockCode.unique())

    # First stock code found for each description.
    return [df[df.Description == description].StockCode.unique()[0]
            for description in list_description]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDescriptionList(self, list_stockCode=None):\n df = self._df_invoice_original\n\n list_description = list()\n if list_stockCode is None :\n list_description = list(df.Description.unique())\n else:\n for stockCode in list_stockCode:\n description = df[df.StockCod...
[ "0.63454723", "0.5953065", "0.58789575", "0.5782579", "0.5730202", "0.551616", "0.5417973", "0.5402704", "0.5288644", "0.5257909", "0.5255299", "0.5200416", "0.5199842", "0.51931006", "0.5064142", "0.50508547", "0.50235844", "0.49738976", "0.4950196", "0.49323055", "0.4927703...
0.7344879
0
Returns list of items' unit prices from list of stock codes.
def getUnitPriceList(self, list_stockCode):
    """Return one unit price per stock code in ``list_stockCode``.

    For each stock code, the first unit price recorded in the original
    dataset is used.
    """
    df = self._df_invoice_original
    return [df[df.StockCode == code].UnitPrice.unique()[0]
            for code in list_stockCode]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getStockCodeList(self, list_description=None):\n list_stockCode = list()\n df = self._df_invoice_original\n \n if list_description is None:\n list_stockCode = list(df.StockCode.unique())\n else :\n for description in list_description:\n stockCode = df[df.Desc...
[ "0.6023552", "0.5827989", "0.5810261", "0.57817996", "0.57182974", "0.5632579", "0.55539757", "0.5493873", "0.5484011", "0.54804665", "0.54708654", "0.5443625", "0.5422661", "0.5405019", "0.5299616", "0.5295281", "0.527327", "0.5272383", "0.52600485", "0.52459085", "0.5221727...
0.8246834
0
Returns list of items descriptions from list of stock codes.
def getDescriptionList(self, list_stockCode=None):
    """Return the item descriptions matching the given stock codes, or
    every known description when no stock-code list is given.
    """
    df = self._df_invoice_original

    if list_stockCode is None :
        return list(df.Description.unique())

    # First description found for each stock code.
    return [df[df.StockCode == stockCode].Description.unique()[0]
            for stockCode in list_stockCode]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getStockCodeList(self, list_description=None):\n list_stockCode = list()\n df = self._df_invoice_original\n \n if list_description is None:\n list_stockCode = list(df.StockCode.unique())\n else :\n for description in list_description:\n stockCode = df[df.Desc...
[ "0.65685076", "0.566973", "0.5336407", "0.5295758", "0.51715976", "0.51711005", "0.5148782", "0.5139008", "0.508993", "0.5061929", "0.5032067", "0.49966037", "0.49956048", "0.4957243", "0.4957215", "0.4956653", "0.49447757", "0.49301252", "0.4907753", "0.49050713", "0.4902797...
0.7398652
0
Creates new dataframe with invoices lines issued from given parameters. Once done, the new dataframe is aggregated with original one.
def create_customer_df_invoice_line(self, customerID, list_stockCode,
                                    list_quantity, invoiceDate):
    """Build a dataframe of new invoice lines for one order.

    All lines share the same customer identifier, a freshly allocated
    invoice number and the given (or current local) invoice date; row
    indexes continue after the last row of the original dataset.
    """
    # Default the invoice date to "now" (local time) when none is given.
    if invoiceDate is None:
        now = time.localtime()
        stamp = str(now.tm_year)+'-'+str(now.tm_mon)+'-'+str(now.tm_mday)
        stamp += ' '
        stamp += str(now.tm_hour)+':'+str(now.tm_min)+':'+str(now.tm_sec)
        invoiceDate = pd.Timestamp(stamp)

    # One new invoice number covers the whole order.
    invoiceNo = max(self._df_invoice_original.InvoiceNo) + 1

    # New rows get indexes continuing after the current last row.
    last_index = max(self._df_invoice_original.index)
    n_lines = len(list_quantity)
    list_invoice_line_index = [last_index + i + 1 for i in range(n_lines)]

    # Assemble all columns; descriptions and unit prices are looked up
    # from the stock codes.
    dict_invoice = {
        'Quantity': list_quantity,
        'StockCode': list_stockCode,
        'CustomerID': [customerID] * n_lines,
        'InvoiceNo': [invoiceNo] * n_lines,
        'InvoiceDate': [invoiceDate] * n_lines,
        'Description': self.getDescriptionList(list_stockCode),
        'UnitPrice': self.getUnitPriceList(list_stockCode),
    }

    return pd.DataFrame(dict_invoice, columns=dict_invoice.keys(),
                        index=list_invoice_line_index)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_purchase_invoice(self):\r\n active_id = self.env['purchase.order'].browse(self.env['purchase.order']._context.get('active_ids'))\r\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')]) \r\n active_id_count = 0\r\n active_count = 0\r\n exist_vend...
[ "0.58933586", "0.58273345", "0.5787141", "0.5730418", "0.57022715", "0.56592727", "0.5587117", "0.5582993", "0.5557562", "0.55526274", "0.55505604", "0.55257595", "0.5520605", "0.54978883", "0.5488627", "0.5473216", "0.5465187", "0.54585433", "0.54323196", "0.53706306", "0.53...
0.61920017
0
Returns a dataframe with all invoice lines from customerID given as parameter.
def get_customer_history_df_invoice_line(self, customerID):
    """Return every invoice line of the given customer from the original
    dataset.
    """
    mask = self._df_invoice_original.CustomerID == customerID
    return self._df_invoice_original[mask]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_customer_df_invoice_line(self, customerID, list_stockCode\\\n , list_quantity, invoiceDate):\n \n dict_invoice = dict()\n\n dict_invoice['Quantity'] = list_quantity\n dict_invoice['StockCode'] = list_stockCode\n\n #--------------------------------------------------------------...
[ "0.6706535", "0.637564", "0.6069902", "0.59014344", "0.58117783", "0.5807765", "0.57650644", "0.56994027", "0.5648026", "0.5595924", "0.5531552", "0.55192786", "0.54935724", "0.54334897", "0.5429923", "0.5344283", "0.53376174", "0.5330216", "0.532311", "0.5300056", "0.5274764...
0.76564085
0
Returns a list of customers that have been excluded from the data sampling used for building the model. By default, 10 customer identifiers are returned. If the customerCount value is None, or <= 0, then the list of all customers that have been excluded from the data sampling is returned.
def get_listCustomer_out_sample(self, customerCount=10):
    """Return identifiers of customers excluded from the sample used to
    build the model.

    A ``customerCount`` of None or <= 0 means "all of them"; otherwise
    at most ``customerCount`` identifiers are returned.
    """
    all_customers = list(self._df_invoice_line_out_sample.CustomerID.unique())

    if customerCount is None or customerCount <= 0:
        return all_customers
    return all_customers[:customerCount]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_customers_count():\n data = user_obj.get_all_customers(\"1\")\n return data", "def pdelements_num_customers(self) -> int:\n return self.dss_obj.PDElementsI(ctypes.c_int32(4), ctypes.c_int32(0))", "def get_all_customer_ids():\n table = data_manager.get_table_from_file(\"sales/sales.c...
[ "0.5755343", "0.5630526", "0.5468172", "0.5455088", "0.5447088", "0.53655386", "0.5348006", "0.5255881", "0.52160037", "0.5184504", "0.5118855", "0.51016146", "0.50954854", "0.5094486", "0.50568765", "0.50267607", "0.49917355", "0.49625525", "0.49529928", "0.49519303", "0.493...
0.7302814
0
Returns True if a customer identifier does not belong to the dataframe used to build the classifier model.
def is_customer_out_sample(self, customerID): listCustomer = list(self._df_invoice_line_out_sample.CustomerID.unique()) is_flag = customerID in listCustomer return is_flag
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_customer(self) -> bool:\n return self.customer_id is not None", "def has_customer(self):\n return self.customer is not None", "def is_customer(self):\n return self.user_type == 'C'", "def is_label_dataframe(label, df):\n\n setdiff = set(label) - set(df.columns.tolist())\n\n ...
[ "0.661681", "0.6462838", "0.59257627", "0.5741564", "0.5628499", "0.55574435", "0.5537199", "0.5509296", "0.5507439", "0.5461328", "0.5405652", "0.53825015", "0.5370098", "0.5365358", "0.5328702", "0.53143424", "0.529039", "0.52698815", "0.52663475", "0.52634853", "0.52370775...
0.63775516
2
Returns number of invoices from original dataset.
def get_invoice_count(self): return self._df_invoice_original.InvoiceNo.unique().shape[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_no_of_invoices(self):\n for record in self:\n record.invoice_count = len(record.invoice_ids)", "def get_invl_count(self):\n return self._df_invoice_original.index.unique().shape[0]", "def invoices(self):\r\n return inv.Invoices(self)", "def get_customer_count(self):...
[ "0.7744239", "0.73818606", "0.63354856", "0.6231307", "0.62079424", "0.6154787", "0.6135494", "0.6101816", "0.606694", "0.6054052", "0.60302943", "0.5938585", "0.5850497", "0.58089167", "0.57880515", "0.56934583", "0.568253", "0.56357646", "0.55606055", "0.55459076", "0.54968...
0.7667302
1
Returns number of customers from original dataset.
def get_customer_count(self): return self._df_invoice_original.CustomerID.unique().shape[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_num_of_sales_per_customer_ids():\n\n # your code", "def get_all_customers_count():\n data = user_obj.get_all_customers(\"1\")\n return data", "def customer_acccounting(customer_orders):", "def pdelements_num_customers(self) -> int:\n return self.dss_obj.PDElementsI(ctypes.c_int32(4), ...
[ "0.7528541", "0.741451", "0.7031075", "0.6915427", "0.68836576", "0.6863681", "0.6667784", "0.616603", "0.61287034", "0.6101881", "0.59332806", "0.5910718", "0.5855194", "0.58472735", "0.58410436", "0.5774803", "0.57345396", "0.57238173", "0.572342", "0.5717885", "0.56658965"...
0.8029719
0
Returns number of invoice lines (number of rows) from original dataset.
def get_invl_count(self): return self._df_invoice_original.index.unique().shape[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_invoice_count(self):\n return self._df_invoice_original.InvoiceNo.unique().shape[0]", "def getNumRows(self) -> int:\n ...", "def _compute_no_of_invoices(self):\n for record in self:\n record.invoice_count = len(record.invoice_ids)", "def getNoOfRows(self):\n retur...
[ "0.7554764", "0.69679993", "0.69573605", "0.6792377", "0.6672587", "0.6513209", "0.649585", "0.6461261", "0.63839567", "0.63666666", "0.62937206", "0.62934756", "0.62633395", "0.6255323", "0.62531394", "0.6229491", "0.62243587", "0.62149376", "0.6199522", "0.61816734", "0.614...
0.7293856
1
Returns a json sructure built from given parameters. {
def json_all_builder(self, customer_count, invoice_count, invl_count ): json_result = '{\n' json_result += '\t "_results":[\n' json_result += '\t\t{ "customer_count": "' + str(customer_count) json_result += ', "invoice_count": "' + str(invoice_count) json_result += ', "invl_count": "' + str(invl_count) json_result += '}\n' json_result += '\n\t]\n}' return json_result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_json_string(self, **kwargs):\n ...", "def format_data(self, params):\n return json.dumps(params)", "def json(self):\n robot_dict = self.robot_dict()\n target_dict = self.target_dict()\n json_str = '{'\n json_str = json_str + '\"robot_obj\" : ' + json.dumps(robo...
[ "0.6857647", "0.6645711", "0.6554232", "0.65235794", "0.6426044", "0.6354805", "0.6256294", "0.62538326", "0.62132627", "0.62050444", "0.6196949", "0.6165802", "0.6154298", "0.6132933", "0.6096993", "0.6083696", "0.6067892", "0.6067397", "0.606541", "0.60606605", "0.6041364",...
0.5603465
63
Returns JSON structure issued form dataframe content given as parameter .
def json_df_builder(self, df, marketID, RFM=None): #------------------------------------------------------------------------- # Extract from dataframe content to be returned #------------------------------------------------------------------------- str_customerID = str(df.CustomerID.unique()[0]) invoice_count = len(df.InvoiceNo.unique()) item_count = df.Quantity.sum() invl_count = df.shape[0] ser_incomes = df.UnitPrice * df.Quantity incomes = ser_incomes.sum() str_incomes = "{0:1.2F}".format(incomes) mean_unit_price = incomes/item_count str_mean_unit_price = "{0:1.2F}".format(mean_unit_price) serInvoiceDate = df.InvoiceDate str_old_date = serInvoiceDate.map(str).min() str_new_date = serInvoiceDate.map(str).max() #------------------------------------------------------------------------- # Build JSON structure form content #------------------------------------------------------------------------- json_result = '{\n' json_result += '\t "_results":[\n' json_result += "{\n" json_result += "\t\t"+" \"customerID\":"+str_customerID+"\n" json_result += "\t\t"+",\"marketID\":"+str(marketID)+"\n" json_result += "\t\t"+",\"invoice_count\":"+str(invoice_count)+"\n" json_result += "\t\t"+",\"item_count\":"+str(item_count)+"\n" json_result += "\t\t"+",\"invl_count\":"+str(invl_count)+"\n" json_result += "\t\t"+",\"mean_unit_price\":"+str_mean_unit_price+"\n" json_result += "\t\t"+",\"incomes\":"+str_incomes+"\n" json_result += "\t\t"+",\"old_date\":"+str_old_date+"\n" json_result += "\t\t"+",\"new_date\":"+str_new_date+"\n" if RFM is not None: json_result += "\t\t"+",\"RFM\":"+RFM+"\n" else: pass json_result += "}\n" json_result += '\n\t]\n}' return json_result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_json(self):\n\t\treturn self._dataframe.reset_index().to_json(orient=\"records\")", "def Mydata():\n\n stmt = db.session.query(Appsdata).statement\n df = pd.read_sql_query(stmt, db.session.bind)\n \n return jsonify(df.to_dict())", "def get_data(dataframe,index=None):\n dflen = len(datafra...
[ "0.64720726", "0.63688964", "0.62659526", "0.62423277", "0.61731887", "0.61728024", "0.6165461", "0.610696", "0.608971", "0.608501", "0.60734797", "0.6067765", "0.60032827", "0.5975856", "0.59611905", "0.5942278", "0.5940992", "0.59368914", "0.5936283", "0.5933788", "0.591012...
0.6871927
0