text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def tree_to_graph(bbltree:BubbleTree) -> Graph or Digraph:
    """Compute as a graphviz.Graph instance the given graph.

    If given BubbleTree instance is oriented, returned value
    is a graphviz.Digraph.

    See http://graphviz.readthedocs.io/en/latest/examples.html#cluster-py
    for graphviz API

    NOTE(review): the annotation `Graph or Digraph` evaluates to just
    `Graph`; it is kept here for its documentation value only.
    """
    # Oriented trees get directed edges.
    GraphObject = Digraph if bbltree.oriented else Graph
    def create(name:str):
        """Return a graphviz graph figurating a powernode"""
        ret = GraphObject('cluster_' + name)
        # dirty hack to get links between clusters: add a blank node inside
        # so the subgraph don't take it's name directly, but the blank node do.
        # ret.body.append('label = "{}"'.format(name))
        # replaced by:
        ret.node(name, style='invis', shape='point')
        # ret.body.append('style=plaintext')
        ret.body.append('color=lightgrey')
        ret.body.append('label=""')
        ret.body.append('shape=ellipse')
        ret.body.append('penwidth=2')
        ret.body.append('pencolor=black')
        return ret
    nodes = frozenset(bbltree.nodes())
    subgraphs = {}
    # build for each powernode the associated subgraph, and add its successors
    for powernode in bbltree.powernodes():
        if powernode not in subgraphs:
            subgraphs[powernode] = create(powernode)
        for succ in bbltree.inclusions[powernode]:
            if succ not in subgraphs:
                if succ not in nodes:
                    subgraphs[succ] = create(succ)
                else:
                    # plain node: add it directly inside its power node
                    subgraphs[powernode].node(succ)
    # add to Graph instances the Graph of successors as subgraphs
    for powernode, succs in bbltree.inclusions.items():
        for succ in succs:
            if succ not in nodes:
                subgraphs[powernode].subgraph(subgraphs[succ])
    # build the final graph by adding to it subgraphs of roots
    graph = GraphObject('graph', graph_attr={'compound': 'true'})
    for root in bbltree.roots:
        if root in subgraphs:
            graph.subgraph(subgraphs[root])
    # add the edges to the final graph
    for source, targets in bbltree.edges.items():
        for target in targets:
            # emit each undirected edge only once (lexicographic guard)
            if source <= target:
                attrs = {}
                # edges touching a cluster must be clipped at its border
                if source not in nodes:
                    attrs.update({'ltail': 'cluster_' + source})
                if target not in nodes:
                    attrs.update({'lhead': 'cluster_' + target})
                graph.edge(source, target, **attrs)
    # print(graph)  # debug line
    # graph.view()  # debug line
    return graph
def to_dict(self):
    """Returns OrderedDict whose keys are self.attrs"""
    # Preserve the attribute order declared in self.attrs.
    return OrderedDict(
        (name, self.__getattribute__(name)) for name in self.attrs
    )
def to_list(self):
    """Returns list containing values of attributes listed in self.attrs"""
    # Bug fix: the previous implementation was a copy/paste of to_dict and
    # returned an OrderedDict, contradicting its own docstring.
    return [self.__getattribute__(name) for name in self.attrs]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def uniq(pipe):
    '''
    Works like bash's uniq command: yield each item, collapsing runs of
    consecutive equal values into a single occurrence.

    Bug fixes:
    - compare with ``!=`` (value equality) instead of ``is not`` (identity),
      which only worked by accident for interned small ints/strings;
    - an empty iterable now yields nothing instead of leaking StopIteration
      (a RuntimeError under PEP 479 on Python 3.7+).
    '''
    pipe = iter(pipe)
    try:
        previous = next(pipe)
    except StopIteration:
        return
    yield previous
    for item in pipe:
        if item != previous:
            previous = item
            yield item
def send_request(url, method, data, args, params, headers, cookies, timeout,
                 is_json, verify_cert):
    """
    Forge and send HTTP request.

    :param url: URL possibly containing ``:name`` placeholders filled from args
    :param method: HTTP verb (any case)
    :param data: request body; JSON-encoded when is_json is true
    :returns: dict with keys data/cookies/content_type/status/is_json/timeout
    """
    ## Parse url args
    for p in args:
        url = url.replace(':' + p, str(args[p]))
    # Work on a copy so the caller's headers dict is never mutated.
    headers = dict(headers) if headers else {}
    try:
        if data:
            if is_json:
                headers['Content-Type'] = 'application/json'
                data = json.dumps(data)
            request = requests.Request(
                method.upper(), url, data=data, params=params,
                headers=headers, cookies=cookies
            )
        else:
            request = requests.Request(
                method.upper(), url, params=params,
                headers=headers, cookies=cookies
            )
        ## Prepare and send HTTP request.
        session = requests.Session()
        session.verify = verify_cert
        r = session.send(request.prepare(), timeout=timeout)
        session.close()
    except requests.exceptions.Timeout:
        return {
            'data': {},
            'cookies': CookieJar(),
            'content_type': '',
            'status': 0,
            'is_json': False,
            'timeout': True
        }
    try:
        content_type = r.headers.get('Content-Type', 'application/json')
        response = r.json()
        isjson = True
    except ValueError:
        # r.json() may raise json.JSONDecodeError or simplejson's variant
        # depending on what requests bundles; both subclass ValueError.
        content_type = r.headers.get('Content-Type', 'text/html')
        response = r.text
        isjson = False
    return {
        'data': response,
        'cookies': r.cookies,
        'content_type': content_type,
        'status': r.status_code,
        'is_json': isjson,
        'timeout': False
    }
def neighbors(self) -> List['Node']:
    """
    The list of neighbors of the node.
    """
    self._load_neighbors()
    # For each incident edge, pick the endpoint that is not this node.
    result = []
    for edge in self._neighbors.values():
        result.append(edge.target if edge.source == self else edge.source)
    return result
def _load_neighbors(self) -> None:
    """
    Loads all neighbors of the node from the local database and from the
    external data source if needed.
    """
    # Cold cache: fetch from the external source first, then mark the
    # cache warm in the local database so future loads skip this step.
    if not self.are_neighbors_cached:
        self._load_neighbors_from_external_source()
        db: GraphDatabaseInterface = self._graph.database
        db_node: DBNode = db.Node.find_by_name(self.name)
        db_node.are_neighbors_cached = True
        db.session.commit()
        self.are_neighbors_cached = True
    # Materialize in-memory Node/Edge objects once per instance.
    if not self._are_neighbors_loaded:
        self._load_neighbors_from_database()
def _load_neighbors_from_database(self) -> None:
    """
    Loads the neighbors of the node from the local database.
    """
    # Flag first so reentrant calls do not reload.
    self._are_neighbors_loaded = True
    graph: Graph = self._graph
    neighbors: List[DBNode] = graph.database.Node.find_by_name(self.name).neighbors
    nodes: NodeList = graph.nodes
    for db_node in neighbors:
        # add_node is presumably idempotent for already-known names —
        # NOTE(review): confirm against Graph.add_node.
        graph.add_node(db_node.name, db_node.external_id)
        neighbor: Node = nodes.get_node_by_name(db_node.name)
        # NOTE(review): assumes trailing args are (weight=1, save=False);
        # verify against Graph.add_edge's signature.
        graph.add_edge(self, neighbor, 1, False)
def key(self) -> Tuple[int, int]:
    """
    The unique identifier of the edge consisting of the indexes of its
    source and target nodes.
    """
    source, target = self._source, self._target
    return (source.index, target.index)
def edge_list(self) -> List[Edge]:
    """
    The ordered list of edges in the container, sorted by edge key.
    """
    # sorted() already returns a list; the wrapping comprehension was
    # a redundant copy.
    return sorted(self._edges.values(), key=attrgetter("key"))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def beforeSummaryReport(self, event):
    '''Output profiling results'''
    # Stop collecting first so the reporting below isn't itself profiled.
    self.prof.disable()
    stats = pstats.Stats(self.prof, stream=event.stream).sort_stats(
        self.sort)
    event.stream.writeln(nose2.util.ln('Profiling results'))
    stats.print_stats()
    # Optional raw dump for later inspection with pstats.
    if self.pfile:
        stats.dump_stats(self.pfile)
    if self.cachegrind:
        # NOTE(review): `visualize` presumably emits cachegrind-compatible
        # output from the raw profiler entries — confirm against its docs.
        visualize(self.prof.getstats())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def separate(text):
    '''Takes text and separates it into a list of lower-cased words,
    keeping ASCII letters only.

    Bug fix: previously returned a lazy ``map`` object on Python 3 (single
    consumption, not a list as documented); now returns a real list.
    '''
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    valid = alphabet + alphabet.upper()
    words = []
    for token in text.split():
        # Strip every non-letter character (digits, punctuation, ...).
        cleaned = ''.join(ch for ch in token if ch in valid)
        if cleaned:
            words.append(cleaned.lower())
    return words
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def eliminate_repeats(text):
    '''Returns a list of words that occur in the text. Eliminates stopwords.

    Bug fix: previously returned a lazy ``map`` object on Python 3; now
    returns a real list as the docstring promises.
    '''
    bannedwords = read_file('stopwords.txt')
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    valid = alphabet + alphabet.upper()
    standardwords = []
    for word in text.split():
        # Keep letters only, then deduplicate (case-sensitively, as before).
        newstr = ''.join(char for char in word if char in valid)
        if newstr not in standardwords and newstr != '' \
                and newstr not in bannedwords:
            standardwords.append(newstr)
    return [w.lower() for w in standardwords]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def wordcount(text):
    '''Returns the count of the words in a file, excluding stopwords.

    Bug fix: ``dict.has_key`` was removed in Python 3; use ``dict.get``.
    '''
    bannedwords = read_file('stopwords.txt')
    counts = {}
    for word in separate(text):
        if word not in bannedwords:
            counts[word] = counts.get(word, 0) + 1
    return counts
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def tuplecount(text):
    '''Changes a dictionary into a list of tuples, most frequent first.'''
    counts = wordcount(text)
    pairs = [(word, counts[word]) for word in counts.keys()]
    # reversed(sorted(...)) (not sorted(reverse=True)) is kept on purpose:
    # it reverses tie order exactly like the original implementation.
    return list(reversed(sorted(pairs, key=lambda pair: pair[1])))
def get_file_md5(filename):
    """Get a file's MD5 as a hex string; return '' if the file is missing."""
    if not os.path.exists(filename):
        return ''
    blocksize = 65536
    try:
        hasher = hashlib.md5()
    except BaseException:
        # FIPS-enabled builds reject plain md5(); flag it as non-security
        # use. Bug fix: the keyword is spelled ``usedforsecurity`` —
        # the old camelCase ``usedForSecurity`` raised TypeError.
        hasher = hashlib.new('md5', usedforsecurity=False)
    with open(filename, 'rb') as afile:
        # Hash in fixed-size chunks so large files don't load into memory.
        for buf in iter(lambda: afile.read(blocksize), b''):
            hasher.update(buf)
    return hasher.hexdigest()
def get_md5(string):
    """Get a string's MD5 as a hex string.

    Accepts either str (UTF-8 encoded first) or bytes. Bug fix: on
    Python 3 ``hashlib.update`` requires bytes, so passing str raised
    TypeError.
    """
    try:
        hasher = hashlib.md5()
    except BaseException:
        # FIPS-enabled builds require flagging md5 as non-security use.
        hasher = hashlib.new('md5', usedforsecurity=False)
    if isinstance(string, str):
        string = string.encode('utf-8')
    hasher.update(string)
    return hasher.hexdigest()
def deploy_signature(source, dest, user=None, group=None):
    """Deploy a signature file: move it into place, chmod 0o644 and
    optionally chown it to user:group.

    Bug fix: ``0644`` is a Python 2 octal literal and a SyntaxError on
    Python 3; the modern spelling is ``0o644``.
    """
    move(source, dest)
    os.chmod(dest, 0o644)
    if user and group:
        try:
            uid = pwd.getpwnam(user).pw_uid
            gid = grp.getgrnam(group).gr_gid
            os.chown(dest, uid, gid)
        except (KeyError, OSError):
            # Unknown user/group or insufficient privileges: best effort.
            pass
def get_local_version(sigdir, sig):
    """Get the local version of a signature by parsing `sigtool -i` output.

    Returns the version string, or None if the .cvd file does not exist or
    no Version line is found. Bug fix: Popen pipes yield bytes by default
    on Python 3, so ``line.startswith('Version:')`` raised TypeError;
    request text mode instead.
    """
    version = None
    filename = os.path.join(sigdir, '%s.cvd' % sig)
    if os.path.exists(filename):
        cmd = ['sigtool', '-i', filename]
        sigtool = Popen(cmd, stdout=PIPE, stderr=PIPE,
                        universal_newlines=True)
        while True:
            line = sigtool.stdout.readline()
            if line and line.startswith('Version:'):
                version = line.split()[1]
                break
            if not line:
                # EOF without a Version line.
                break
        sigtool.wait()
    return version
def verify_sigfile(sigdir, sig):
    """Verify a signature file; True when `sigtool -i` exits with 0."""
    process = Popen(['sigtool', '-i', '%s/%s.cvd' % (sigdir, sig)],
                    stdout=PIPE, stderr=PIPE)
    return process.wait() == 0
def check_download(obj, *args, **kwargs):
    """Verify a download: raise ValueError when the fetched signature does
    not validate or its version does not match the expected one."""
    version, workdir, signame = args[0], args[1], args[2]
    if not version:
        return
    local_version = get_local_version(workdir, signame)
    # verify_sigfile is evaluated first, exactly as before.
    if not verify_sigfile(workdir, signame) or version != local_version:
        error("[-] \033[91mFailed to verify signature: %s from: %s\033[0m"
              % (signame, obj.url))
        raise ValueError('Failed to verify signature: %s' % signame)
def download_sig(opts, sig, version=None):
    """Download a signature from opts.hostname.

    Downloads a full .cvd when a version is given, otherwise a .cdiff.
    Returns (downloaded, http_status).

    Bug fixes: response bodies are bytes, so the file is now opened in
    binary mode; a failed request now returns instead of falling through
    to a NameError on ``req``.
    """
    code = None
    downloaded = False
    useagent = 'ClamAV/0.101.1 (OS: linux-gnu, ARCH: x86_64, CPU: x86_64)'
    manager = PoolManager(
        headers=make_headers(user_agent=useagent),
        cert_reqs='CERT_REQUIRED',
        ca_certs=certifi.where(),
        timeout=Timeout(connect=10.0, read=60.0)
    )
    if version:
        path = '/%s.cvd' % sig
        filename = os.path.join(opts.workdir, '%s.cvd' % sig)
    else:
        path = '/%s.cdiff' % sig
        filename = os.path.join(opts.workdir, '%s.cdiff' % sig)
    try:
        req = manager.request('GET', 'http://%s%s' % (opts.hostname, path))
    except BaseException as msg:
        error("Request error: %s" % msg)
        return downloaded, code
    data = req.data
    code = req.status
    if req.status == 200:
        with open(filename, 'wb') as handle:
            handle.write(data)
        downloaded = os.path.exists(filename)
    return downloaded, code
def copy_sig(sig, opts, isdiff):
    """Deploy a sig"""
    info("[+] \033[92mDeploying signature:\033[0m %s" % sig)
    # cdiffs and full cvds only differ by extension.
    extension = 'cdiff' if isdiff else 'cvd'
    basename = '%s.%s' % (sig, extension)
    sourcefile = os.path.join(opts.workdir, basename)
    destfile = os.path.join(opts.mirrordir, basename)
    deploy_signature(sourcefile, destfile, opts.user, opts.group)
    info("=> Deployed signature: %s" % sig)
def create_dns_file(opts, record):
    """Create the DNS record file, rewriting it only when its MD5 changed."""
    info("[+] \033[92mUpdating dns.txt file\033[0m")
    filename = os.path.join(opts.mirrordir, 'dns.txt')
    localmd5 = get_file_md5(filename)
    remotemd5 = get_md5(record)
    if localmd5 == remotemd5:
        info("=> No update required L: %s => R: %s" % (localmd5, remotemd5))
    else:
        create_file(filename, record)
        info("=> dns.txt file updated")
def download_diffs(queue):
    """Worker loop: download the cdiff files queued as
    (options, signature_type, localver, remotever) tuples."""
    while True:
        options, signature_type, localver, remotever = queue.get()
        # Fetch every diff from the local version up to the remote one.
        for num in range(int(localver), int(remotever) + 1):
            sig_diff = '%s-%d' % (signature_type, num)
            cdiff_path = os.path.join(options.mirrordir,
                                      '%s.cdiff' % sig_diff)
            if not os.path.exists(cdiff_path):
                update_diff(options, sig_diff)
        queue.task_done()
def work(options):
    """The work function: spin up download workers, queue signature and
    cdiff downloads, wait for completion, then write dns.txt and exit."""
    # pylint: disable=too-many-locals
    record = get_record(options)
    _, mainv, dailyv, _, _, _, safebrowsingv, bytecodev = record.split(':')
    versions = {'main': mainv, 'daily': dailyv,
                'safebrowsing': safebrowsingv, 'bytecode': bytecodev}
    dqueue = Queue(maxsize=0)
    dqueue_workers = 3
    info("[+] \033[92mStarting workers\033[0m")
    for index in range(dqueue_workers):
        info("=> Starting diff download worker: %d" % (index + 1))
        worker = Thread(target=download_diffs, args=(dqueue,))
        # Thread.setDaemon() is deprecated; set the attribute instead.
        worker.daemon = True
        worker.start()
    mqueue = Queue(maxsize=0)
    mqueue_workers = 4
    for index in range(mqueue_workers):
        info("=> Starting signature download worker: %d" % (index + 1))
        worker = Thread(target=update_sig, args=(mqueue,))
        worker.daemon = True
        worker.start()
    for signature_type in ['main', 'daily', 'bytecode', 'safebrowsing']:
        if signature_type in ['daily', 'bytecode', 'safebrowsing']:
            # cdiff downloads only make sense when a local version exists.
            localver = get_local_version(options.mirrordir, signature_type)
            remotever = versions[signature_type]
            if localver is not None:
                dqueue.put((options, signature_type, localver, remotever))
        mqueue.put((options, signature_type, versions))
    info("=> Waiting on workers to complete tasks")
    dqueue.join()
    mqueue.join()
    info("=> Workers done processing queues")
    create_dns_file(options, record)
    sys.exit(0)
def copy_resource(src, dest):
    """ To copy package data to destination

    Bug fix: the original mixed ``package_name`` ("yass") with
    ``__name__`` when listing/reading resources; if this module is not
    the package root those resolve to different packages. Use
    ``package_name`` consistently.
    """
    package_name = "yass"
    dest = (dest + "/" + os.path.basename(src)).rstrip("/")
    if pkg_resources.resource_isdir(package_name, src):
        if not os.path.isdir(dest):
            os.makedirs(dest)
        for res in pkg_resources.resource_listdir(package_name, src):
            copy_resource(src + "/" + res, dest)
    else:
        # Skip compiled artifacts and never clobber existing files.
        if not os.path.isfile(dest) \
                and os.path.splitext(src)[1] not in [".pyc"]:
            with open(dest, "wb") as f:
                f.write(pkg_resources.resource_string(package_name, src))
        else:
            print("File exists: %s " % dest)
def publish(endpoint, purge_files, rebuild_manifest, skip_upload):
    """Publish the site"""
    print("Publishing site to %s ..." % endpoint.upper())
    yass = Yass(CWD)
    target = endpoint.lower()
    sitename = yass.sitename
    if not sitename:
        raise ValueError("Missing site name")
    # `endpoint` is rebound from CLI arg to the hosting config section.
    endpoint = yass.config.get("hosting.%s" % target)
    if not endpoint:
        raise ValueError("%s endpoint is missing in the config"
                         % target.upper())
    if target == "s3":
        p = publisher.S3Website(sitename=sitename,
                                aws_access_key_id=endpoint.get("aws_access_key_id"),
                                aws_secret_access_key=endpoint.get("aws_secret_access_key"),
                                region=endpoint.get("aws_region"))
        if not p.website_exists:
            print(">>>")
            print("Setting S3 site...")
            if p.create_website() is True:
                # Need to give it enough time to create it
                # Should be a one time thing
                time.sleep(10)
                p.create_www_website()
                print("New bucket created: %s" % p.sitename)
        if rebuild_manifest:
            print(">>>")
            print("Rebuilding site's manifest...")
            p.create_manifest_from_s3_files()
        # CLI flag or per-endpoint config can both trigger a purge.
        if purge_files is True or endpoint.get("purge_files") is True:
            print(">>>")
            print("Purging files...")
            exclude_files = endpoint.get("purge_exclude_files", [])
            p.purge_files(exclude_files=exclude_files)
        if not skip_upload:
            print(">>>")
            print("Uploading your site...")
            p.upload(yass.build_dir)
        else:
            print(">>>")
            print("WARNING: files upload was skipped because of the use of --skip-upload")
        print("")
        print("Yass! Your site has been successfully published to: ")
        print(p.website_endpoint_url)
    footer()
def setup_dns(endpoint):
    """Setup site domain to route to static site"""
    print("Setting up DNS...")
    yass = Yass(CWD)
    target = endpoint.lower()
    sitename = yass.sitename
    if not sitename:
        raise ValueError("Missing site name")
    # Resolve the per-host configuration section for the chosen target.
    endpoint = yass.config.get("hosting.%s" % target)
    if not endpoint:
        raise ValueError(
            "%s endpoint is missing in the hosting config" % target.upper())
    if target == "s3":
        site = publisher.S3Website(
            sitename=sitename,
            aws_access_key_id=endpoint.get("aws_access_key_id"),
            aws_secret_access_key=endpoint.get("aws_secret_access_key"),
            region=endpoint.get("aws_region"))
        print("Setting AWS Route53 for: %s ..." % site.sitename)
        site.setup_dns()
        print("")
        print("Yass! Route53 setup successfully!")
        print("You can now visit the site at :")
        print(site.sitename_endpoint)
    footer()
def create_site(sitename):
    """Create a new site directory and init Yass"""
    sitepath = os.path.join(CWD, sitename)
    if not os.path.isdir(sitepath):
        print("Creating site: %s..." % sitename)
        os.makedirs(sitepath)
        # Seed the new directory with the packaged skeleton.
        copy_resource("skel/", sitepath)
        stamp_yass_current_version(sitepath)
        print("Site created successfully!")
        print("CD into '%s' and run 'yass serve' to view the site" % sitename)
    else:
        print("Site directory '%s' exists already!" % sitename)
    footer()
def init():
    """Initialize Yass in the current directory """
    conf_path = os.path.join(CWD, "yass.yml")
    if os.path.isfile(conf_path):
        # A config file means the directory was already initialized.
        print("::ALERT::")
        print("It seems like Yass is already initialized here.")
        print("If it's a mistake, delete 'yass.yml' in this directory")
    else:
        print("Init Yass in %s ..." % CWD)
        copy_resource("skel/", CWD)
        stamp_yass_current_version(CWD)
        print("Yass init successfully!")
        print("Run 'yass serve' to view the site")
    footer()
def create_page(pagename):
    """
    Create a new page
    Omit the extension, it will create it as .jade file
    """
    page = pagename.lstrip("/").rstrip("/")
    _, _ext = os.path.splitext(pagename)
    # If the file doesn't have an extension, we'll just create one
    if not _ext or _ext == "":
        page += ".jade"
    # PAGE_FORMAT is a module-level tuple of allowed extensions.
    if not page.endswith(PAGE_FORMAT):
        error("Can't create '%s'" % page)
        print("Invalid filename format")
        print("Filename must be in: '%s'" % " | ".join(PAGE_FORMAT))
    else:
        engine = Yass(CWD)
        # Pick the template body matching the markup of the extension.
        markup = "jade"
        if page.endswith(".md"):
            markup = "md"
        if page.endswith(".html"):
            markup = "html"
        dest_file = os.path.join(engine.pages_dir, page)
        dest_dir = os.path.dirname(dest_file)
        content = TPL_HEADER
        content += TPL_BODY[markup]
        if os.path.isfile(dest_file):
            error("File exists already")
            print("Location: %s" % dest_file)
        else:
            if not os.path.isdir(dest_dir):
                os.makedirs(dest_dir)
            with open(dest_file, "w") as f:
                f.write(content)
            print("New page created: '%s'" % page)
            print("Location: %s" % dest_file)
    footer()
def serve(port, no_livereload, open_url):
    """Serve the site """
    engine = Yass(CWD)
    # CLI flags take precedence; None means "fall back to config".
    if not port:
        port = engine.config.get("local_server.port", 8000)
    if no_livereload is None:
        no_livereload = True if engine.config.get("local_server.livereload") is False else False
    if open_url is None:
        open_url = False if engine.config.get("local_server.open_url") is False else True
    print("Serving at %s" % port)
    print("Livereload is %s" % ("OFF" if no_livereload else "ON"))
    # Rebuild callbacks for the livereload watcher.
    def build_static():
        engine.build_static()
    def build_pages():
        engine.build_pages()
    engine.build()
    server = Server()
    if no_livereload is False:
        server.watch(engine.static_dir + "/", build_static)
        server.watch(engine.pages_dir + "/", build_pages)
        server.watch(engine.templates_dir + "/", build_pages)
        server.watch(engine.data_dir + "/", build_pages)
    # NOTE(review): open_url (a bool) is passed as open_url_delay —
    # confirm livereload treats truthy values as "open the browser".
    server.serve(open_url_delay=open_url, port=port, root=engine.build_dir)
def get_map_location(self):
    """Get the location of the player, converted to world coordinates.

    :return: a tuple (x, y, z).
    """
    map_data = self.get_map()
    (bounds_e, bounds_n), (bounds_w, bounds_s) = map_data["continent_rect"]
    (map_e, map_n), (map_w, map_s) = map_data["map_rect"]
    assert bounds_w < bounds_e
    assert bounds_n < bounds_s
    assert map_w < map_e
    assert map_n < map_s
    meters_to_inches = 39.3701
    x, y, z = self.fAvatarPosition

    def rescale(value, src_lo, src_hi, dst_lo, dst_hi):
        # Linear interpolation from the map rect into the continent rect,
        # in the exact arithmetic order of the original implementation.
        return dst_lo + ((value - src_lo) / (src_hi - src_lo)
                         * (dst_hi - dst_lo))

    world_x = rescale(x * meters_to_inches, map_w, map_e, bounds_w, bounds_e)
    # The game's z axis is negated and mapped onto the north/south span.
    world_y = rescale(-z * meters_to_inches, map_n, map_s, bounds_n, bounds_s)
    world_z = y * meters_to_inches
    return world_x, world_y, world_z
def CreateVertices(self, points):
    """
    Build a digraph whose nodes are the given (z, x, Q) triples.
    """
    gr = digraph()
    for z, x, q_value in points:
        gr.add_nodes([(z, x, q_value)])
    return gr
def GetFarthestNode(self, gr, node):
    """Return the node farthest from *node* (the end of the track).

    Remember: edge weights are negative distances, so the farthest node
    has the minimum value. Bug fix: ``dict.iteritems`` was removed in
    Python 3; use ``min`` over the distance mapping instead.
    """
    distance = minmax.shortest_path_bellman_ford(gr, node)[1]
    # default=None preserves the old behavior for an empty mapping.
    return min(distance, key=distance.get, default=None)
def on_success(self, fn, *args, **kwargs):
    """
    Call the given callback if or when the connected deferred succeeds.
    """
    self._callbacks.append((fn, args, kwargs))
    # Already resolved: fire the chain immediately for the new callback.
    if self._resulted_in is not _NOTHING_YET:
        self._succeed(result=self._resulted_in)
def _succeed(self, result):
    """
    Fire the success chain: invoke every registered callback with the
    result, then record the result for late subscribers.
    """
    for callback, args, kwargs in self._callbacks:
        callback(result, *args, **kwargs)
    self._resulted_in = result
def fetch_config(filename):
    """Fetch the Configuration schema information

    Finds the schema file relative to this module's source directory,
    reads its JSON and returns it as a dictionary.

    Bug fix: the file handle was previously never closed; a with-block
    now guarantees it.
    """
    # This trick gets the directory of *this* file Configuration.py thus
    # allowing to find the schema files relative to this file.
    dir_name = get_source_dir()
    # Schemas live in the json/ subdirectory.
    filename = os.path.join('json', filename)
    with open(os.path.join(dir_name, filename), 'r') as fileobj:
        return json.loads(fileobj.read())
def populate_args_level(schema, parser):
    """Use a JSON schema to populate a command line argument parser.

    Bug fix: ``dict.iteritems`` was removed in Python 3; use ``items``.
    """
    for key, value in schema['properties'].items():
        if key == 'name':
            continue
        arg = '--%s' % key
        desc = value['description']
        if 'type' not in value:
            continue
        if value['type'] == 'string':
            if 'enum' in value:
                parser.add_argument(arg, help=desc, type=str,
                                    choices=value['enum'])
            else:
                parser.add_argument(arg, help=desc, type=str)
        elif value['type'] == 'number':
            parser.add_argument(arg, help=desc, type=float)
        elif value['type'] == 'integer':
            parser.add_argument(arg, help=desc, type=int)
        elif str(value['type']) == 'array':
            # Only fixed-length float arrays are supported.
            assert value['minItems'] == value['maxItems']
            if value['items']['type'] != 'number':
                raise NotImplementedError("Only float arrays work")
            parser.add_argument(arg, help=desc, type=float,
                                nargs=value['maxItems'], metavar='N')
        elif value['type'] == 'object':
            # Nested objects are intentionally skipped (see the original
            # commented-out add_argument_group code).
            pass
def set_json(self, config_json):
    """Permanently set the JSON configuration

    Unable to call twice."""
    if self.configuration_dict is not None:
        raise RuntimeError("Can only set configuration once",
                           self.configuration_dict)
    # Validate against the schema before enriching with runtime values.
    schema = fetch_config('ConfigurationSchema.json')
    validictory.validate(config_json, schema)
    config_json['name'] = self.name
    config_json['run_number'] = self.run
    config_json['src_dir'] = get_source_dir()
    config_json['data_dir'] = get_data_dir()
    config_json['log_dir'] = get_log_dir()
    self.configuration_dict = config_json
def file_strip_ext(
        afile,
        skip_version=False,
        only_known_extensions=False,
        allow_subformat=True):
    """
    Strip in the best way the extension from a filename.

    'foo' -> 'foo'
    'foo.buz' -> 'foo'
    'foo.buz;1' -> 'foo' (with skip_version)
    'foo.gif;icon' -> 'foo' (with allow_subformat)

    @param afile: the path/name of a file.
    @type afile: string
    @param skip_version: whether to skip a trailing ";version".
    @type skip_version: bool
    @param only_known_extensions: whether to strip out only known extensions
        or to consider as extension anything that follows a dot.
    @type only_known_extensions: bool
    @param allow_subformat: whether to consider also subformats as
        part of the extension.
    @type allow_subformat: bool
    @return: the name/path without the extension (and version).
    @rtype: string
    """
    import os
    # Split on ';' to isolate trailing subformat / version markers.
    afile = afile.split(';')
    # Non-numeric last chunk = subformat (e.g. 'icon'); drop it if allowed.
    if len(afile) > 1 and allow_subformat and not afile[-1].isdigit():
        afile = afile[0:-1]
    # Numeric last chunk = version (e.g. ';1'); drop it when requested.
    if len(afile) > 1 and skip_version and afile[-1].isdigit():
        afile = afile[0:-1]
    afile = ';'.join(afile)
    # _extensions is the module-level regex of known extensions.
    nextfile = _extensions.sub('', afile)
    if nextfile == afile and not only_known_extensions:
        nextfile = os.path.splitext(afile)[0]
    # Loop to peel stacked extensions (e.g. '.tar.gz') one at a time.
    while nextfile != afile:
        afile = nextfile
        nextfile = _extensions.sub('', afile)
    return nextfile
def guess_extension(amimetype, normalize=False):
    """
    Tries to guess extension for a mimetype.

    @param amimetype: name of a mimetype
    @type amimetype: string
    @param normalize: whether to normalize common magic mis-interpretations
    @type normalize: bool
    @return: the extension, or None when the mimetype is unknown
    @rtype: string
    """
    ext = _mimes.guess_extension(amimetype)
    if ext and normalize:
        # Normalize some common magic mis-interpreation
        ext = {'.asc': '.txt', '.obj': '.bin'}.get(ext, ext)
        # Imported lazily to avoid a circular import at module load time.
        from invenio.legacy.bibdocfile.api_normalizer import normalize_format
        return normalize_format(ext)
    return ext
def get_magic_guesses(fullpath):
    """
    Return all the possible guesses from the magic library about
    the content of the file.

    @param fullpath: location of the file
    @type fullpath: string
    @return: guesses about content of the file; None when no supported
        magic binding is available (CFG_HAS_MAGIC neither 1 nor 2)
    @rtype: tuple
    """
    # CFG_HAS_MAGIC distinguishes between the two incompatible python
    # bindings of libmagic installed under the same module name.
    if CFG_HAS_MAGIC == 1:
        magic_cookies = _get_magic_cookies()
        magic_result = []
        for key in magic_cookies.keys():
            magic_result.append(magic_cookies[key].file(fullpath))
        return tuple(magic_result)
    elif CFG_HAS_MAGIC == 2:
        magic_result = []
        # Query plain description, mime type and mime encoding in turn.
        for key in ({'mime': False, 'mime_encoding': False},
                    {'mime': True, 'mime_encoding': False},
                    {'mime': False, 'mime_encoding': True}):
            magic_result.append(_magic_wrapper(fullpath, **key))
        return tuple(magic_result)
def mimes(self):
    """
    Returns extended MimeTypes, i.e. the stdlib table augmented with a
    few hard-coded mappings plus any configured additional mimetypes.
    """
    _mimes = MimeTypes(strict=False)
    _mimes.suffix_map.update({'.tbz2': '.tar.bz2'})
    _mimes.encodings_map.update({'.bz2': 'bzip2'})
    if cfg['CFG_BIBDOCFILE_ADDITIONAL_KNOWN_MIMETYPES']:
        for key, value in iteritems(
                cfg['CFG_BIBDOCFILE_ADDITIONAL_KNOWN_MIMETYPES']):
            _mimes.add_type(key, value)
        # Drop the loop variables so they don't linger in scope.
        del key, value
    return _mimes
def extensions(self):
    """
    Generate the regular expression to match all the known extensions.

    @return: the regular expression.
    @rtype: regular expression object
    """
    # Bug fix: on Python 3 dict views cannot be concatenated with '+';
    # materialize each key view as a list first.
    _tmp_extensions = list(self.mimes.encodings_map) + \
        list(self.mimes.suffix_map) + \
        list(self.mimes.types_map[1]) + \
        cfg['CFG_BIBDOCFILE_ADDITIONAL_KNOWN_FILE_EXTENSIONS']
    extensions = []
    for ext in _tmp_extensions:
        # Normalize entries so every alternative starts with a dot.
        if ext.startswith('.'):
            extensions.append(ext)
        else:
            extensions.append('.' + ext)
    extensions.sort()
    extensions.reverse()
    extensions = set([ext.lower() for ext in extensions])
    # Escape the leading dot of each alternative and anchor at end.
    extensions = '\\' + '$|\\'.join(extensions) + '$'
    extensions = extensions.replace('+', '\\+')
    return re.compile(extensions, re.I)
def start(self, service):
    """
    Start the service, catching and logging exceptions.

    Bug fix: ``map(self.start_class, service.depends)`` is lazy on
    Python 3 and never ran; dependencies are now started explicitly.
    """
    try:
        for dep in service.depends:
            self.start_class(dep)
        if service.is_running():
            return
        if service in self.failed:
            log.warning("%s previously failed to start", service)
            return
        service.start()
    except Exception:
        log.exception("Unable to start service %s", service)
        self.failed.add(service)
def start_class(self, class_):
    """
    Start all services of a given class. If this manager doesn't already
    have a service of that class, it constructs one and starts it.

    Bug fixes for Python 3: ``filter()`` returns a lazy object that is
    always truthy, so the ``if not matches`` branch never ran; and
    ``map(self.start, matches)`` was never consumed, so nothing was
    actually started. Both are replaced with eager equivalents.
    """
    matches = [svc for svc in self if isinstance(svc, class_)]
    if not matches:
        svc = class_()
        self.register(svc)
        matches = [svc]
    for svc in matches:
        self.start(svc)
    return matches
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def stop_class(self, class_):
    "Stop all services of a given class"
    # NOTE(review): Python 2 idiom -- filter() yields a list and map()
    # calls stop() eagerly; both are lazy (no-ops here) under Python 3.
    matches = filter(lambda svc: isinstance(svc, class_), self)
    map(self.stop, matches)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_more_data(self, file, timeout): """ Return data from the file, if available. If no data is received by the timeout, then raise RuntimeError. """
timeout = datetime.timedelta(seconds=timeout) timer = Stopwatch() while timer.split() < timeout: data = file.read() if data: return data raise RuntimeError("Timeout")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _run_env(self): """ Augment the current environment providing the PYTHONUSERBASE. """
env = dict(os.environ) env.update( getattr(self, 'env', {}), PYTHONUSERBASE=self.env_path, PIP_USER="1", ) self._disable_venv(env) return env
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _disable_venv(self, env): """ Disable virtualenv and venv in the environment. """
# Drop any active virtualenv from the environment.  Its bin directory is
# assumed to be the first PATH entry, so that entry is stripped as well.
if env.pop('VIRTUAL_ENV', None):
    venv_bin, _, remainder = env['PATH'].partition(os.pathsep)
    env['PATH'] = remainder
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_env(self): """ Create a PEP-370 environment """
# Resolve the per-service PEP-370 root (default "services/<name>").
services_root = path.Path(os.environ.get('SERVICES_ROOT', 'services'))
self.env_path = (services_root / self.name).abspath()
# Ask the target interpreter where its user site-packages lives, then
# create that directory (and parents) if missing.
probe = 'import site; print(site.getusersitepackages())'
out = subprocess.check_output(
    [self.python, '-c', probe],
    env=self._run_env,
)
site_packages = out.decode().strip()
path.Path(site_packages).makedirs_p()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compaction(self, request_compaction=False): """Retrieve a report on, or request compaction for this instance. :param bool request_compaction: A boolean indicating whether or not to request compaction. """
url = self._service_url + 'compaction/' if request_compaction: response = requests.post(url, **self._instances._default_request_kwargs) else: response = requests.get(url, **self._instances._default_request_kwargs) return response.json()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_authenticated_connection(self, user, passwd, db='admin', ssl=True): """Get an authenticated connection to this instance. :param str user: The username to use for authentication. :param str passwd: The password to use for authentication. :param str db: The name of the database to authenticate against. Defaults to ``'Admin'``. :param bool ssl: Use SSL/TLS if available for this instance. Defaults to ``True``. :raises: :py:class:`pymongo.errors.OperationFailure` if authentication fails. """
# Attempt to establish an authenticated connection. try: connection = self.get_connection(ssl=ssl) connection[db].authenticate(user, passwd) return connection # Catch exception here for logging, then just re-raise. except pymongo.errors.OperationFailure as ex: logger.exception(ex) raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def shards(self, add_shard=False): """Get a list of shards belonging to this instance. :param bool add_shard: A boolean indicating whether to add a new shard to the specified instance. """
url = self._service_url + 'shards/' if add_shard: response = requests.post(url, **self._instances._default_request_kwargs) else: response = requests.get(url, **self._instances._default_request_kwargs) return response.json()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def new_relic_stats(self): """ Get stats for this instance. """
# Lazily computed and cached after the first call.
if self._new_relic_stats is None:
    # if this is a sharded instance, fetch shard stats in parallel
    if self.type == 'mongodb_sharded':
        shards = [Shard(self.name, self._service_url + 'shards/', self._client, shard_doc)
                  for shard_doc in self.shards().get('data')]
        # First sample: one stats request per shard, run concurrently.
        fs = []
        with futures.ThreadPoolExecutor(len(shards)) as executor:
            for shard in shards:
                fs.append(executor.submit(shard.get_shard_stats))
            futures.wait(fs, timeout=None, return_when=futures.ALL_COMPLETED)
        stats_this_second = self._rollup_shard_stats_to_instance_stats(
            {shard.name: future.result() for (shard, future) in zip(shards, fs)})
        # power nap -- two samples ~1s apart let us derive per-second rates
        time.sleep(1)
        # fetch again
        fs = []
        with futures.ThreadPoolExecutor(len(shards)) as executor:
            for shard in shards:
                fs.append(executor.submit(shard.get_shard_stats))
            futures.wait(fs, timeout=None, return_when=futures.ALL_COMPLETED)
        stats_next_second = self._rollup_shard_stats_to_instance_stats(
            {shard.name: future.result() for (shard, future) in zip(shards, fs)})
        self._new_relic_stats = self._compile_new_relic_stats(stats_this_second, stats_next_second)
    else:
        # fetch stats like we did before (by hitting new_relic_stats API resource)
        response = requests.get('{}{}'.format(self._url, 'new-relic-stats'),
                                **self._instances._default_request_kwargs)
        # Any non-200 response degrades to an empty stats dict.
        self._new_relic_stats = json.loads(response.content).get(
            'data') if response.status_code == 200 else {}
return self._new_relic_stats
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _rollup_shard_stats_to_instance_stats(self, shard_stats): """ roll up all shard stats to instance level stats :param shard_stats: dict of {shard_name: shard level stats} """
# Aggregate per-shard stats dicts into one instance-level view.
instance_stats = {}
opcounters_per_node = []
# aggregate replication_lag: worst (largest) lag across shards
instance_stats['replication_lag'] = max(map(lambda s: s['replication_lag'], shard_stats.values()))
aggregate_server_statistics = {}
for shard_name, stats in shard_stats.items():
    # Sum every shard_stats metric across shards, except 'connections'
    # which is kept as first-seen rather than summed.
    for statistic_key in stats.get('shard_stats'):
        if statistic_key != 'connections' and statistic_key in aggregate_server_statistics:
            aggregate_server_statistics[statistic_key] = util.sum_values(
                aggregate_server_statistics[statistic_key],
                stats.get('shard_stats')[statistic_key])
        else:
            aggregate_server_statistics[statistic_key] = stats.get('shard_stats')[statistic_key]
    # aggregate per_node_stats into opcounters_per_node
    opcounters_per_node.append({shard_name: {member: node_stats['opcounters']
                                             for member, node_stats in stats.get('per_node_stats').items()}})
instance_stats['opcounters_per_node'] = opcounters_per_node
instance_stats['aggregate_server_statistics'] = aggregate_server_statistics
return instance_stats
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compile_new_relic_stats(self, stats_this_second, stats_next_second): """ from instance 'stats_this_second' and instance 'stats_next_second', compute some per second stats metrics and other aggregated metrics :param dict stats_this_second: :param dict stats_next_second: :return: compiled instance stats that has metrics 'replication_lag': 0.0, 'aggregate_database_statistics': {} } """
# Diff two instance-level samples taken ~1s apart into per-second rates.
server_statistics_per_second = {}
opcounters_per_node_per_second = []
for subdoc in ["opcounters", "network"]:
    first_doc = stats_this_second['aggregate_server_statistics'][subdoc]
    second_doc = stats_next_second['aggregate_server_statistics'][subdoc]
    # NOTE(review): keys is the union of both samples, but first_doc[key]
    # / second_doc[key] are indexed directly -- a key present in only one
    # sample would raise KeyError.  Confirm samples always share keys.
    keys = set(first_doc.keys()) | set(second_doc.keys())
    server_statistics_per_second[subdoc] = {key: int(second_doc[key]) - int(first_doc[key])
                                            for key in keys if isinstance(first_doc[key], int)}
# Per-node opcounter deltas, preserving the replica-set/member nesting.
for node1, node2 in zip(stats_this_second['opcounters_per_node'], stats_next_second['opcounters_per_node']):
    node_opcounters_per_second = {}
    for repl, members in node2.items():
        node_opcounters_per_second[repl] = {}
        for member, ops in members.items():
            node_opcounters_per_second[repl][member] = {}
            for op, count in ops.items():
                node_opcounters_per_second[repl][member][op] = count - node1[repl][member][op]
    opcounters_per_node_per_second.append(node_opcounters_per_second)
# Absolute figures come from the second (latest) sample.
return {'opcounters_per_node_per_second': opcounters_per_node_per_second,
        'server_statistics_per_second': server_statistics_per_second,
        'aggregate_server_statistics': stats_next_second.get('aggregate_server_statistics'),
        'replication_lag': stats_next_second.get('replication_lag'),
        'aggregate_database_statistics': self.get_aggregate_database_stats()}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_stepdown_window(self): """Get information on this instance's stepdown window."""
url = self._service_url + 'stepdown/' response = requests.get(url, **self._instances._default_request_kwargs) return response.json()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_stepdown_window(self, start, end, enabled=True, scheduled=True, weekly=True): """Set the stepdown window for this instance. Date times are assumed to be UTC, so use UTC date times. :param datetime.datetime start: The datetime which the stepdown window is to open. :param datetime.datetime end: The datetime which the stepdown window is to close. :param bool enabled: A boolean indicating whether or not stepdown is to be enabled. :param bool scheduled: A boolean indicating whether or not to schedule stepdown. :param bool weekly: A boolean indicating whether or not to schedule compaction weekly. """
# Ensure a logical start and endtime is requested. if not start < end: raise TypeError('Parameter "start" must occur earlier in time than "end".') # Ensure specified window is less than a week in length. week_delta = datetime.timedelta(days=7) if not ((end - start) <= week_delta): raise TypeError('Stepdown windows can not be longer than 1 week in length.') url = self._service_url + 'stepdown/' data = { 'start': int(start.strftime('%s')), 'end': int(end.strftime('%s')), 'enabled': enabled, 'scheduled': scheduled, 'weekly': weekly, } response = requests.post( url, data=json.dumps(data), **self._instances._default_request_kwargs ) return response.json()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def brand(self, brand): """Sets the brand of this PaymentCard. :param brand: The brand of this PaymentCard. :type: str """
# Reject anything outside the supported card networks (None is allowed).
allowed_values = ["visa", "mastercard", "americanExpress", "discover"]
invalid = brand is not None and brand not in allowed_values
if invalid:
    message = (
        "Invalid value for `brand` ({0}), must be one of {1}"
        .format(brand, allowed_values)
    )
    raise ValueError(message)
self._brand = brand
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def latex_quote(s): """Quote special characters for LaTeX. (Incomplete, currently only deals with underscores, dollar and hash.) """
special = {'_':r'\_', '$':r'\$', '#':r'\#'} s = str(s) for char,repl in special.items(): new = s.replace(char, repl) s = new[:] return s
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tree_to_file(tree:'BubbleTree', outfile:str): """Compute the bubble representation of given power graph, and push it into given file."""
# Serialise the tree to bubble format and dump it in a single write.
bubble_text = tree_to_bubble(tree)
with open(outfile, 'w') as out:
    out.write(bubble_text)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lines_from_tree(tree, nodes_and_set:bool=False) -> iter: """Yield lines of bubble describing given BubbleTree"""
NODE = 'NODE\t{}' INCL = 'IN\t{}\t{}' EDGE = 'EDGE\t{}\t{}\t1.0' SET = 'SET\t{}' if nodes_and_set: for node in tree.nodes(): yield NODE.format(node) for node in tree.powernodes(): yield SET.format(node) for node, includeds in tree.inclusions.items(): for included in includeds: yield INCL.format(included, node) for node, succs in tree.edges.items(): for succ in succs: yield EDGE.format(node, succ)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_formdata(self, valuelist): """Join time string."""
if valuelist:
    # Join the submitted pieces back into one time string.
    joined = u' '.join(valuelist)
    try:
        parsed = time.strptime(joined, self.format)
    except ValueError:
        # Invalid input: clear the field and propagate the error.
        self.data = None
        raise
    else:
        # strptime fields 3..5 are hour, minute, second.
        self.data = datetime.time(*parsed[3:6])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_csrf_token(self, field): """Disable CSRF protection during testing."""
# CSRF validation is skipped entirely when the app runs under tests.
if not current_app.testing:
    super(InvenioBaseForm, self).validate_csrf_token(field)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_exchange_word_vectors( filename = "database.db", maximum_number_of_events = None ): """ Load exchange data and return dataset. """
# Load (utterance, response) word-vector pairs from a local exchanges
# database into a datavision.Dataset.
log.info("load word vectors of database (unknown)".format(
    filename = filename
))
# Ensure that the database exists.
if not os.path.isfile(filename):
    log.info("database (unknown) nonexistent".format(
        filename = filename
    ))
    program.terminate()
    raise Exception
# Access the database.
database = access_database(filename = filename)
# Access or create the exchanges table.
table_exchanges = database["exchanges"]
# Access exchanges.
table_name = "exchanges"
# Create a datavision dataset.
data = datavision.Dataset()
# progress
progress = shijian.Progress()
progress.engage_quick_calculation_mode()
number_of_entries = len(database[table_name])
index = 0
for index_entry, entry in enumerate(database[table_name].all()):
    # Stop early once the requested number of usable events is reached.
    if maximum_number_of_events is not None and\
       index >= int(maximum_number_of_events):
        log.info(
            "loaded maximum requested number of events " +
            "({maximum_number_of_events})\r".format(
                maximum_number_of_events = maximum_number_of_events
            )
        )
        break
    #unique_identifier = str(entry["id"])
    utteranceWordVector = str(entry["utteranceWordVector"])
    responseWordVector = str(entry["responseWordVector"])
    # Only rows with both vectors present are loaded; the stored text is
    # rebuilt into numpy arrays via eval.
    # SECURITY NOTE(review): eval() on database-stored strings assumes a
    # trusted database -- confirm before using with untrusted input.
    if utteranceWordVector != "None" and responseWordVector != "None":
        index += 1
        utteranceWordVector = eval("np." + utteranceWordVector.replace("float32", "np.float32"))
        responseWordVector = eval("np." + responseWordVector.replace("float32", "np.float32"))
        data.variable(index = index, name = "utteranceWordVector", value = utteranceWordVector)
        data.variable(index = index, name = "responseWordVector", value = responseWordVector )
        # (commented-out alternative retained from the original: store each
        # vector component as its own "uwv<i>" / "rwv<i>" variable)
    # Python 2 print statement (trailing comma suppresses the newline).
    print progress.add_datum(fraction = index_entry / number_of_entries),
return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_HEP_data( ROOT_filename = "output.root", tree_name = "nominal", maximum_number_of_events = None ): """ Load HEP data and return dataset. """
# Load selected events of a ROOT tree into a datavision.Dataset.
ROOT_file = open_ROOT_file(ROOT_filename)
tree = ROOT_file.Get(tree_name)
number_of_events = tree.GetEntries()
data = datavision.Dataset()
progress = shijian.Progress()
progress.engage_quick_calculation_mode()
# counters
number_of_events_loaded = 0
log.info("")
index = 0
for event in tree:
    # Stop early once the requested number of events is loaded.
    if maximum_number_of_events is not None and\
       number_of_events_loaded >= int(maximum_number_of_events):
        log.info(
            "loaded maximum requested number of events " +
            "({maximum_number_of_events})\r".format(
                maximum_number_of_events = maximum_number_of_events
            )
        )
        break
    # Python 2 print statement (trailing comma suppresses the newline).
    print progress.add_datum(fraction = (index + 2) / number_of_events),
    # Only events passing the selection contribute variables.
    if select_event(event):
        index += 1
        #event.GetReadEntry()
        #data.variable(index = index, name = "eventNumber", value = event.eventNumber)
        data.variable(index = index, name = "el_1_pt", value = event.el_pt[0])
        # (many further per-event variables were present but commented out
        # in the original: electron/jet kinematics, MET, event-shape
        # quantities and NBFricoNN discriminants)
        number_of_events_loaded += 1
log.info("")
return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sentiment( text = None, confidence = False ): """ This function accepts a string text input. It calculates the sentiment of the text, "pos" or "neg". By default, it returns this calculated sentiment. If selected, it returns a tuple of the calculated sentiment and the classification confidence. """
try: words = text.split(" ") # Remove empty strings. words = [word for word in words if word] features = word_features(words) classification = classifier.classify(features) confidence_classification = classifier.prob_classify(features).prob(classification) except: classification = None confidence_classification = None if confidence: return ( classification, confidence_classification ) else: return classification
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def usernames( self ): """ This function returns the list of unique usernames corresponding to the tweets stored in self. """
try: return list(set([tweet.username for tweet in self])) except: log.error("error -- possibly a problem with tweets stored")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def user_sentiments( self, username = None ): """ This function returns a list of all sentiments of the tweets of a specified user. """
try: return [tweet.sentiment for tweet in self if tweet.username == username] except: log.error("error -- possibly no username specified") return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def user_sentiments_most_frequent( self, username = None, single_most_frequent = True ): """ This function returns the most frequent calculated sentiments expressed in tweets of a specified user. By default, the single most frequent sentiment is returned. All sentiments with their corresponding frequencies can be returned also. """
try: sentiment_frequencies = collections.Counter(self.user_sentiments( username = username )) if single_most_frequent: return sentiment_frequencies.most_common(1)[0][0] else: return dict(sentiment_frequencies) except: log.error("error -- possibly no username specified") return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def users_sentiments_single_most_frequent( self, usernames = None, ): """ This function returns the single most frequent calculated sentiment expressed by all stored users or by a list of specified users as a dictionary. """
users_sentiments_single_most_frequent = dict() if usernames is None: usernames = self.usernames() try: for username in usernames: sentiment = self.user_sentiments_most_frequent( username = username, single_most_frequent = True ) users_sentiments_single_most_frequent[username] = sentiment return users_sentiments_single_most_frequent except: log.error("error -- possibly a problem with tweets stored") return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def format_progress(i, n): """Returns string containing a progress bar, a percentage, etc."""
if n == 0: fraction = 0 else: fraction = float(i)/n LEN_BAR = 25 num_plus = int(round(fraction*LEN_BAR)) s_plus = '+'*num_plus s_point = '.'*(LEN_BAR-num_plus) return '[{0!s}{1!s}] {2:d}/{3:d} - {4:.1f}%'.format(s_plus, s_point, i, n, fraction*100)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _format_exe_info(py_len, exeinfo, format, indlevel): """Renders ExeInfo object in specified format"""
ret = []
# Indentation is only applied in text mode.
ind = " " * indlevel * NIND if format.startswith("text") else ""
if format == "markdown-list":
    for si in exeinfo:
        ret.append(" - `{0!s}`: {1!s}".format(si.filename, si.description))
# NOTE(review): this `if` (rather than `elif`) splits the chain in two;
# behaviour is unchanged for the four known formats, but it looks like an
# oversight -- confirm before relying on it.
if format == "rest-list":
    for si in exeinfo:
        ret.append("* ``{0!s}``: {1!s}".format(si.filename, si.description))
elif format == "markdown-table":
    # The mask is built in two stages: .format() bakes in the column width,
    # the resulting "%-Ns | %s" template is then %-filled per row.
    mask = "%-{0:d}s | %s".format(py_len+2 )
    ret.append(mask % ("Script name", "Purpose"))
    ret.append("-" * (py_len + 3) + "|" + "-" * 10)
    for si in exeinfo:
        ret.append(mask % ("`{0!s}`".format(si.filename), si.description))
elif format == "text":
    sbc = 1  # spaces between columns
    for si in exeinfo:
        # Wrap the description into the space left of the filename column.
        ss = textwrap.wrap(si.description, 79 - py_len - sbc - indlevel*NIND)
        for i, s in enumerate(ss):
            if i == 0:
                # First wrapped line carries the filename and dot leaders.
                filecolumn = si.filename + " " + ("." * (py_len - len(si.filename)))
            else:
                # Continuation lines get blank padding of the same width.
                filecolumn = " " * (py_len + 1)
            ret.append("{}{}{}{}".format(ind, filecolumn, " "*sbc, s))
    ret.append("")
return ret
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _map_relation(c, language='any'): """ Map related concept or collection, leaving out the relations. :param c: the concept or collection to map :param string language: Language to render the relation's label in :rtype: :class:`dict` """
label = c.label(language) return { 'id': c.id, 'type': c.type, 'uri': c.uri, 'label': label.label if label else None }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_indicators(self, indicators=list(), private=False, tags=list()): """Add indicators to the remote instance."""
# Clean, whitelist-filter and cache-prune the indicators, then upload
# them to the remote instance in batches of 100.
if len(indicators) == 0:
    raise Exception("No indicators were identified.")
self.logger.debug("Checking {} indicators".format(len(indicators)))
cleaned = clean_indicators(indicators)
self.logger.debug("Cleaned {} indicators".format(len(cleaned)))
whitelisted = check_whitelist(cleaned)
self.logger.debug("Non-whitelisted {} indicators".format(len(whitelisted)))
indicators = prune_cached(whitelisted)
# Hashed copies are used instead of plaintext when `private` is set.
hashed = hash_values(indicators)
self.logger.debug("Non-cached {} indicators".format(len(indicators)))
self.logger.debug("Processing {} indicators".format(len(indicators)))
# One request per batch of (up to) 100 indicators.
request_count = int(math.ceil(len(indicators)/100.0))
if request_count == 0:
    mesg = "[!] No indicators were left to process after "
    mesg += "cleaning, whitelisting and checking the cache."
    return {'message': mesg}
stats = {'success': 0, 'failure': 0,
         'requests': request_count, 'written': 0}
mesg = "{} indicators found, making {} requests"
self.logger.debug(mesg.format(len(indicators), request_count))
if private:
    indicators = hashed
# A comma-separated tag string is normalised into a lowercase list.
if type(tags) == str:
    tags = [t.strip().lower() for t in tags.split(',')]
start, end = (0, 100)
for i, idx in enumerate(range(0, request_count)):
    if idx > 0:
        time.sleep(3)  # Ensure we never trip the limit
        self.logger.debug("Waiting 3 seconds before next request.")
    to_send = {'indicators': indicators[start:end], 'tags': tags}
    r = self._send_data('POST', 'admin', 'add-indicators', to_send)
    start, end = (end, end + 100)
    if not r['success']:
        stats['failure'] += 1
        continue
    stats['success'] += 1
    stats['written'] += r['writeCount']
    # Only successfully written batches are recorded in the local cache.
    cache_items(to_send['indicators'])
msg = ""
msg += "{written} indicators written using {requests} requests: "
msg += "{success} success, {failure} failure"
stats['message'] = msg.format(**stats)
return stats
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_indicators(self): """List indicators available on the remote instance."""
response = self._get('', 'get-indicators') response['message'] = "%i indicators:\n%s" % ( len(response['indicators']), "\n".join(response['indicators']) ) return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def besttype(x, encoding="utf-8", percentify=True): """Convert string x to the most useful type, i.e. int, float or unicode string. If x is a quoted string (single or double quotes) then the quotes are stripped and the enclosed string returned. The string can contain any number of quotes, it is only important that it begins and ends with either single or double quotes. *percentify* = ``True`` turns "34.4%" into the float 0.344. .. Note:: Strings will be returned as Unicode strings (using :func:`unicode`), based on the *encoding* argument, which is utf-8 by default. """
def unicodify(x): return to_unicode(x, encoding) def percent(x): try: if x.endswith("%"): x = float(x[:-1]) / 100. else: raise ValueError except (AttributeError, ValueError): raise ValueError return x x = unicodify(x) # make unicode as soon as possible try: x = x.strip() except AttributeError: pass m = re.match(r"""(?P<quote>['"])(?P<value>.*)(?P=quote)$""", x) # matches "<value>" or '<value>' where <value> COULD contain " or '! if m is None: # not a quoted string, try different types for converter in int, float, percent, unicodify: # try them in increasing order of lenience try: return converter(x) except ValueError: pass else: # quoted string x = unicodify(m.group('value')) return x
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _onmessage(cls, kmsg): """ Call on received message :param kser.schemas.Message kmsg: Kafka message :return: Kafka message :rtype: kser.schemas.Message """
logger.debug( "{}.ReceivedMessage {}[{}]".format( cls.__name__, kmsg.entrypoint, kmsg.uuid ), extra=dict(kmsg=kmsg.dump()) ) return cls.onmessage(kmsg)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register(cls, name, entrypoint): """ Register a new entrypoint :param str name: Key used by messages :param kser.entry.Entrypoint entrypoint: class to load :raises ValidationError: Invalid entry """
# Only Entrypoint subclasses may be registered; anything else is a
# configuration error surfaced as a ValidationError.
if issubclass(entrypoint, Entrypoint):
    cls.ENTRYPOINTS[name] = entrypoint
    logger.debug("{}.Registered: {}".format(cls.__name__, name))
else:
    raise ValidationError(
        "Invalid type for entry '{}', MUST implement "
        "kser.entry.Entrypoint".format(name),
        extra=dict(entrypoint=name)
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(cls, raw_data): """description of run"""
logger.debug("{}.ReceivedFromKafka: {}".format(
    cls.__name__, raw_data
))
try:
    # Deserialize the raw Kafka payload and run the pre-processing hook.
    kmsg = cls._onmessage(cls.TRANSPORT.loads(raw_data))
except Exception as exc:
    logger.error(
        "{}.ImportError: Failed to load data from kafka: {}".format(
            cls.__name__, exc
        ),
        extra=dict(kafka_raw_data=raw_data)
    )
    # Undecodable payload: no message uuid is available yet.
    return Result.from_exception(exc)
try:
    cls.start_processing(kmsg)
    if kmsg.entrypoint not in cls.ENTRYPOINTS:
        raise ValidationError(
            "Entrypoint '{}' not registred".format(kmsg.entrypoint),
            extra=dict(
                uuid=kmsg.uuid, entrypoint=kmsg.entrypoint,
                allowed=list(cls.ENTRYPOINTS.keys())
            )
        )
    # Build the registered entrypoint from the message and execute it.
    result = cls.ENTRYPOINTS[kmsg.entrypoint].from_Message(
        kmsg
    ).execute()
except Exception as exc:
    # Any failure above (including the ValidationError) becomes a Result
    # tagged with the message uuid, so ``result`` is always bound here.
    result = Result.from_exception(exc, kmsg.uuid)
finally:
    cls.stop_processing()
    # NOTE(review): returning from ``finally`` would also swallow any
    # in-flight BaseException (e.g. KeyboardInterrupt) -- confirm intended.
    # Success (< 300) and failure paths dispatch to separate callbacks.
    # noinspection PyUnboundLocalVariable
    if result and result.retcode < 300:
        return cls._onsuccess(kmsg=kmsg, result=result)
    else:
        return cls._onerror(kmsg=kmsg, result=result)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
        def deactivate(self, node_id):
        """Deactivate the node identified by node_id. Deactivates the node corresponding to node_id, which means that it can never be the output of a nearest_point query. Note: The node is not removed from the tree, its data is still available. Args: node_id (int): The node identifier (given to the user after its insertion). """
# Swap the stored (immutable) node for a copy whose ``active`` flag is
# cleared; the node's data stays in the tree and remains readable.
self.node_list[node_id] = self.node_list[node_id]._replace(active=False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def insert(self, point, data=None): """Insert a new node in the tree. Args: point (:obj:`tuple` of float or int): Stores the position of the node. data (:obj, optional): The information stored by the node. Returns: int: The identifier of the new node. Example: """
# Dimensionality of the point must match the tree's ``k``.
assert len(point) == self.k
if self.size == 0:
    # First insertion: lazily initialise the root region to all of R^k.
    # NOTE(review): ``[[...]] * k`` shares one inner list across axes;
    # safe here because descendants always copy a row before mutating it.
    if self.region is None:
        self.region = [[-math.inf, math.inf]] * self.k
    axis = 0
    return self.new_node(point, self.region, axis, data)
# Iteratively descends to one leaf
current_id = 0
while True:
    parent_node = self.node_list[current_id]
    axis = parent_node.axis
    # Choose the child subtree by comparing along the node's split axis.
    if point[axis] < parent_node.point[axis]:
        next_id, left = parent_node.left, True
    else:
        next_id, left = parent_node.right, False
    if next_id is None:
        break  # found a free child slot under ``parent_node``
    current_id = next_id
# Get the region delimited by the parent node
# (shallow-copy the outer list, deep-copy only the row we will mutate).
region = parent_node.region[:]
region[axis] = parent_node.region[axis][:]
# Limit to the child's region
limit = parent_node.point[axis]
# Update reference to the new node
if left:
    self.node_list[current_id] = parent_node._replace(left=self.size)
    region[axis][1] = limit  # left child lives below the split value
else:
    self.node_list[current_id] = parent_node._replace(right=self.size)
    region[axis][0] = limit  # right child lives at/above the split value
# The new node splits on the next axis, cycling through the k dimensions.
return self.new_node(point, region, (axis + 1) % self.k, data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_to_public(self, request, queryset): """ Set one or several releases to public """
# Bulk-flag the selected releases as public and stamp the modification
# time in a single UPDATE query.
queryset.update(is_public=True, modified=now())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def loads(cls, json_data): """description of load"""
try: return cls(**cls.MARSHMALLOW_SCHEMA.loads(json_data)) except marshmallow.exceptions.ValidationError as exc: raise ValidationError("Failed to load message", extra=exc.args[0])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def format(self, response): """ Format the data. In derived classes, it is usually better idea to override ``_format_data()`` than this method. :param response: devil's ``Response`` object or the data itself. May also be ``None``. :return: django's ``HttpResponse`` todo: this shouldn't change the given response. only return the formatted response. """
res = self._prepare_response(response) res.content = self._format_data(res.content, self.charset) return self._finalize_response(res)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse(self, data, charset=None): """ Parse the data. It is usually a better idea to override ``_parse_data()`` than this method in derived classes. :param charset: the charset of the data. Uses datamapper's default (``self.charset``) if not given. :returns: """
charset = charset or self.charset return self._parse_data(data, charset)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _decode_data(self, data, charset): """ Decode string data. :returns: unicode string """
try: return smart_unicode(data, charset) except UnicodeDecodeError: raise errors.BadRequest('wrong charset')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parse_data(self, data, charset): """ Parse the data :param data: the data (may be None) """
return self._decode_data(data, charset) if data else u''
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _finalize_response(self, response): """ Convert the ``Response`` object into django's ``HttpResponse`` :return: django's ``HttpResponse`` """
res = HttpResponse(content=response.content, content_type=self._get_content_type()) # status_code is set separately to allow zero res.status_code = response.code return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_mapper(self, mapper, content_type, shortname=None): """ Register new mapper. :param mapper: mapper object needs to implement ``parse()`` and ``format()`` functions. """
# Validate the mapper's interface (parse()/format()) before exposing it.
self._check_mapper(mapper)
# One mapper can be reachable under several names (content type plus an
# optional short name); register it under each of them.  Feed the pairs
# straight to update() instead of materialising an intermediate dict.
names = self._get_content_type_names(content_type, shortname)
self._datamappers.update((name, mapper) for name in names)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def select_formatter(self, request, resource): """ Select appropriate formatter based on the request. :param request: the HTTP request :param resource: the invoked resource """
# 1. get from resource if resource.mapper: return resource.mapper # 2. get from url mapper_name = self._get_name_from_url(request) if mapper_name: return self._get_mapper(mapper_name) # 3. get from accept header mapper_name = self._get_name_from_accept(request) if mapper_name: return self._get_mapper(mapper_name) # 4. use resource's default if resource.default_mapper: return resource.default_mapper # 5. use manager's default return self._get_default_mapper()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def select_parser(self, request, resource): """ Select appropriate parser based on the request. :param request: the HTTP request :param resource: the invoked resource """
# 1. get from resource if resource.mapper: return resource.mapper # 2. get from content type mapper_name = self._get_name_from_content_type(request) if mapper_name: return self._get_mapper(mapper_name) # 3. get from url mapper_name = self._get_name_from_url(request) if mapper_name: return self._get_mapper(mapper_name) # 4. use resource's default if resource.default_mapper: return resource.default_mapper # 5. use manager's default return self._get_default_mapper()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_mapper_by_content_type(self, content_type): """ Returs mapper based on the content type. """
content_type = util.strip_charset(content_type) return self._get_mapper(content_type)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_mapper(self, mapper_name): """ Return the mapper based on the given name. :returns: the mapper based on the given ``mapper_name`` :raises: NotAcceptable if we don't support the requested format. """
if mapper_name in self._datamappers: # mapper found return self._datamappers[mapper_name] else: # unsupported format return self._unknown_format(mapper_name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_name_from_content_type(self, request): """ Get name from Content-Type header """
content_type = request.META.get('CONTENT_TYPE', None) if content_type: # remove the possible charset-encoding info return util.strip_charset(content_type) return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_name_from_accept(self, request): """ Process the Accept HTTP header. Find the most suitable mapper that the client wants and we support. :returns: the preferred mapper based on the accept header or ``None``. """
accepts = util.parse_accept_header(request.META.get("HTTP_ACCEPT", "")) if not accepts: return None for accept in accepts: if accept[0] in self._datamappers: return accept[0] raise errors.NotAcceptable()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_name_from_url(self, request): """ Determine short name for the mapper based on the URL. Short name can be either in query string (e.g. ?format=json) or as an extension to the URL (e.g. myresource.json). :returns: short name of the mapper or ``None`` if not found. """
format = request.GET.get('format', None) if not format: match = self._format_query_pattern.match(request.path) if match and match.group('format'): format = match.group('format') return format