_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q264900
Nemo.register
validation
def register(self):
    """ Register the app using Blueprint

    :return: Nemo blueprint when an app is attached, otherwise None
    :rtype: flask.Blueprint
    """
    if self.app is None:
        return None
    if not self.blueprint:
        self.blueprint = self.create_blueprint()
    self.app.register_blueprint(self.blueprint)
    if self.cache is None:
        # No real cache configured: register the fake cache extension so
        # cache-aware templates keep working.
        setattr(self.app.jinja_env, "_fake_cache_extension", self)
        self.app.jinja_env.add_extension(FakeCacheExtension)
    return self.blueprint
python
{ "resource": "" }
q264901
Nemo.register_filters
validation
def register_filters(self):
    """ Register filters for Jinja to use

    .. note::  Extends the dictionary filters of jinja_env using self._filters list
    """
    jinja_filters = self.app.jinja_env.filters
    for name, plugin in self._filters:
        # The registered filter name drops the "f_" prefix.
        target = name.replace("f_", "")
        if plugin:
            # Plugin-provided filter: the attribute on the plugin instance
            # does not carry the "_<plugin name>" suffix.
            jinja_filters[target] = getattr(
                plugin, name.replace("_{}".format(plugin.name), "")
            )
        else:
            # Core filter shipped with flask_nemo.
            jinja_filters[target] = getattr(flask_nemo.filters, name)
python
{ "resource": "" }
q264902
Nemo.register_plugins
validation
def register_plugins(self):
    """ Register plugins in Nemo instance

    - Clear routes first if asked by one plugin
    - Clear assets if asked by one plugin and replace by the last plugin registered static_folder
    - Register each plugin
    - Append plugin routes to registered routes
    - Append plugin filters to registered filters
    - Append templates directory to given namespaces
    - Append assets (CSS, JS, statics) to given resources
    - Append render view (if exists) to Nemo.render stack
    """
    # Any single plugin requesting clear_routes wipes all accumulated routes.
    if len([plugin for plugin in self.__plugins__.values() if plugin.clear_routes]) > 0:
        # Clear current routes
        self._urls = list()
        self.cached = list()
    clear_assets = [plugin for plugin in self.__plugins__.values() if plugin.clear_assets]
    if len(clear_assets) > 0 and not self.prevent_plugin_clearing_assets:
        # Clear current Assets — reset to the class-level defaults.
        self.__assets__ = copy(type(self).ASSETS)
        static_path = [plugin.static_folder for plugin in clear_assets if plugin.static_folder]
        if len(static_path) > 0:
            # The last plugin registered wins the static folder.
            self.static_folder = static_path[-1]
    for plugin in self.__plugins__.values():
        # Routes are tagged with the plugin that owns them.
        self._urls.extend([(url, function, methods, plugin) for url, function, methods in plugin.routes])
        self._filters.extend([(filt, plugin) for filt in plugin.filters])
        self.__templates_namespaces__.extend(
            [(namespace, directory) for namespace, directory in plugin.templates.items()]
        )
        # Later plugins overwrite earlier assets registered under the same key.
        for asset_type in self.__assets__:
            for key, value in plugin.assets[asset_type].items():
                self.__assets__[asset_type][key] = value
        if plugin.augment:
            self.__plugins_render_views__.append(plugin)
        # Plugins may declare cached callables via a CACHED attribute.
        if hasattr(plugin, "CACHED"):
            for func in plugin.CACHED:
                self.cached.append((getattr(plugin, func), plugin))
        plugin.register_nemo(self)
python
{ "resource": "" }
q264903
Nemo.chunk
validation
def chunk(self, text, reffs):
    """ Handle a list of references depending on the text identifier using the chunker dictionary.

    :param text: Text object from which comes the references
    :type text: MyCapytains.resources.texts.api.Text
    :param reffs: List of references to transform
    :type reffs: References
    :return: Transformed list of references
    :rtype: [str]
    """
    # Use the text-specific chunker when registered, the "default" one otherwise.
    key = str(text.id)
    if key not in self.chunker:
        key = "default"
    return self.chunker[key](text, reffs)
python
{ "resource": "" }
q264904
add_tag
validation
def add_tag():
    """ Obtains the data from the pipe and appends the given tag. """
    if len(sys.argv) < 2:
        # No tag argument given on the command line.
        print_error("Usage: jk-add-tag <tag>")
        sys.exit()
    tag = sys.argv[1]
    doc_mapper = DocMapper()
    if doc_mapper.is_pipe:
        count = 0
        for obj in doc_mapper.get_pipe():
            obj.add_tag(tag)
            obj.update(tags=obj.tags)
            count += 1
        print_success("Added tag '{}' to {} object(s)".format(tag, count))
    else:
        print_error("Please use this script with pipes")
python
{ "resource": "" }
q264905
Config.set
validation
def set(self, section, key, value):
    """ Creates the section value if it does not exists and sets the value.
    Use write_config to actually set the value.

    :param section: name of the config section
    :param key: option name within the section
    :param value: value to store
    """
    # Idiom fix: `section not in` instead of `not section in` (PEP 8);
    # behavior is unchanged.
    if section not in self.config:
        self.config.add_section(section)
    self.config.set(section, key, value)
python
{ "resource": "" }
q264906
Config.get
validation
def get(self, section, key):
    """ This function tries to retrieve the value from the configfile
    otherwise will return a default.
    """
    try:
        return self.config.get(section, key)
    except (configparser.NoSectionError, configparser.NoOptionError):
        # Missing section or option: fall back to the hard-coded defaults.
        return self.defaults[section][key]
python
{ "resource": "" }
q264907
Config.config_dir
validation
def config_dir(self):
    """ Returns the configuration directory """
    # The jackal configuration lives in ~/.jackal
    return os.path.join(expanduser('~'), '.jackal')
python
{ "resource": "" }
q264908
Config.write_config
validation
def write_config(self, initialize_indices=False):
    """ Write the current config to disk to store them.

    :param initialize_indices: when True, also initialize the
        elasticsearch index mappings for every jackal document type.
    """
    if not os.path.exists(self.config_dir):
        os.mkdir(self.config_dir)
    with open(self.config_file, 'w') as configfile:
        self.config.write(configfile)
    if initialize_indices:
        index = self.get('jackal', 'index')
        # Function-scope imports — presumably to avoid a circular import
        # at module load time (TODO confirm).
        from jackal import Host, Range, Service, User, Credential, Log
        from jackal.core import create_connection
        create_connection(self)
        # Each doc type gets its own index, prefixed with the configured name.
        Host.init(index="{}-hosts".format(index))
        Range.init(index="{}-ranges".format(index))
        Service.init(index="{}-services".format(index))
        User.init(index="{}-users".format(index))
        Credential.init(index="{}-creds".format(index))
        Log.init(index="{}-log".format(index))
python
{ "resource": "" }
q264909
ensure_remote_branch_is_tracked
validation
def ensure_remote_branch_is_tracked(branch):
    """Track the specified remote branch if it is not already tracked.

    :param branch: name of the remote branch (without the ``origin/`` prefix)
    :raises SystemExit: when ``git checkout --track`` fails
    """
    if branch == MASTER_BRANCH:
        # We don't need to explicitly track the master branch, so we're done.
        return
    # Ensure the specified branch is in the local branch list.
    # BUG FIX: check_output returns bytes on Python 3; decode before
    # splitting on a str separator.
    output = subprocess.check_output(['git', 'branch', '--list']).decode('utf-8')
    for line in output.split('\n'):
        if line.strip() == branch:
            # We are already tracking the remote branch
            break
    else:
        # We are not tracking the remote branch, so track it.
        try:
            # Decode here too: sys.stdout.write expects str, not bytes.
            sys.stdout.write(subprocess.check_output(
                ['git', 'checkout', '--track',
                 'origin/%s' % branch]).decode('utf-8'))
        except subprocess.CalledProcessError:
            # Bail gracefully.
            raise SystemExit(1)
python
{ "resource": "" }
q264910
main
validation
def main(branch):
    """Checkout, update and branch from the specified branch.

    :param branch: name of the branch to base the working tree on
    """
    try:
        # Ensure that we're in a git repository. This command is silent unless
        # you're not actually in a git repository, in which case, you receive a
        # "Not a git repository" error message.
        output = subprocess.check_output(['git', 'rev-parse']).decode('utf-8')
        sys.stdout.write(output)
    except subprocess.CalledProcessError:
        # Bail if we're not in a git repository.
        return
    # This behavior ensures a better user experience for those that aren't
    # intimately familiar with git.
    ensure_remote_branch_is_tracked(branch)
    # Switch to the specified branch and update it.
    subprocess.check_call(['git', 'checkout', '--quiet', branch])
    # Pulling is always safe here, because we never commit to this branch.
    subprocess.check_call(['git', 'pull', '--quiet'])
    # Checkout the top commit in the branch, effectively going "untracked."
    subprocess.check_call(['git', 'checkout', '--quiet', '%s~0' % branch])
    # Clean up the repository of Python cruft. Because we've just switched
    # branches and compiled Python files should not be version controlled,
    # there are likely leftover compiled Python files sitting on disk which may
    # confuse some tools, such as sqlalchemy-migrate.
    # BUG FIX: the pattern was '"*.pyc"' — when passed in an argv list the
    # quotes are literal, so find matched nothing. Shell-quoting is not
    # needed (and wrong) without shell=True.
    subprocess.check_call(['find', '.', '-name', '*.pyc', '-delete'])
    # For the sake of user experience, give some familiar output.
    print('Your branch is up to date with branch \'origin/%s\'.' % branch)
python
{ "resource": "" }
q264911
get_interface_name
validation
def get_interface_name():
    """ Returns the interface name of the first not link_local and not loopback interface.

    Returns an empty string when no such interface exists.
    """
    interfaces = psutil.net_if_addrs()
    for name, details in interfaces.items():
        for detail in details:
            if detail.family == socket.AF_INET:
                ip_address = ipaddress.ip_address(detail.address)
                if not (ip_address.is_link_local or ip_address.is_loopback):
                    # BUG FIX: the original `break` only exited the inner
                    # loop, so a later interface could overwrite the match
                    # and the LAST qualifying interface was returned instead
                    # of the first. Return immediately.
                    return name
    return ''
python
{ "resource": "" }
q264912
Spoofing.load_targets
validation
def load_targets(self):
    """ load_targets will load the services with smb signing disabled
    and if ldap is enabled the services with the ldap port open.
    """
    ldap_services = []
    if self.ldap:
        # Services with the LDAP port (389) open and marked up.
        ldap_services = self.search.get_services(ports=[389], up=True)
    # ntlmrelayx accepts ldap:// target strings.
    self.ldap_strings = ["ldap://{}".format(service.address) for service in ldap_services]
    self.services = self.search.get_services(tags=['smb_signing_disabled'])
    self.ips = [str(service.address) for service in self.services]
python
{ "resource": "" }
q264913
Spoofing.write_targets
validation
def write_targets(self):
    """ write_targets will write the contents of ips and ldap_strings to the targets_file. """
    if len(self.ldap_strings) == 0 and len(self.ips) == 0:
        print_notification("No targets left")
        if self.auto_exit:
            # Shut down the watcher and child processes when configured to.
            if self.notifier:
                self.notifier.stop()
            self.terminate_processes()
    # One target per line: ldap strings first, then plain IPs.
    with open(self.targets_file, 'w') as f:
        f.write('\n'.join(self.ldap_strings + self.ips))
python
{ "resource": "" }
q264914
Spoofing.start_processes
validation
def start_processes(self):
    """ Starts the ntlmrelayx.py and responder processes.
    Assumes you have these programs in your path.
    """
    # -6: IPv6 support, -tf: targets file, -w: watch the targets file for
    # changes, -l: loot directory, -of: output file.
    self.relay = subprocess.Popen(['ntlmrelayx.py', '-6', '-tf', self.targets_file, '-w', '-l', self.directory, '-of', self.output_file], cwd=self.directory)
    self.responder = subprocess.Popen(['responder', '-I', self.interface_name])
python
{ "resource": "" }
q264915
Spoofing.callback
validation
def callback(self, event): """ Function that gets called on each event from pyinotify. """ # IN_CLOSE_WRITE -> 0x00000008 if event.mask == 0x00000008: if event.name.endswith('.json'): print_success("Ldapdomaindump file found") if event.name in ['domain_groups.json', 'domain_users.json']: if event.name == 'domain_groups.json': self.domain_groups_file = event.pathname if event.name == 'domain_users.json': self.domain_users_file = event.pathname if self.domain_groups_file and self.domain_users_file: print_success("Importing users") subprocess.Popen(['jk-import-domaindump', self.domain_groups_file, self.domain_users_file]) elif event.name == 'domain_computers.json': print_success("Importing computers") subprocess.Popen(['jk-import-domaindump', event.pathname]) # Ldap has been dumped, so remove the ldap targets. self.ldap_strings = [] self.write_targets() if event.name.endswith('_samhashes.sam'): host = event.name.replace('_samhashes.sam', '') # TODO import file. print_success("Secretsdump file, host ip: {}".format(host)) subprocess.Popen(['jk-import-secretsdump', event.pathname]) # Remove this system from this ip list. self.ips.remove(host) self.write_targets()
python
{ "resource": "" }
q264916
Spoofing.watch
validation
def watch(self):
    """ Watches directory for changes """
    wm = pyinotify.WatchManager()
    # All events are routed through self.callback, which filters on mask.
    self.notifier = pyinotify.Notifier(wm, default_proc_fun=self.callback)
    wm.add_watch(self.directory, pyinotify.ALL_EVENTS)
    try:
        # Blocks until interrupted.
        self.notifier.loop()
    except (KeyboardInterrupt, AttributeError):
        print_notification("Stopping")
    finally:
        self.notifier.stop()
        self.terminate_processes()
python
{ "resource": "" }
q264917
Spoofing.terminate_processes
validation
def terminate_processes(self):
    """ Terminate the processes. """
    # Only terminate processes that were actually started.
    for proc in (self.relay, self.responder):
        if proc:
            proc.terminate()
python
{ "resource": "" }
q264918
Spoofing.wait
validation
def wait(self):
    """ This function waits for the relay and responding processes to exit.
    Captures KeyboardInterrupt to shutdown these processes.
    """
    try:
        for proc in (self.relay, self.responder):
            proc.wait()
    except KeyboardInterrupt:
        print_notification("Stopping")
    finally:
        # Always clean up the child processes.
        self.terminate_processes()
python
{ "resource": "" }
q264919
QueryPrototype.getAnnotations
validation
def getAnnotations(self, targets, wildcard=".", include=None, exclude=None, limit=None, start=1, expand=False, **kwargs):
    """ Retrieve annotations from the query provider

    :param targets: The CTS URN(s) to query as the target of annotations
    :type targets: [MyCapytain.common.reference.URN], URN or None
    :param wildcard: Wildcard specifier for how to match the URN
    :type wildcard: str
    :param include: URI(s) of Annotation types to include in the results
    :type include: list(str)
    :param exclude: URI(s) of Annotation types to exclude from the results
    :type exclude: list(str)
    :param limit: The max number of results to return (Default is None for no limit)
    :type limit: int
    :param start: the starting record to return (Default is 1)
    :type start: int
    :param expand: Flag to state whether Annotations are expanded (Default is False)
    :type expand: bool
    :return: Tuple of (total number of Annotations found, list of Annotations)
    :rtype: (int, list(Annotation))

    .. note::
        Wildcard should be one of the following value

        - '.' to match exact,
        - '.%' to match exact plus lower in the hierarchy
        - '%.' to match exact + higher in the hierarchy
        - '-' to match in the range
        - '%.%' to match all
    """
    # Prototype implementation: no annotations are ever returned.
    return 0, []
python
{ "resource": "" }
q264920
Breadcrumb.render
validation
def render(self, **kwargs):
    """ Make breadcrumbs for a route

    :param kwargs: dictionary of named arguments used to construct the view
    :type kwargs: dict
    :return: List of dict items the view can use to construct the link.
    :rtype: {str: list({ "link": str, "title", str, "args", dict})}
    """
    # Fix: the original initialised `breadcrumbs = []` twice; the first
    # assignment was dead code and has been removed.
    breadcrumbs = []
    if "collections" in kwargs:
        # Root of the trail: the collections listing.
        breadcrumbs = [{
            "title": "Text Collections",
            "link": ".r_collections",
            "args": {}
        }]
        if "parents" in kwargs["collections"]:
            # Parents arrive child-first; reverse so the trail reads root-first.
            breadcrumbs += [
                {
                    "title": parent["label"],
                    "link": ".r_collection_semantic",
                    "args": {
                        "objectId": parent["id"],
                        "semantic": f_slugify(parent["label"]),
                    },
                }
                for parent in kwargs["collections"]["parents"]
            ][::-1]
        if "current" in kwargs["collections"]:
            breadcrumbs.append({
                "title": kwargs["collections"]["current"]["label"],
                "link": None,
                "args": {}
            })
    # don't link the last item in the trail
    if len(breadcrumbs) > 0:
        breadcrumbs[-1]["link"] = None
    return {"breadcrumbs": breadcrumbs}
python
{ "resource": "" }
q264921
main
validation
def main():
    """ This function obtains hosts from core and starts a nessus scan on these hosts.
    The nessus tag is appended to the host tags.
    """
    config = Config()
    core = HostSearch()
    # '!nessus' excludes hosts already scanned (tagged 'nessus').
    hosts = core.get_hosts(tags=['!nessus'], up=True)
    hosts = [host for host in hosts]
    host_ips = ",".join([str(host.address) for host in hosts])
    url = config.get('nessus', 'host')
    access = config.get('nessus', 'access_key')
    secret = config.get('nessus', 'secret_key')
    template_name = config.get('nessus', 'template_name')
    nessus = Nessus(access, secret, url, template_name)
    scan_id = nessus.create_scan(host_ips)
    nessus.start_scan(scan_id)
    # Tag hosts so they are skipped on the next run.
    for host in hosts:
        host.add_tag('nessus')
        host.save()
    Logger().log("nessus", "Nessus scan started on {} hosts".format(len(hosts)), {'scanned_hosts': len(hosts)})
python
{ "resource": "" }
q264922
Nessus.get_template_uuid
validation
def get_template_uuid(self):
    """ Retrieves the uuid of the given template name.

    Returns None implicitly when no template matches self.template_name.
    """
    # verify=False: Nessus instances commonly use self-signed certificates.
    response = requests.get(self.url + 'editor/scan/templates', headers=self.headers, verify=False)
    templates = json.loads(response.text)
    for template in templates['templates']:
        if template['name'] == self.template_name:
            return template['uuid']
python
{ "resource": "" }
q264923
Nessus.create_scan
validation
def create_scan(self, host_ips):
    """ Creates a scan with the given host ips
    Returns the scan id of the created object.

    :param host_ips: comma-separated string of target IP addresses
    """
    now = datetime.datetime.now()
    data = {
        "uuid": self.get_template_uuid(),
        "settings": {
            # Scan names are timestamped for uniqueness.
            "name": "jackal-" + now.strftime("%Y-%m-%d %H:%M"),
            "text_targets": host_ips
        }
    }
    response = requests.post(self.url + 'scans', data=json.dumps(data), verify=False, headers=self.headers)
    # requests.Response truthiness is status_code < 400, so failures
    # fall through and return None implicitly.
    if response:
        result = json.loads(response.text)
        return result['scan']['id']
python
{ "resource": "" }
q264924
Nessus.start_scan
validation
def start_scan(self, scan_id):
    """ Starts the scan identified by the scan_id.

    :param scan_id: id of a previously created Nessus scan
    """
    requests.post(self.url + 'scans/{}/launch'.format(scan_id), verify=False, headers=self.headers)
python
{ "resource": "" }
q264925
cmpToDataStore_uri
validation
def cmpToDataStore_uri(base, ds1, ds2):
    '''Bases the comparison of the datastores on URI alone.'''
    # Ask difflib which of the two URIs most closely resembles the base URI.
    matches = difflib.get_close_matches(base.uri, [ds1.uri, ds2.uri], 1, cutoff=0.5)
    if not matches:
        # Neither URI is similar enough: treat the datastores as equal.
        return 0
    return -1 if matches[0] == ds1.uri else 1
python
{ "resource": "" }
q264926
JackalDoc.add_tag
validation
def add_tag(self, tag):
    """ Adds a tag to the list of tags and makes sure the result list contains only unique results. """
    # Work in set-space to deduplicate; self.tags may be None.
    unique = set(self.tags or [])
    unique.add(tag)
    self.tags = list(unique)
python
{ "resource": "" }
q264927
JackalDoc.remove_tag
validation
def remove_tag(self, tag):
    """ Removes a tag from this object """
    # discard (unlike remove) is a no-op when the tag is absent,
    # matching the original set-difference semantics.
    remaining = set(self.tags or [])
    remaining.discard(tag)
    self.tags = list(remaining)
python
{ "resource": "" }
q264928
JackalDoc.to_dict
validation
def to_dict(self, include_meta=False):
    """ Returns the result as a dictionary, provide the include_meta
    flag to also show information like index and doctype.
    """
    result = super(JackalDoc, self).to_dict(include_meta=include_meta)
    if not include_meta:
        return result
    # Flatten the _source sub-document into the top-level dict.
    source = result.pop('_source')
    return {**result, **source}
python
{ "resource": "" }
q264929
AnnotationsApiPlugin.r_annotations
validation
def r_annotations(self):
    """ Route to retrieve annotations by target

    :param target_urn: The CTS URN for which to retrieve annotations
    :type target_urn: str
    :return: a JSON string containing count and list of resources
    :rtype: {str: Any}
    """
    target = request.args.get("target", None)
    wildcard = request.args.get("wildcard", ".", type=str)
    include = request.args.get("include")
    exclude = request.args.get("exclude")
    limit = request.args.get("limit", None, type=int)
    start = request.args.get("start", 1, type=int)
    expand = request.args.get("expand", False, type=bool)
    if target:
        # Validate the URN before querying.
        try:
            urn = MyCapytain.common.reference.URN(target)
        except ValueError:
            return "invalid urn", 400
        count, annotations = self.__queryinterface__.getAnnotations(urn, wildcard=wildcard, include=include,
                                                                    exclude=exclude, limit=limit, start=start,
                                                                    expand=expand)
    else:
        # Note that this implementation is not done for too much annotations
        # because we do not implement pagination here
        count, annotations = self.__queryinterface__.getAnnotations(None, limit=limit, start=start, expand=expand)
    mapped = []
    # Build a W3C-style AnnotationCollection response.
    response = {
        "@context": type(self).JSONLD_CONTEXT,
        "id": url_for(".r_annotations", start=start, limit=limit),
        "type": "AnnotationCollection",
        "startIndex": start,
        "items": [
        ],
        "total": count
    }
    for a in annotations:
        mapped.append({
            "id": url_for(".r_annotation", sha=a.sha),
            "body": url_for(".r_annotation_body", sha=a.sha),
            "type": "Annotation",
            "target": a.target.to_json(),
            "dc:type": a.type_uri,
            "owl:sameAs": [a.uri],
            "nemo:slug": a.slug
        })
    response["items"] = mapped
    response = jsonify(response)
    return response
python
{ "resource": "" }
q264930
Enum.lookup
validation
def lookup(cls, key, get=False):
    """Returns the label for a given Enum key"""
    if not get:
        # Strict mode: raises KeyError for an unknown key.
        return cls._item_dict[key].name
    # Lenient mode: fall back to the key itself when unknown.
    item = cls._item_dict.get(key)
    return item.name if item else key
python
{ "resource": "" }
q264931
Enum.verbose
validation
def verbose(cls, key=False, default=''):
    """Returns the verbose name for a given enum value"""
    if key is False:
        # No key given: return all (key, value) pairs, ordered by each
        # item's sort attribute, falling back to its key when sort is falsy.
        ordered = sorted(cls._item_dict.values(), key=lambda item: item.sort or item.key)
        return [(item.key, item.value) for item in ordered]
    item = cls._item_dict.get(key)
    return item.value if item else default
python
{ "resource": "" }
q264932
get_configured_dns
validation
def get_configured_dns():
    """ Returns the configured DNS servers with the use of nmcli.

    Returns an empty list when nmcli is unavailable or fails.
    """
    ips = []
    try:
        output = subprocess.check_output(['nmcli', 'device', 'show'])
        output = output.decode('utf-8')
        for line in output.split('\n'):
            if 'DNS' in line:
                # Extract every dotted-quad IPv4 address on the line.
                pattern = r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"
                for hit in re.findall(pattern, line):
                    ips.append(hit)
    except FileNotFoundError:
        # nmcli is not installed.
        pass
    except subprocess.CalledProcessError:
        # Robustness fix: nmcli exists but exited non-zero (e.g.
        # NetworkManager not running) — previously this crashed.
        pass
    return ips
python
{ "resource": "" }
q264933
zone_transfer
validation
def zone_transfer(address, dns_name):
    """ Tries to perform a zone transfer.

    :param address: IP address of the name server to query
    :param dns_name: zone (domain) name to transfer
    :return: list of A-record IP addresses found, empty on refusal
    """
    ips = []
    try:
        print_notification("Attempting dns zone transfer for {} on {}".format(dns_name, address))
        z = dns.zone.from_xfr(dns.query.xfr(address, dns_name))
    except dns.exception.FormError:
        # Server refused or mangled the transfer.
        print_notification("Zone transfer not allowed")
        return ips
    names = z.nodes.keys()
    print_success("Zone transfer successfull for {}, found {} entries".format(address, len(names)))
    for n in names:
        node = z[n]
        data = node.get_rdataset(dns.rdataclass.IN, dns.rdatatype.A)
        if data:
            # TODO add hostnames to entries.
            # hostname = n.to_text()
            for item in data.items:
                # NOTE(review): this rebinds the `address` parameter inside
                # the loop — harmless here, but worth renaming.
                address = item.address
                ips.append(address)
    return ips
python
{ "resource": "" }
q264934
resolve_domains
validation
def resolve_domains(domains, disable_zone=False):
    """ Resolves the list of domains and returns the ips.

    :param domains: iterable of domain names to resolve (A records)
    :param disable_zone: when True, skip the zone-transfer attempt per answer
    :return: list of IP address strings
    """
    dnsresolver = dns.resolver.Resolver()
    ips = []
    for domain in domains:
        print_notification("Resolving {}".format(domain))
        try:
            result = dnsresolver.query(domain, 'A')
            for a in result.response.answer[0]:
                ips.append(str(a))
                if not disable_zone:
                    # Also try a zone transfer against each answering server.
                    ips.extend(zone_transfer(str(a), domain))
        except dns.resolver.NXDOMAIN as e:
            # Domain does not exist; report and continue with the rest.
            print_error(e)
    return ips
python
{ "resource": "" }
q264935
parse_ips
validation
def parse_ips(ips, netmask, include_public):
    """ Parses the list of ips, turns these into ranges based on the netmask given.
    Set include_public to True to include public IP adresses.

    :param ips: iterable of IP address strings
    :param netmask: netmask (prefix length or dotted quad) used to derive ranges
    :param include_public: include public addresses, not only private ones
    :return: dict with 'ips' (included addresses) and 'ranges' (derived networks)
    """
    hs = HostSearch()
    rs = RangeSearch()
    ranges = []
    # Deduplicate input before processing.
    ips = list(set(ips))
    included_ips = []
    print_success("Found {} ips".format(len(ips)))
    for ip in ips:
        ip_address = ipaddress.ip_address(ip)
        if include_public or ip_address.is_private:
            # To stop the screen filling with ranges.
            if len(ips) < 15:
                print_success("Found ip: {}".format(ip))
            host = hs.id_to_object(ip)
            host.add_tag('dns_discover')
            host.save()
            # strict=False allows host bits to be set in the network address.
            r = str(ipaddress.IPv4Network("{}/{}".format(ip, netmask), strict=False))
            ranges.append(r)
            included_ips.append(ip)
        else:
            print_notification("Excluding ip {}".format(ip))
    ranges = list(set(ranges))
    print_success("Found {} ranges".format(len(ranges)))
    for rng in ranges:
        # To stop the screen filling with ranges.
        if len(ranges) < 15:
            print_success("Found range: {}".format(rng))
        r = rs.id_to_object(rng)
        r.add_tag('dns_discover')
        r.save()
    stats = {}
    stats['ips'] = included_ips
    stats['ranges'] = ranges
    return stats
python
{ "resource": "" }
q264936
create_connection
validation
def create_connection(conf):
    """ Creates a connection based upon the given configuration object.

    :param conf: jackal Config instance providing the 'jackal' section
    """
    host_config = {}
    host_config['hosts'] = [conf.get('jackal', 'host')]
    # Config values are strings; '1'/'0' toggles are converted via int().
    if int(conf.get('jackal', 'use_ssl')):
        host_config['use_ssl'] = True
    if conf.get('jackal', 'ca_certs'):
        host_config['ca_certs'] = conf.get('jackal', 'ca_certs')
    # NOTE(review): the toggle reads 'client_certs' (plural) but the values
    # read 'client_cert'/'client_key' (singular) — looks like a key
    # mismatch; confirm against the config defaults.
    if int(conf.get('jackal', 'client_certs')):
        host_config['client_cert'] = conf.get('jackal', 'client_cert')
        host_config['client_key'] = conf.get('jackal', 'client_key')
    # Disable hostname checking for now.
    host_config['ssl_assert_hostname'] = False
    connections.create_connection(**host_config)
python
{ "resource": "" }
q264937
CoreSearch.search
validation
def search(self, number=None, *args, **kwargs):
    """ Searches the elasticsearch instance to retrieve the requested documents.

    :param number: max number of documents to return; when None, the
        '-n' command-line argument is consulted, else a full scan is done.
    :return: list of hits, empty on connection or index errors
    """
    search = self.create_search(*args, **kwargs)
    try:
        if number:
            response = search[0:number]
        else:
            # Fall back to the command-line '--number' argument if given.
            args, _ = self.core_parser.parse_known_args()
            if args.number:
                response = search[0:args.number]
            else:
                # No limit requested: scan the whole result set.
                response = search.scan()
        return [hit for hit in response]
    except NotFoundError:
        print_error("The index was not found, have you initialized the index?")
        return []
    except (ConnectionError, TransportError):
        print_error("Cannot connect to elasticsearch")
        return []
python
{ "resource": "" }
q264938
CoreSearch.argument_search
validation
def argument_search(self):
    """ Uses the command line arguments to fill the search function and call it. """
    # Unknown args are ignored so other tools' flags don't break parsing.
    parsed, _ = self.argparser.parse_known_args()
    return self.search(**vars(parsed))
python
{ "resource": "" }
q264939
CoreSearch.count
validation
def count(self, *args, **kwargs):
    """ Returns the number of results after filtering with the given arguments.

    NOTE(review): returns None (not 0) when elasticsearch is unreachable
    or the index is missing — callers should handle that.
    """
    search = self.create_search(*args, **kwargs)
    try:
        return search.count()
    except NotFoundError:
        print_error("The index was not found, have you initialized the index?")
    except (ConnectionError, TransportError):
        print_error("Cannot connect to elasticsearch")
python
{ "resource": "" }
q264940
CoreSearch.argument_count
validation
def argument_count(self):
    """ Uses the command line arguments to fill the count function and call it. """
    # Unknown args are ignored so other tools' flags don't break parsing.
    parsed, _ = self.argparser.parse_known_args()
    return self.count(**vars(parsed))
python
{ "resource": "" }
q264941
CoreSearch.get_pipe
validation
def get_pipe(self, object_type):
    """ Returns a generator that maps the input of the pipe to an elasticsearch object.
    Will call id_to_object if it cannot serialize the data from json.
    """
    for raw in sys.stdin:
        stripped = raw.strip()
        try:
            yield object_type(**json.loads(stripped))
        except ValueError:
            # Not valid json: treat the line as a bare identifier.
            yield self.id_to_object(stripped)
python
{ "resource": "" }
q264942
RangeSearch.id_to_object
validation
def id_to_object(self, line):
    """ Resolves an ip adres to a range object, creating it if it doesn't exists.

    :param line: the range identifier (used as the elasticsearch doc id)
    :return: existing or newly created Range document
    """
    # ignore=404 makes get() return a falsy result instead of raising.
    result = Range.get(line, ignore=404)
    if not result:
        result = Range(range=line)
        result.save()
    return result
python
{ "resource": "" }
q264943
RangeSearch.argparser
validation
def argparser(self):
    """ Argparser option with search functionality specific for ranges.

    Extends the shared core parser with a '--range' option.
    """
    core_parser = self.core_parser
    core_parser.add_argument('-r', '--range', type=str, help="The range to search for use")
    return core_parser
python
{ "resource": "" }
q264944
ServiceSearch.object_to_id
validation
def object_to_id(self, obj):
    """ Searches elasticsearch for objects with the same address, protocol, port and state.

    :param obj: Service-like object to look up
    :return: the elasticsearch document id of the first match, or None
    """
    search = Service.search()
    search = search.filter("term", address=obj.address)
    search = search.filter("term", protocol=obj.protocol)
    search = search.filter("term", port=obj.port)
    search = search.filter("term", state=obj.state)
    if search.count():
        # Only the first matching document's id is returned.
        result = search[0].execute()[0]
        return result.meta.id
    else:
        return None
python
{ "resource": "" }
q264945
UserSearch.id_to_object
validation
def id_to_object(self, line):
    """ Resolves the given id to a user object, if it doesn't exists it will be created.

    :param line: the username (used as the elasticsearch doc id)
    :return: existing or newly created User document
    """
    # ignore=404 makes get() return a falsy result instead of raising.
    user = User.get(line, ignore=404)
    if not user:
        user = User(username=line)
        user.save()
    return user
python
{ "resource": "" }
q264946
UserSearch.get_domains
validation
def get_domains(self):
    """ Retrieves the domains of the users from elastic.

    :return: list of domain names, most frequent first (up to 100)
    """
    search = User.search()
    # Terms aggregation over the 'domain' field, ordered by document count.
    search.aggs.bucket('domains', 'terms', field='domain', order={'_count': 'desc'}, size=100)
    response = search.execute()
    return [entry.key for entry in response.aggregations.domains.buckets]
python
{ "resource": "" }
q264947
DocMapper.get_pipe
validation
def get_pipe(self):
    """ Returns a list that maps the input of the pipe to an elasticsearch object.
    Will call id_to_object if it cannot serialize the data from json.
    """
    results = []
    for raw in sys.stdin:
        try:
            results.append(self.line_to_object(raw.strip()))
        except (ValueError, KeyError):
            # Skip lines that cannot be parsed into an object.
            pass
    return results
python
{ "resource": "" }
q264948
Protocol.tree2commands
validation
def tree2commands(self, adapter, session, lastcmds, xsync):
    '''Consumes an ET protocol tree and converts it to state.Command commands'''
    # do some preliminary sanity checks...
    # todo: do i really want to be using assert statements?...
    assert xsync.tag == constants.NODE_SYNCML
    assert len(xsync) == 2
    assert xsync[0].tag == constants.CMD_SYNCHDR
    assert xsync[1].tag == constants.NODE_SYNCBODY
    version = xsync[0].findtext('VerProto')
    if version != constants.SYNCML_VERSION_1_2:
        raise common.FeatureNotSupported(
            'unsupported SyncML version "%s" (expected "%s")'
            % (version, constants.SYNCML_VERSION_1_2))
    verdtd = xsync[0].findtext('VerDTD')
    if verdtd != constants.SYNCML_DTD_VERSION_1_2:
        raise common.FeatureNotSupported(
            'unsupported SyncML DTD version "%s" (expected "%s")'
            % (verdtd, constants.SYNCML_DTD_VERSION_1_2))
    ret = self.initialize(adapter, session, xsync)
    hdrcmd = ret[0]
    if session.isServer:
        log.debug('received request SyncML message from "%s" (s%s.m%s)',
                  hdrcmd.target, hdrcmd.sessionID, hdrcmd.msgID)
    else:
        log.debug('received response SyncML message from "%s" (s%s.m%s)',
                  lastcmds[0].target, lastcmds[0].sessionID, lastcmds[0].msgID)
    try:
        return self._tree2commands(adapter, session, lastcmds, xsync, ret)
    # BUG FIX: was the Python 2 form `except Exception, e:`, which is a
    # SyntaxError on Python 3 (the rest of this codebase is Python 3).
    except Exception as e:
        if not session.isServer:
            raise
        # TODO: make this configurable as to whether or not any error
        # is sent back to the peer as a SyncML "standardized" error
        # status...
        code = '%s.%s' % (e.__class__.__module__, e.__class__.__name__)
        msg = ''.join(traceback.format_exception_only(type(e), e)).strip()
        log.exception('failed while interpreting command tree: %s', msg)
        # TODO: for some reason, the active exception is not being logged...
        return [
            hdrcmd,
            state.Command(
                name=constants.CMD_STATUS,
                cmdID='1',
                msgRef=session.pendingMsgID,
                cmdRef=0,
                sourceRef=xsync[0].findtext('Source/LocURI'),
                targetRef=xsync[0].findtext('Target/LocURI'),
                statusOf=constants.CMD_SYNCHDR,
                statusCode=constants.STATUS_COMMAND_FAILED,
                errorCode=code,
                errorMsg=msg,
                errorTrace=''.join(traceback.format_exception(type(e), e, sys.exc_info()[2])),
            ),
            state.Command(name=constants.CMD_FINAL),
        ]
python
{ "resource": "" }
q264949
initialize_indices
validation
def initialize_indices():
    """ Initializes the indices """
    # Create the elasticsearch mapping for every jackal document type.
    for doc_type in (Host, Range, Service, User, Credential, Log):
        doc_type.init()
python
{ "resource": "" }
q264950
parse_single_computer
validation
def parse_single_computer(entry):
    """ Parse the entry into a computer object.

    :param entry: a single ldapdomaindump computer record
    :return: Computer with dns_hostname, description, os, group_id and,
        when resolvable, an ip address
    """
    computer = Computer(dns_hostname=get_field(entry, 'dNSHostName'), description=get_field(
        entry, 'description'), os=get_field(entry, 'operatingSystem'), group_id=get_field(entry, 'primaryGroupID'))
    # The dump may carry an explicit IPv4 field; validate it before use.
    try:
        ip = str(ipaddress.ip_address(get_field(entry, 'IPv4')))
    except ValueError:
        ip = ''
    if ip:
        computer.ip = ip
    elif computer.dns_hostname:
        # No usable IPv4 field: fall back to resolving the hostname.
        computer.ip = resolve_ip(computer.dns_hostname)
    return computer
python
{ "resource": "" }
q264951
parse_domain_computers
validation
def parse_domain_computers(filename):
    """ Parse the file and extract the computers, import the computers that resolve into jackal.

    :param filename: path to the ldapdomaindump computers json file
    :return: number of computers that resolved and were imported
    """
    with open(filename) as f:
        data = json.loads(f.read())
    hs = HostSearch()
    count = 0
    entry_count = 0
    print_notification("Parsing {} entries".format(len(data)))
    for system in data:
        entry_count += 1
        parsed = parse_single_computer(system)
        if parsed.ip:
            try:
                host = hs.id_to_object(parsed.ip)
                host.description.append(parsed.description)
                host.hostname.append(parsed.dns_hostname)
                if parsed.os:
                    host.os = parsed.os
                # NOTE(review): parse_single_computer does not visibly set
                # `dc`; presumably a Computer default — confirm.
                host.domain_controller = parsed.dc
                host.add_tag('domaindump')
                host.save()
                count += 1
            except ValueError:
                pass
        # Progress indicator on a single rewritten line.
        sys.stdout.write('\r')
        sys.stdout.write(
            "[{}/{}] {} resolved".format(entry_count, len(data), count))
        sys.stdout.flush()
    sys.stdout.write('\r')
    return count
python
{ "resource": "" }
q264952
parse_user
validation
def parse_user(entry, domain_groups):
    """Parse one domaindump user entry into a plain dict.

    :param entry: raw ldapdomaindump user record
    :param domain_groups: mapping of primary group id to group name
    """
    distinguished_name = get_field(entry, 'distinguishedName')
    result = {
        'domain': ".".join(distinguished_name.split(',DC=')[1:]),
        'name': get_field(entry, 'name'),
        'username': get_field(entry, 'sAMAccountName'),
        'description': get_field(entry, 'description'),
        'sid': get_field(entry, 'objectSid').split('-')[-1],
    }
    primary_group = get_field(entry, 'primaryGroupID')
    groups = []
    for member in entry['attributes'].get('memberOf', []):
        # memberOf entries are DNs; keep only the CN= components.
        for token in member.split(','):
            if token.startswith('CN='):
                groups.append(token[3:])
    # NOTE(review): domain_groups is built with int keys elsewhere while
    # primary_group may be a string here — confirm the key types match.
    groups.append(domain_groups.get(primary_group, ''))
    result['groups'] = groups
    flags = []
    try:
        uac = int(get_field(entry, 'userAccountControl'))
        for flag, bit in uac_flags.items():
            if uac & bit:
                flags.append(flag)
    except ValueError:
        # Missing or non-numeric userAccountControl: leave flags empty.
        pass
    result['flags'] = flags
    return result
python
{ "resource": "" }
q264953
parse_domain_users
validation
def parse_domain_users(domain_users_file, domain_groups_file):
    """Import users from domaindump files; return the number imported.

    :param domain_users_file: path to the domain_users JSON dump
    :param domain_groups_file: optional groups dump used to resolve
        primary group ids to names
    """
    with open(domain_users_file) as f:
        users = json.loads(f.read())
    domain_groups = {}
    if domain_groups_file:
        with open(domain_groups_file) as f:
            for group in json.loads(f.read()):
                sid = get_field(group, 'objectSid')
                domain_groups[int(sid.split('-')[-1])] = get_field(group, 'cn')
    user_search = UserSearch()
    count = 0
    total = len(users)
    print_notification("Importing {} users".format(total))
    for entry in users:
        result = parse_user(entry, domain_groups)
        user = user_search.id_to_object(result['username'])
        user.name = result['name']
        user.domain.append(result['domain'])
        user.description = result['description']
        user.groups.extend(result['groups'])
        user.flags.extend(result['flags'])
        user.sid = result['sid']
        user.add_tag("domaindump")
        user.save()
        count += 1
        # Single-line progress indicator.
        sys.stdout.write('\r')
        sys.stdout.write("[{}/{}]".format(count, total))
        sys.stdout.flush()
    sys.stdout.write('\r')
    return count
python
{ "resource": "" }
q264954
import_domaindump
validation
def import_domaindump():
    """CLI entry point: import ldapdomaindump result files.

    Parses computer/user/group JSON dumps from the command line and
    stores hosts and users, logging summary statistics at the end.
    """
    parser = argparse.ArgumentParser(
        description="Imports users, groups and computers result files from the ldapdomaindump tool, will resolve the names from domain_computers output for IPs")
    parser.add_argument("files", nargs='+', help="The domaindump files to import")
    arguments = parser.parse_args()
    domain_users_file = ''
    domain_groups_file = ''
    computer_count = 0
    user_count = 0
    stats = {}
    for filename in arguments.files:
        if filename.endswith('domain_computers.json'):
            print_notification('Parsing domain computers')
            computer_count = parse_domain_computers(filename)
            if computer_count:
                stats['hosts'] = computer_count
                print_success("{} hosts imported".format(computer_count))
        elif filename.endswith('domain_users.json'):
            domain_users_file = filename
        elif filename.endswith('domain_groups.json'):
            domain_groups_file = filename
    # Users are imported last so the groups file (if any) is known.
    if domain_users_file:
        print_notification("Parsing domain users")
        user_count = parse_domain_users(domain_users_file, domain_groups_file)
        if user_count:
            print_success("{} users imported".format(user_count))
            stats['users'] = user_count
    Logger().log(
        "import_domaindump",
        'Imported domaindump, found {} user, {} systems'.format(user_count, computer_count),
        stats)
python
{ "resource": "" }
q264955
autocomplete
validation
def autocomplete(query, country=None, hurricanes=False, cities=True, timeout=5):
    """Make an autocomplete API request for cities and/or hurricanes.

    :param string query: search text
    :param string country: optional two-letter country code filter
    :param boolean hurricanes: include hurricanes in the search
    :param boolean cities: include cities in the search
    :param integer timeout: request timeout in seconds
    :returns: parsed results of the autocomplete API request
    :rtype: dict
    """
    params = {
        'query': quote(query),
        'country': country or '',
        'hurricanes': 1 if hurricanes else 0,
        'cities': 1 if cities else 0,
        'format': 'JSON',
    }
    response = requests.get(AUTOCOMPLETE_URL.format(**params), timeout=timeout)
    return json.loads(response.content)['RESULTS']
python
{ "resource": "" }
q264956
request
validation
def request(key, features, query, timeout=5):
    """Make an API request.

    :param string key: API key to use
    :param list features: requested features, filtered against FEATURES
    :param string query: query to send
    :param integer timeout: request timeout in seconds
    :returns: decoded JSON result of the API request
    :rtype: dict
    """
    params = {
        'key': key,
        'features': '/'.join(f for f in features if f in FEATURES),
        'query': quote(query),
        'format': 'json',
    }
    response = requests.get(API_URL.format(**params), timeout=timeout)
    return json.loads(_unicode(response.content))
python
{ "resource": "" }
q264957
_unicode
validation
def _unicode(string):
    """Decode a byte string, trying utf-8 then latin1.

    Falls back to utf-8 with replacement characters if both fail.
    NOTE(review): relies on the Python 2 ``unicode`` builtin.
    """
    for encoding in ('utf-8', 'latin1'):
        try:
            return unicode(string, encoding)
        except UnicodeDecodeError:
            pass
    return unicode(string, 'utf-8', 'replace')
python
{ "resource": "" }
q264958
http_get_provider
validation
def http_get_provider(provider, request_url, params, token_secret, token_cookie = None):
    '''Handle HTTP GET requests on an authentication endpoint.

    Authentication flow begins when ``params`` has a ``login`` key with a
    value of ``start``, e.g. ``/auth/twitter?login=start``.

    :param str provider: provider to obtain a user ID from
    :param str request_url: the authentication endpoint/callback
    :param dict params: GET parameters from the query string
    :param str token_secret: app secret to encode/decode JSON web tokens
    :param str token_cookie: the current JSON web token, if available
    :return: dict possibly containing ``status``, ``redirect``,
        ``set_token_cookie``, ``provider_user_id``, ``provider_user_name``
    '''
    if not validate_provider(provider):
        raise InvalidUsage('Provider not supported')
    klass = getattr(socialauth.providers, provider.capitalize())
    provider = klass(request_url, params, token_secret, token_cookie)
    if provider.status == 302:
        # Redirect step of the flow; forward the token cookie when set.
        ret = dict(status = 302, redirect = provider.redirect)
        token = getattr(provider, 'set_token_cookie', None)
        if token is not None:
            ret['set_token_cookie'] = token
        return ret
    if provider.status == 200 and provider.user_id is not None:
        ret = dict(status = 200, provider_user_id = provider.user_id)
        if provider.user_name is not None:
            ret['provider_user_name'] = provider.user_name
        return ret
    raise InvalidUsage('Invalid request')
python
{ "resource": "" }
q264959
Target.to_json
validation
def to_json(self):
    """Serialize the target for ``json.dump`` or ``jsonify``.

    :return: dict with a ``source`` key, plus a FragmentSelector
        ``selector`` when a subreference is present
    """
    if self.subreference is None:
        return {"source": self.objectId}
    return {
        "source": self.objectId,
        "selector": {
            "type": "FragmentSelector",
            "conformsTo": "http://ontology-dts.org/terms/subreference",
            "value": self.subreference
        }
    }
python
{ "resource": "" }
q264960
AnnotationResource.read
validation
def read(self):
    """Return the annotation's content, fetching it lazily on first call.

    :return: the contents of the resource
    :rtype: str or bytes or flask.response
    """
    if not self.__content__:
        # First access: resolve the URI and cache content + mimetype.
        self.__retriever__ = self.__resolver__.resolve(self.uri)
        self.__content__, self.__mimetype__ = self.__retriever__.read(self.uri)
    return self.__content__
python
{ "resource": "" }
q264961
build_index_and_mapping
validation
def build_index_and_mapping(triples):
    """Index all triples and return (indexed triples, entity map, relation map).

    Entities and relations are numbered in first-seen order.
    """
    ents = bidict()
    rels = bidict()
    next_ent = 0
    next_rel = 0
    indexed = []
    for t in triples:
        for label in (t.head, t.tail):
            if label not in ents:
                ents[label] = next_ent
                next_ent += 1
        if t.relation not in rels:
            rels[t.relation] = next_rel
            next_rel += 1
        indexed.append(
            kgedata.TripleIndex(ents[t.head], rels[t.relation], ents[t.tail]))
    return indexed, ents, rels
python
{ "resource": "" }
q264962
recover_triples_from_mapping
validation
def recover_triples_from_mapping(indexes, ents: bidict, rels: bidict):
    """Map triple indexes back to their original entity/relation labels."""
    return [
        kgedata.Triple(
            ents.inverse[idx.head],
            rels.inverse[idx.relation],
            ents.inverse[idx.tail])
        for idx in indexes
    ]
python
{ "resource": "" }
q264963
_transform_triple_numpy
validation
def _transform_triple_numpy(x): """Transform triple index into a 1-D numpy array.""" return np.array([x.head, x.relation, x.tail], dtype=np.int64)
python
{ "resource": "" }
q264964
pack_triples_numpy
validation
def pack_triples_numpy(triples):
    """Pack a list of triple indexes into an (n, 3) int64 numpy array.

    Returns an empty array when no triples are given.
    """
    if len(triples) == 0:
        return np.array([], dtype=np.int64)
    rows = [np.array((t.head, t.relation, t.tail), dtype=np.int64)
            for t in triples]
    return np.stack(rows, axis=0)
python
{ "resource": "" }
q264965
remove_near_duplicate_relation
validation
def remove_near_duplicate_relation(triples, threshold=0.97):
    """Drop relations whose (head, tail) pair sets nearly duplicate another's.

    When two relations' pair-signatures are closer than ``threshold``,
    one of the pair is chosen at random and kept; the other is removed.
    """
    logging.debug("remove duplicate")
    _assert_threshold(threshold)
    pair_signatures = defaultdict(list)
    relations = set()
    for t in triples:
        pair_signatures[t.relation].append(f"{t.head} {t.tail}")
        relations.add(t.relation)
    relations = list(relations)
    removal_relation_set = set()
    # Replace raw pair lists with minhash signatures for fast comparison.
    for rel, values in pair_signatures.items():
        pair_signatures[rel] = Superminhash(values)
    for i in relations:
        for j in relations:
            if i == j or i in removal_relation_set or j in removal_relation_set:
                continue
            close_relations = [i]
            if _set_close_to(pair_signatures[i], pair_signatures[j], threshold):
                close_relations.append(j)
            if len(close_relations) > 1:
                # Randomly keep one of the near-duplicate pair.
                close_relations.pop(np.random.randint(len(close_relations)))
                removal_relation_set |= set(close_relations)
    logging.info("Removing {} relations: {}".format(
        len(removal_relation_set), str(removal_relation_set)))
    return list(filterfalse(
        lambda x: x.relation in removal_relation_set, triples))
python
{ "resource": "" }
q264966
remove_direct_link_triples
validation
def remove_direct_link_triples(train, valid, test):
    """Remove training triples directly linking pairs seen in valid/test.

    A training triple is dropped if its (head, tail) pair — in either
    direction — appears in the validation or test sets.
    """
    seen_pairs = {(t.head, t.tail) for t in valid + test}

    def _linked(t):
        return (t.head, t.tail) in seen_pairs or (t.tail, t.head) in seen_pairs

    return [t for t in train if not _linked(t)]
python
{ "resource": "" }
q264967
Indexer.shrink_indexes_in_place
validation
def shrink_indexes_in_place(self, triples):
    """Compact entity/relation ids via union-find, rewriting triples in place.

    Surviving entities/relations are renumbered consecutively and the
    instance's mappings and counters are updated accordingly.
    """
    ent_roots = self.UnionFind(self._ent_id)
    rel_roots = self.UnionFind(self._rel_id)
    for t in triples:
        ent_roots.add(t.head)
        ent_roots.add(t.tail)
        rel_roots.add(t.relation)
    for i, t in enumerate(triples):
        head = ent_roots.find(t.head)
        relation = rel_roots.find(t.relation)
        tail = ent_roots.find(t.tail)
        triples[i] = kgedata.TripleIndex(head, relation, tail)

    def _compact(roots, mapping):
        # Renumber surviving ids consecutively; drop the rest.
        compacted = bidict()
        next_id = 0
        for old_id, exists in enumerate(roots.roots()):
            if not exists:
                mapping.inverse.pop(old_id)
            else:
                compacted[mapping.inverse[old_id]] = next_id
                next_id += 1
        return compacted, next_id

    self._ents, self._ent_id = _compact(ent_roots, self._ents)
    self._rels, self._rel_id = _compact(rel_roots, self._rels)
python
{ "resource": "" }
q264968
IndexBuilder.freeze
validation
def freeze(self):
    """Create a serializable data structure for the search index."""
    data = super(IndexBuilder, self).freeze()
    try:
        # Sphinx >= 1.5 format
        # Due to changes from github.com/sphinx-doc/sphinx/pull/2454
        base_file_names = data['docnames']
    except KeyError:
        # Sphinx < 1.5 format
        base_file_names = data['filenames']
    store = {}
    c = itertools.count()
    for prefix, items in iteritems(data['objects']):
        for name, (index, typeindex, _, shortanchor) in iteritems(items):
            objtype = data['objtypes'][typeindex]
            if objtype.startswith('cpp:'):
                # C++ names carry their scope in the name itself.
                split = name.rsplit('::', 1)
                if len(split) != 2:
                    warnings.warn("What's up with %s?" % str((prefix, name, objtype)))
                    continue
                prefix, name = split
                last_prefix = prefix.split('::')[-1]
            else:
                last_prefix = prefix.split('.')[-1]
            store[next(c)] = {
                'filename': base_file_names[index],
                'objtype': objtype,
                'prefix': prefix,
                'last_prefix': last_prefix,
                'name': name,
                'shortanchor': shortanchor,
            }
    data.update({'store': store})
    return data
python
{ "resource": "" }
q264969
log_operation
validation
def log_operation(entities, operation_name, params=None):
    """Log an operation performed on one or more entities.

    :param entities: a single entity or a list/tuple of entities
    :param operation_name: name of the operation
    :param params: optional extra arguments of the operation
    """
    if not isinstance(entities, (list, tuple)):
        entities = [entities]
    else:
        entities = list(entities)
    payload = {'name': operation_name, 'on': entities}
    if params:
        payload['params'] = params
    _log(TYPE_CODES.OPERATION, payload)
python
{ "resource": "" }
q264970
log_state
validation
def log_state(entity, state):
    """Log that an entity entered a new state."""
    _log(TYPE_CODES.STATE, {'on': entity, 'state': state})
python
{ "resource": "" }
q264971
log_update
validation
def log_update(entity, update):
    """Log an update applied to an entity."""
    _log(TYPE_CODES.UPDATE, {'on': entity, 'update': update})
python
{ "resource": "" }
q264972
log_error
validation
def log_error(error, result):
    """Log an error together with its associated result."""
    _log(TYPE_CODES.ERROR, {'error': error, 'result': result})
python
{ "resource": "" }
q264973
dict_cursor
validation
def dict_cursor(func):
    """Decorator injecting a dictionary cursor as the second argument.

    The decorated coroutine must belong to a class (or object) that
    provides ``get_cursor(cursor_type)``.

    Yields:
        A client-side dictionary cursor
    """
    @wraps(func)
    def wrapper(cls, *args, **kwargs):
        with (yield from cls.get_cursor(_CursorType.DICT)) as cur:
            return (yield from func(cls, cur, *args, **kwargs))
    return wrapper
python
{ "resource": "" }
q264974
cursor
validation
def cursor(func):
    """Decorator injecting a plain cursor as the second argument.

    The decorated coroutine must belong to a class (or object) that
    provides ``get_cursor()``.

    Yields:
        A client-side cursor
    """
    @wraps(func)
    def wrapper(cls, *args, **kwargs):
        with (yield from cls.get_cursor()) as cur:
            return (yield from func(cls, cur, *args, **kwargs))
    return wrapper
python
{ "resource": "" }
q264975
nt_cursor
validation
def nt_cursor(func):
    """Decorator injecting a namedtuple cursor as the second argument.

    The decorated coroutine must belong to a class (or object) that
    provides ``get_cursor(cursor_type)``.

    Yields:
        A client-side namedtuple cursor
    """
    @wraps(func)
    def wrapper(cls, *args, **kwargs):
        with (yield from cls.get_cursor(_CursorType.NAMEDTUPLE)) as cur:
            return (yield from func(cls, cur, *args, **kwargs))
    return wrapper
python
{ "resource": "" }
q264976
transaction
validation
def transaction(func):
    """Decorator running the wrapped coroutine inside a transaction.

    The cursor runs with autocommit off; any exception triggers a
    ROLLBACK, otherwise the transaction is COMMITted and the result
    returned.

    Yields:
        A client-side transacted namedtuple cursor
    """
    @wraps(func)
    def wrapper(cls, *args, **kwargs):
        with (yield from cls.get_cursor(_CursorType.NAMEDTUPLE)) as cur:
            try:
                yield from cur.execute('BEGIN')
                result = (yield from func(cls, cur, *args, **kwargs))
            except Exception:
                yield from cur.execute('ROLLBACK')
            else:
                yield from cur.execute('COMMIT')
                return result
    return wrapper
python
{ "resource": "" }
q264977
PostgresStore.count
validation
def count(cls, cur, table:str, where_keys: list=None):
    """Return the number of records in `table`.

    Args:
        table: name of the table
        where_keys: optional filter specification

    Returns:
        integer row count
    """
    if where_keys:
        where_clause, params = cls._get_where_clause_with_values(where_keys)
        query = cls._count_query_where.format(table, where_clause)
    else:
        query = cls._count_query.format(table)
        params = ()
    yield from cur.execute(query, params)
    row = yield from cur.fetchone()
    return int(row[0])
python
{ "resource": "" }
q264978
PostgresStore.insert
validation
def insert(cls, cur, table: str, values: dict):
    """Insert a row built from chosen fields.

    Args:
        table: name of the table
        values: dict of column names to values

    Returns:
        the created row as a 'Record' object
    """
    columns = cls._COMMA.join(values.keys())
    # One placeholder per value; strip the trailing separator.
    placeholders = (cls._PLACEHOLDER * len(values))[:-1]
    query = cls._insert_string.format(table, columns, placeholders)
    yield from cur.execute(query, tuple(values.values()))
    return (yield from cur.fetchone())
python
{ "resource": "" }
q264979
PostgresStore.update
validation
def update(cls, cur, table: str, values: dict, where_keys: list) -> tuple:
    """Update rows matching `where_keys` with `values`.

    Items within each where-dict are AND-ed; the dicts themselves are
    OR-ed, e.g. ((name>%s and url=%s) or (type <= %s)).

    Args:
        table: name of the table
        values: dict of columns to new values
        where_keys: list of where-clause dicts

    Returns:
        rows fetched after the update
    """
    columns = cls._COMMA.join(values.keys())
    placeholders = (cls._PLACEHOLDER * len(values))[:-1]
    where_clause, where_params = cls._get_where_clause_with_values(where_keys)
    query = cls._update_string.format(table, columns, placeholders, where_clause)
    yield from cur.execute(query, tuple(values.values()) + where_params)
    return (yield from cur.fetchall())
python
{ "resource": "" }
q264980
PostgresStore.delete
validation
def delete(cls, cur, table: str, where_keys: list):
    """Delete rows matching `where_keys`.

    Items within each where-dict are AND-ed; the dicts themselves are
    OR-ed.

    Args:
        table: name of the table
        where_keys: list of where-clause dicts

    Returns:
        integer count of rows deleted
    """
    where_clause, params = cls._get_where_clause_with_values(where_keys)
    query = cls._delete_query.format(table, where_clause)
    yield from cur.execute(query, params)
    return cur.rowcount
python
{ "resource": "" }
q264981
PostgresStore.select
validation
def select(cls, cur, table: str, order_by: str, columns: list=None, where_keys: list=None, limit=100, offset=0):
    """Select rows, optionally restricting columns and filtering.

    Items within each where-dict are AND-ed; the dicts themselves are
    OR-ed, e.g. ((name>%s and url=%s) or (type <= %s)).

    Args:
        table: name of the table
        order_by: column name to order the results on
        columns: optional list of columns to select
        where_keys: optional list of where-clause dicts
        limit: maximum number of results
        offset: offset into the results

    Returns:
        list of 'Record' objects with table columns as properties
    """
    if where_keys:
        where_clause, params = cls._get_where_clause_with_values(where_keys)
    else:
        where_clause, params = None, ()
    if columns:
        columns_string = cls._COMMA.join(columns)
        if where_clause:
            query = cls._select_selective_column_with_condition.format(
                columns_string, table, where_clause, order_by, limit, offset)
        else:
            query = cls._select_selective_column.format(
                columns_string, table, order_by, limit, offset)
    else:
        if where_clause:
            query = cls._select_all_string_with_condition.format(
                table, where_clause, order_by, limit, offset)
        else:
            query = cls._select_all_string.format(table, order_by, limit, offset)
    yield from cur.execute(query, params)
    return (yield from cur.fetchall())
python
{ "resource": "" }
q264982
PostgresStore.raw_sql
validation
def raw_sql(cls, cur, query: str, values: tuple):
    """Execute a raw parameterized SQL query.

    Args:
        query: query string to execute
        values: tuple of values bound to the query

    Returns:
        result rows as a list of named tuples
    """
    yield from cur.execute(query, values)
    return (yield from cur.fetchall())
python
{ "resource": "" }
q264983
serialize_text
validation
def serialize_text(out, text):
    """Append `text` to `out`, padding continuation lines to align under it.

    All lines of `text` except the first receive left padding equal to
    the length of `out`, so multi-line values line up visually.
    """
    pad = padding_adder(len(out))
    return out + pad(text, ignore_first_line=True)
python
{ "resource": "" }
q264984
format_value
validation
def format_value(value):
    """Return a unicode representation of `value`.

    Guards against cyclic structures by tracking object ids currently
    being formatted and emitting '<recursion>' for repeats.
    """
    value_id = id(value)
    if value_id in recursion_breaker.processed:
        return u'<recursion>'
    recursion_breaker.processed.add(value_id)
    try:
        if isinstance(value, six.binary_type):
            # Byte strings are assumed to be utf-8 encoded text.
            return u"'{0}'".format(value.decode('utf-8'))
        if isinstance(value, six.text_type):
            return u"u'{0}'".format(value)
        if isinstance(value, (list, tuple)):
            # Long lists or lists with multiline items render vertically.
            items = list(map(format_value, value))
            return force_unicode(
                serialize_list(u'[', items, delimiter=u',') + u']')
        if isinstance(value, dict):
            # Format each key/value pair recursively, sorted by key.
            pairs = sorted(
                tuple(map(format_value, pair))
                for pair in six.iteritems(value))
            rendered = [
                serialize_text(u'{0}: '.format(key), item_value)
                for key, item_value in pairs]
            return force_unicode(
                serialize_list(u'{', rendered, delimiter=u',') + u'}')
        return force_unicode(repr(value))
    finally:
        recursion_breaker.processed.remove(value_id)
python
{ "resource": "" }
q264985
traverse
validation
def traverse(element, query, deep=False):
    """Yield nodes under `element` matching the parsed query parts.

    An empty query part (produced by '//') switches to deep search over
    all descendants.
    """
    part = query[0]
    if not part:
        # '//' encountered: search all sub-nodes with the remaining query.
        query = query[1:]
        part = query[0]
        deep = True
    # Split 'tag[predicate]' into tag and optional predicate.
    part, predicate = xpath_re.match(query[0]).groups()
    for child in element._children:
        if part in ('*', child.tagname) and child._match(predicate):
            # Potential match: this child satisfies the current part.
            if len(query) == 1:
                yield child
            else:
                for hit in traverse(child, query[1:]):
                    yield hit
        if deep:
            # Deep mode: keep searching every subtree with the same query.
            for hit in traverse(child, query, deep=True):
                yield hit
python
{ "resource": "" }
q264986
parse_query
validation
def parse_query(query):
    """Split a simplified XPath query string into normalized parts.

    Consecutive slashes ('//') collapse into a single empty-string part,
    which downstream traversal interprets as "search all descendants".
    """
    normalized = []
    for part in (p.strip() for p in query.split('/')):
        if part:
            normalized.append(part)
        elif '' not in normalized:
            normalized.append('')
    return normalized
python
{ "resource": "" }
q264987
XmlElement.insert
validation
def insert(self, before, name, attrs=None, data=None):
    """Insert a new child element before an index or sibling element.

    :param before: an :class:`XmlElement` child, or a numeric index,
        to insert the new node before
    :param name: tag name of the new element
    :param attrs: attributes for the new element
    :param data: CDATA for the new element
    :returns: the newly created element
    :rtype: :class:`XmlElement`
    """
    if isinstance(before, self.__class__):
        if before.parent != self:
            raise ValueError('Cannot insert before an element with a different parent.')
        before = before.index
    # Clamp the insertion point into [0, len(self._children)].
    before = min(max(0, before), len(self._children))
    elem = self.__class__(name, attrs, data, parent=self, index=before)
    self._children.insert(before, elem)
    # Re-index all children after the insertion.
    for idx, child in enumerate(self._children):
        child.index = idx
    return elem
python
{ "resource": "" }
q264988
XmlElement.children
validation
def children(self, name=None, reverse=False):
    """Yield direct children of this node.

    :param name: if given, only yield elements with this tag name
    :param reverse: if True, yield in reverse declaration order
    """
    ordered = reversed(self._children) if reverse else self._children
    for child in ordered:
        if name is None or child.tagname == name:
            yield child
python
{ "resource": "" }
q264989
XmlElement._match
validation
def _match(self, pred):
    """Return True when this node satisfies the bracketed predicate."""
    if not pred:
        return True
    pred = pred[1:-1]  # strip the '[' and ']'
    if pred.startswith('@'):
        # Attribute predicate: existence, or equality when '=' present.
        pred = pred[1:]
        if '=' in pred:
            attr, value = pred.split('=', 1)
            if value[0] in ('"', "'"):
                value = value[1:]
            if value[-1] in ('"', "'"):
                value = value[:-1]
            return self.attrs.get(attr) == value
        return pred in self.attrs
    elif num_re.match(pred):
        # Index predicate: n-th child of our parent (0-based).
        index = int(pred)
        if index < 0:
            if self.parent:
                # Negative indexes count from the end of the list.
                return self.index == (len(self.parent._children) + index)
            # The root node can only be index 0.
            return index == 0
        return index == self.index
    else:
        if '=' in pred:
            # Child-value predicate: [tag=value].
            tag, value = pred.split('=', 1)
            if value[0] in ('"', "'"):
                value = value[1:]
            if value[-1] in ('"', "'"):
                value = value[:-1]
            for child in self._children:
                if child.tagname == tag and child.data == value:
                    return True
        else:
            # Plain [tag]: match if any child has that tag name.
            for child in self._children:
                if child.tagname == pred:
                    return True
        return False
python
{ "resource": "" }
q264990
XmlElement.path
validation
def path(self, include_root=False):
    """Return a canonical path to this element, relative to the root.

    :param include_root: if True, include the root node in the path
    """
    segments = ['%s[%d]' % (self.tagname, self.index or 0)]
    ancestor = self.parent
    while ancestor is not None:
        # The root is omitted unless include_root is requested.
        if ancestor.parent or include_root:
            segments.insert(
                0, '%s[%d]' % (ancestor.tagname, ancestor.index or 0))
        ancestor = ancestor.parent
    return '/'.join(segments)
python
{ "resource": "" }
q264991
XmlElement.iter
validation
def iter(self, name=None):
    """Recursively yield descendants of this node.

    :param name: if given, only yield elements with this tag name
    """
    for child in self._children:
        if name is None or child.tagname == name:
            yield child
        # NOTE(review): recursion goes through find(); presumably an
        # alias of iter() — confirm, otherwise deep descendants with a
        # name filter may be missed.
        for descendant in child.find(name):
            yield descendant
python
{ "resource": "" }
q264992
XmlElement.last
validation
def last(self, name=None):
    """Return the last child of this node, or None when there is none.

    :param name: if given, only consider elements with this tag name
    :rtype: :class:`XmlElement`
    """
    return next(self.children(name, reverse=True), None)
python
{ "resource": "" }
q264993
XmlElement.parents
validation
def parents(self, name=None):
    """Yield all ancestors of this element, up to the root.

    :param name: if given, only yield elements with this tag name
    """
    ancestor = self.parent
    while ancestor is not None:
        if name is None or ancestor.tagname == name:
            yield ancestor
        ancestor = ancestor.parent
python
{ "resource": "" }
q264994
XmlElement.next
validation
def next(self, name=None):
    """Return the next sibling of this node, or None.

    :param name: if given, only consider elements with this tag name
    :rtype: :class:`XmlElement`
    """
    if self.parent is None or self.index is None:
        return None
    # NOTE(review): uses the Python 2 xrange builtin.
    for idx in xrange(self.index + 1, len(self.parent)):
        if name is None or self.parent[idx].tagname == name:
            return self.parent[idx]
python
{ "resource": "" }
q264995
XmlElement.prev
validation
def prev(self, name=None):
    """Return the previous sibling of this node, or None.

    :param name: if given, only consider elements with this tag name
    :rtype: :class:`XmlElement`
    """
    if self.parent is None or self.index is None:
        return None
    # NOTE(review): uses the Python 2 xrange builtin.
    for idx in xrange(self.index - 1, -1, -1):
        if name is None or self.parent[idx].tagname == name:
            return self.parent[idx]
python
{ "resource": "" }
q264996
WebObsResultsParser.get_observations
validation
def get_observations(self):
    """Parse the HTML result table into a list of observation dicts."""
    if self.empty:
        return []
    observations = []
    rows = list(self.tbody)
    # Rows alternate: one observation row followed by one details row.
    for row_observation, row_details in zip(rows[::2], rows[1::2]):
        cells = OBSERVATION_XPATH(row_observation)
        data = {
            'name': _clean_cell(cells[0]),
            'date': _clean_cell(cells[1]),
            'magnitude': _clean_cell(cells[3]),
            'obscode': _clean_cell(cells[6]),
        }
        cells = DETAILS_XPATH(row_details)
        data['comp1'] = _clean_cell(cells[0])
        data['chart'] = _clean_cell(cells[3]).replace('None', '')
        data['comment_code'] = _clean_cell(cells[4])
        data['notes'] = _clean_cell(cells[5])
        observations.append(data)
    return observations
python
{ "resource": "" }
q264997
get_cache_key
validation
def get_cache_key(prefix, *args, **kwargs):
    """Calculate a cache key based on `args` and `kwargs`.

    `args` and `kwargs` must be instances of hashable types. Keyword
    items are sorted by name so the key does not depend on the order
    keyword arguments were supplied in.
    """
    # Fix: dict.iteritems() is Python-2-only (AttributeError on Python 3);
    # use items(), sorted for a deterministic key.
    hash_args_kwargs = hash(tuple(sorted(kwargs.items())) + args)
    return '{}_{}'.format(prefix, hash_args_kwargs)
python
{ "resource": "" }
q264998
cache_func
validation
def cache_func(prefix, method=False):
    """Cache a function's result in the django cache backend.

    The cache key is derived from `prefix` plus the call's args/kwargs.
    Set `method=True` for bound methods so `self` is excluded from the
    key.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            key_args = args[1:] if method else args
            cache_key = get_cache_key(prefix, *key_args, **kwargs)
            result = cache.get(cache_key)
            if result is None:
                result = func(*args, **kwargs)
                cache.set(cache_key, result)
            return result
        return wrapper
    return decorator
python
{ "resource": "" }
q264999
get_or_default
validation
def get_or_default(func=None, default=None):
    """Wrap Django ORM `get`-style calls to return a default on miss.

    Catches ObjectDoesNotExist and returns `default` instead (calling
    it first if it is callable). Usable as a decorator with or without
    arguments.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except ObjectDoesNotExist:
                return default() if callable(default) else default
        return wrapper
    return decorator if func is None else decorator(func)
python
{ "resource": "" }