text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_kv_args(self, args):
    """Consume key=value style arguments, coercing numeric ones to int.

    The integer options (start/end/count/stride) are parsed with base
    auto-detection (``int(x, 0)``) and stored as attributes; ``format``
    is stored verbatim.  Any leftover keys are rejected.
    """
    for name in ("start", "end", "count", "stride"):
        raw = args.pop(name, None)
        if raw is None:
            continue
        try:
            setattr(self, name, int(raw, 0))
        except ValueError:
            raise AnsibleError(
                "can't parse arg %s=%r as integer"
                % (name, raw)
            )
    if "format" in args:
        self.format = args.pop("format")
    if args:
        raise AnsibleError(
            "unrecognized arguments to with_sequence: %r"
            % args.keys()
        )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_headers(self, use_cookies, raw):
    """Parse an HTTP request dump into a URL and a headers dict.

    :param use_cookies: when true, the ``Cookie`` header is removed from
        the result and handed to ``self.fmt_cookies``
    :param raw: raw request text; when falsy the text is read from
        ``self.fpth`` instead
    :return: ``(url, headers_dict)``; also sets ``self.headers`` and
        ``self.url``
    """
    if not raw:
        packet = helper.to_str(helper.read_file(self.fpth))
    else:
        packet = raw
    dat = {}
    pks = [x for x in packet.split('\n') if x.replace(' ', '')]
    # Request line looks like "GET /path HTTP/1.1" -> take the path part.
    url = pks[0].split(' ')[1]
    for cnt in pks[1:]:
        # BUGFIX: split on the first colon only and *strip* whitespace.
        # The old code split on every ':' and deleted all spaces, which
        # corrupted values such as "User-Agent: Mozilla/5.0 (X11; Linux)"
        # into "Mozilla/5.0(X11;Linux)".
        _k, sep, v = cnt.partition(':')
        if not sep:
            continue
        dat[_k.strip()] = v.strip()
    if use_cookies:
        try:
            self.fmt_cookies(dat.pop('Cookie'))
        except Exception:  # best-effort: a missing cookie header is fine
            pass
    self.headers = dat
    self.url = 'https://{}{}'.format(self.headers.get('Host'), url)
    return url, dat
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def spawn(self, url, force_spawn=False):
    """Initialise the per-domain cache layout and session cookies for *url*.

    - creates ``<base>/<domain>`` plus ``raw``/``media`` sub-directories
    - when a cached homepage exists (and *force_spawn* is false), loads
      cookies from ``<base>/<domain>/cookie.txt``; otherwise fetches the
      homepage, saves it, and dumps fresh session cookies to that file

    :param url: site url used to derive the domain directory
    :param force_spawn: when true, re-fetch the homepage even if cached
    :return: True on success, False when the url is invalid or the
        homepage request does not return HTTP 200
    """
    _url, domain = self.get_domain_home_from_url(url)
    if not _url:
        return False
    self.cache['site_dir'] = os.path.join(self.cache['base'], self.domain)
    for k in ['raw', 'media']:
        self.cache['site_' + k] = os.path.join(self.cache['site_dir'], k)
        helper.mkdir_p(self.cache['site_' + k], True)
    ck_pth = os.path.join(self.cache['site_dir'], 'cookie.txt')
    helper.mkdir_p(ck_pth)
    name = os.path.join(self.cache['site_raw'], 'homepage')
    # not force spawn and file ok
    if not force_spawn and helper.is_file_ok(name):
        # zlog.debug('{} exist!'.format(name))
        self.sess.cookies = self.load_cookies(ck_pth)
        return True
    else:
        zlog.debug('{} not exist!'.format(name))
    res = self.sess.get(url, headers=self.__header__)
    if res.status_code != 200:
        return False
    if res:
        helper.write_file(res.content, name)
        # self.load(url)
        # NOTE(review): every value of self.headers is replaced with the
        # *whole* request-headers mapping (``v`` is unused).  This matches
        # "init headers.get/post/json with response info" only if each
        # per-verb entry is meant to become the homepage request headers —
        # confirm whether ``res.request.headers[k]`` was intended instead.
        for k, v in self.headers.items():
            self.headers[k] = res.request.headers
        self.dump_cookies(cookies=self.sess.cookies, save_to=ck_pth)
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def map_url_to_cache_id(self, url):
    """Map *url* onto its local cache path.

    ``<domain>/foo/bar/a.html`` becomes ``<site_raw>/foo/bar/a.html``;
    the bare domain maps to ``<site_raw>/homepage``; an invalid url
    maps to the empty string.
    """
    base, _ = self.get_domain_home_from_url(url)
    if base == '':
        sub_page = ''  # invalid url
    elif url in (base, base + '/'):
        sub_page = 'homepage'  # homepage
    else:
        # sub page: keep only the non-empty path components
        parts = url.replace(base, '').split('/')
        sub_page = '/'.join(p for p in parts if p)
    if not sub_page:
        return sub_page
    return os.path.join(self.cache['site_raw'], sub_page)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def do_sess_get(self, url):
    """Synchronously GET *url* through the shared session.

    :return: response body bytes on HTTP 200, otherwise None (timeouts
        and connection failures are logged and swallowed)
    """
    try:
        res = self.sess.get(url,
                            headers=self.headers['get'],
                            timeout=self.timeout)
    except (requests.ReadTimeout,
            requests.ConnectTimeout,
            requests.ConnectionError) as err:
        zlog.error('failed of: {} with error: {}'.format(url, err))
        return None
    if res.status_code == 200:
        return res.content
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, url, use_cache=True, show_log=False):
    """Fetch the raw content of *url*, preferring the local cache.

    Falls back to a live HTTP fetch on a cache miss and stores the
    fetched content for next time.

    :param use_cache: consult the local cache before hitting the network
    :param show_log: emit debug logging about the cache hit/miss
    :return: the raw content of the url
    """
    cache_name = self.map_url_to_cache_id(url)
    # NOTE: mirrors the original bookkeeping — `hit` reflects whether the
    # cache was *consulted*, not whether it actually produced the data.
    hit = use_cache
    raw = self.load_from_cache(cache_name) if use_cache else ''
    if not raw:
        if show_log:
            zlog.debug('from cache got nothing {}'.format(cache_name))
        raw = self.do_sess_get(url)
        if raw:
            helper.write_file(raw, cache_name)
    if show_log:
        zlog.debug('[{}:{:>8}] get {}'.format(
            'Cache' if hit else 'Net', len(raw), url))
    return raw
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sync_save(self, res, overwrite=False):
    """Download ``res['url']`` and write it to ``res['name']`` synchronously.

    :param res: ``{'url': ..., 'name': ...}``
    :type res: dict
    :param overwrite: when false, an existing valid file is kept as-is
    :return: True when the file exists (already or after download),
        False when the download yielded nothing
    :raises CrawlerParamsError: on a malformed *res*
    """
    if not isinstance(res, dict):
        raise CrawlerParamsError('res must be dict')
    url_, file_name = res.get('url', ''), res.get('name', '')
    if not (url_ and file_name):
        raise CrawlerParamsError('url&name is needed!')
    # An already-present, valid file wins unless overwrite was requested.
    if not overwrite and helper.is_file_ok(file_name):
        return True
    cnt = self.do_sess_get(url_)
    if not cnt:
        # download produced nothing
        return False
    with open(file_name, 'wb') as out_file:
        out_file.write(cnt)
    zlog.debug('Sync Done {}'.format(res.get('name')))
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def crawl_raw(self, res):
    """Fetch ``res['url']`` asynchronously and persist it to ``res['name']``.

    The blocking disk write is pushed onto the default executor so the
    event loop stays responsive.

    :param res: ``{'url': ..., 'name': ...}``
    :type res: ``dict``
    :return: True when content was fetched and handed off for writing
    """
    content = await self.async_get(res)
    if not content:
        return False
    loop = asyncio.get_event_loop()
    await loop.run_in_executor(None, self.write_hd, res.get('name'), content)
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def _sem_crawl(self, sem, res):
""" use semaphore ``encapsulate`` the crawl_media \n with async crawl, should avoid crawl too fast to become DDos attack to the crawled server should set the ``semaphore size``, and take ``a little gap`` between each crawl behavior. :param sem: the size of semaphore :type sem: :param res: :type res: dict :return: :rtype: """ |
async with sem:
st_ = await self.crawl_raw(res)
if st_:
self.result['ok'] += 1
else:
self.result['fail'] += 1
# take a little gap
await asyncio.sleep(random.randint(0, 1)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def json_decoder(content, *args, **kwargs):
    """Json decoder parser to be used by service_client.

    Decodes *content* (raw response bytes) and parses it as JSON.
    Empty or None content yields None.  Extra positional/keyword
    arguments are accepted for interface compatibility and ignored.
    """
    if not content:
        return None
    return json.loads(content.decode())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init(changelog_name):
    """Setup your project.

    Locates (or asks the user to name) the changelog file, creating it
    when necessary, and records the chosen name in the config file.
    """
    changelog_path = find_chglog_file()
    create_changelog_flag = True
    mark = style("?", fg="blue", bold=True)
    if not changelog_name:
        if changelog_path:
            # NOTE(review): `filename` is styled but never used, and the
            # prompt shows the literal "(unknown)" instead — the message
            # probably meant to interpolate {filename}.  Confirm upstream
            # before changing the user-visible string.
            filename = style(changelog_path.name, fg="blue", bold=True)
            message = f" {mark} (unknown) was found." " Is this the changelog file?"
            if click.confirm(message):
                changelog_name = changelog_path.name
                create_changelog_flag = False
        if create_changelog_flag:
            # Nothing suitable found/confirmed: ask for a name.
            message = f" {mark} Enter a name for the changelog:"
            changelog_name = click.prompt(message, default=DEFAULT_CHANGELOG)
    if create_changelog_flag:
        create_chglog_file(changelog_name)
    if changelog_name and create_changelog_flag:
        update_config_file("changelog_file", changelog_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def changelog_file_option_validator(ctx, param, value):
    """Checks that the given file path exists in the current working
    directory.

    Returns a :class:`~pathlib.Path` object.  If the file does not
    exist, aborts via ``ctx.fail`` (which raises a
    :class:`~click.UsageError`).
    """
    path = Path(value)
    if not path.exists():
        # NOTE(review): `filename` is styled but never used; the message
        # shows the literal "(unknown)" — probably meant {filename}.
        # Confirm upstream before changing the user-visible string.
        filename = click.style(path.name, fg="blue", bold=True)
        ctx.fail(
            "\n"
            f" {x_mark} Unable to find (unknown)\n"
            ' Run "$ brau init" to create one'
        )
    return path
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def current_version_option_validator(ctx, param, value):
    """Validate or derive the current version for the CLI.

    When *value* is given it must be a valid version string; otherwise
    the current version is taken from the newest Git tag matching the
    ``tag_pattern`` option.  Also stashes ``current_tag`` and
    ``versions`` into ``ctx.params``.

    :return: a :class:`~braulio.version.Version` object or None
    """
    current_version = None
    if value:
        try:
            current_version = Version(value)
        except ValueError:
            ctx.fail(f"{value} is not a valid version string")
    # Collect the known versions from the repository tags.
    git = Git()
    tag_pattern = ctx.params["tag_pattern"]
    versions = tag_analyzer(git.tags, tag_pattern, Version)
    if current_version:
        # The user supplied a version: prefer the matching tagged one.
        matching = [v for v in versions if v == current_version]
        if matching:
            current_version = matching[0]
    elif versions:
        # No explicit version: the newest tag wins.
        current_version = versions[0]
    ctx.params["current_tag"] = current_version.tag if current_version else None
    ctx.params["versions"] = versions
    return current_version
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def layout(self, indent=' '):
    """Indent each document part by the given indent string.

    The parts are processed in a fixed order; the body sections start at
    nesting level 3.
    """
    sections = (
        (self.head, {}),
        (self.meta, {}),
        (self.stylesheet, {}),
        (self.header, {}),
        (self.body, {'initial': 3}),
        (self.footer, {}),
        (self.body_pre_docinfo, {'initial': 3}),
        (self.docinfo, {}),
    )
    for part, extra in sections:
        self.__indent(part, indent, **extra)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main():
    """Show the intervention screen.

    Builds the Qt application, hides the platform cursor, applies the
    bundled stylesheet and runs the event loop; an ``atexit`` hook
    restores the cursor on the way out.
    """
    application = Application(sys.argv, ignore_close=not SKIP_FILTER)
    platform.hide_cursor()
    with open(resource_filename(__name__, 'intervention.css')) as css:
        application.setStyleSheet(css.read())
    # exec() is required for objc so we must use spawn
    # multiprocessing.set_start_method('spawn')
    # target = do_nothing
    # if sys.platform == 'darwin' and not SKIP_FILTER:
    #     from filters import filter_input
    #     target = filter_input
    # pool = multiprocessing.Pool(1)  # pylint: disable=not-callable
    # def filter_input_done_cb(ignored):
    #     application.closeAllWindows()
    # result = pool.apply_async(target, callback=filter_input_done_cb)
    # pylint: disable=unused-variable
    @atexit.register
    def exit_handler():
        """Clean up: restore the cursor when the process exits."""
        logging.info('atexit triggered')
        platform.show_cursor()
        # # terminate the pool so we don't sit forever waiting on our get()
        # logging.info('Terminating pool...')
        # pool.terminate()
        # logging.info('Joining pool...')
        # pool.join()
        # logging.info('Retrieving result...')
        # try:
        #     # raise any exceptions raised by the input filtering code
        #     result.get(0)
        # except multiprocessing.TimeoutError:
        #     logging.info('Timed out waiting for result.')
    # def duration_reached():
    #     logging.info('Duration reached, exiting...')
    #     sys.exit(0)
    # Run for DURATION and then exit
    # QtCore.QTimer.singleShot(DURATION, duration_reached)
    application.run()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(self, path, data, offset, fh):
    """Accept a FUSE write: append *data* to the staged file for *path*.

    Although the filesystem is nominally read-only, writes accumulate in
    the staged file and the tracked size is bumped accordingly.

    :return: number of bytes accepted
    """
    with self.attr_lock:
        entry = self.attr[path]
        staged = entry[STAGED_KEY]
        if not staged.closed:
            entry[BASE_KEY].st_size += len(data)
            staged.write(data)
        return len(data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _cleanup(self):
    """Evict cache entries older than ``MAX_CACHE_TIME``.

    Intended to be called from an asynchronous OS hook (e.g. release)
    so the scan does not impact user-facing calls.
    """
    with self.attr_lock:
        now = time()
        # Collect first: a dict cannot be mutated while iterating it.
        expired = [p for p in self.cache
                   if now - self.attr[p][TIMESTAMP_KEY] >= MAX_CACHE_TIME]
        for p in expired:
            del self.attr[p]
            del self.cache[p]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_args(args):
    '''
    Apply custom validation and actions based on parsed arguments.

    Parameters
    ----------
    args : argparse.Namespace
        Result from ``parse_args`` method of ``argparse.ArgumentParser``
        instance.

    Returns
    -------
    argparse.Namespace
        Reference to input ``args``, which have been validated/updated.
    '''
    logging.basicConfig(level=getattr(logging, args.log_level.upper()))
    if getattr(args, 'command', None) == 'install':
        # BUGFIX: `print >> sys.stderr` is Python 2 statement syntax and a
        # SyntaxError under Python 3; use the print() function instead.
        if args.requirements_file and not args.requirements_file.isfile():
            print('Requirements file not available: {}'
                  .format(args.requirements_file), file=sys.stderr)
            raise SystemExit(-1)
        elif not args.plugin and not args.requirements_file:
            print('Requirements file or at least one plugin '
                  'must be specified.', file=sys.stderr)
            raise SystemExit(-2)
    if hasattr(args, 'server_url'):
        logger.debug('Using MicroDrop index server: "%s"', args.server_url)
        args.server_url = SERVER_URL_TEMPLATE % args.server_url
    if all([args.plugins_directory is None,
            args.config_file is None]):
        args.plugins_directory = get_plugins_directory()
    elif args.plugins_directory is None:
        args.config_file = args.config_file.realpath()
        args.plugins_directory = get_plugins_directory(config_path=
                                                       args.config_file)
    else:
        args.plugins_directory = args.plugins_directory.realpath()
    return args
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def detect_events(self, max_attempts=3):
    """Return the list of `Event`s detected since the last snapshot.

    Compares the current Kindle Library state against the snapshot.  On
    success, the ``books`` and ``progress`` attributes are refreshed
    with the latest API results.

    :param max_attempts: how many times to retry the Kindle API
    :return: None if progress could not be retrieved, else the list of
        new `Event`s (always ending with an `UpdateEvent`)
    """
    # Attempt to retrieve current state from KindleAPI.
    # BUGFIX: `xrange` is Python 2 only; `range` behaves identically here
    # and is valid on both interpreters.
    for _ in range(max_attempts):
        try:
            with KindleCloudReaderAPI\
                    .get_instance(self.uname, self.pword) as kcr:
                self.books = kcr.get_library_metadata()
                self.progress = kcr.get_library_progress()
        except KindleAPIError:
            continue
        else:
            break
    else:
        # every attempt failed
        return None
    # Calculate diffs from new progress
    progress_map = {book.asin: self.progress[book.asin].locs[1]
                    for book in self.books}
    new_events = self._snapshot.calc_update_events(progress_map)
    # Timestamp the batch (microseconds dropped for stable comparisons).
    update_event = UpdateEvent(datetime.now().replace(microsecond=0))
    new_events.append(update_event)
    self._event_buf.extend(new_events)
    return new_events
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def commit_events(self):
    """Apply all buffered `Event`s to the internal state.

    Sorting guarantees a logical order: an event never needs data from a
    later event to be parsed (ADDs precede START READINGs, which precede
    READs).
    """
    for event in sorted(self._event_buf):
        self.store.record_event(event)
        self._snapshot.process_event(event)
    self._event_buf = []
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_registry_names(self, registry):
""" Returns functions names for a registry """ |
return ', '.join(
f.__name__ if not isinstance(f, tuple) else f[0].__name__
for f in getattr(self, registry, [])) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def receiver(self, func=None, json=False):
    """Register *func* as a receiver callback.

    :param json: when true the callback is handed the JSON-decoded
        message instead of the raw text.
    """
    self.receivers.append((func, json))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sender(self, func, routing=None, routing_re=None):
    """Register *func* as a sender callback.

    :param routing: routing key(s) the callback listens to (a string is
        promoted to a one-element list)
    :param routing_re: regex routing pattern(s); compiled in place
    """
    if routing and not isinstance(routing, list):
        routing = [routing]
    if routing_re:
        if not isinstance(routing_re, list):
            routing_re = [routing_re]
        routing_re[:] = [re.compile(pattern) for pattern in routing_re]
    self.senders.append((func, routing, routing_re))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def call_openers(self, client, clients_list):
    """Invoke every registered opener callback for the new client."""
    for callback in self.openers:
        callback(client, clients_list)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def call_closers(self, client, clients_list):
    """Invoke every registered closer callback for the leaving client."""
    for callback in self.closers:
        callback(client, clients_list)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def call_receivers(self, client, clients_list, message):
    """Invoke registered receiver callbacks with *message*.

    Callbacks registered with ``json=True`` get the decoded payload and
    are skipped entirely when the message is not valid JSON.
    """
    try:
        decoded = json.loads(message)
    except ValueError:
        decoded = None
    for callback, wants_json in self.receivers:
        if wants_json:
            if decoded is None:
                # Not decodable: json-only callbacks never see raw text.
                continue
            payload = decoded
        else:
            payload = message
        callback(client, clients_list, payload)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def call_senders(self, routing, clients_list, *args, **kwargs):
    """Invoke sender callbacks whose routing rules accept *routing*.

    A callback fires when the message is global (no routing key given,
    or the callback declared no routing restrictions), when the key
    equals one of its literal keys, or when one of its regexes matches.
    """
    for callback, keys, patterns in self.senders:
        if routing is None or (keys is None and patterns is None):
            # published globally / callback listens to everything
            matched = True
        else:
            matched = bool(keys) and routing in keys
            if not matched and patterns:
                matched = any(p.match(routing) for p in patterns)
        if matched:
            callback(routing, clients_list, *args, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_websocket_server(self, host='localhost', port=9090, debug=False):
    """Start a blocking websocket server bound to *host*:*port*."""
    from .server import MeaseWebSocketServerFactory
    factory = MeaseWebSocketServerFactory(mease=self,
                                          host=host,
                                          port=port,
                                          debug=debug)
    factory.run_server()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stackclimber(height=0):
    """Obtain the name of the caller's module.

    Uses the inspect module to find the caller's position in the module
    hierarchy.  With the optional *height* argument, finds the caller's
    caller, and so forth.  (See http://stackoverflow.com/a/900404/48251.)
    """
    frame_globals = inspect.stack()[height + 1][0].f_globals
    path = frame_globals['__name__'].split('__main__')[0].strip('.')
    if not path:
        # Running as __main__: fall back to the package name, or failing
        # that the script's basename without extension.
        if frame_globals['__package__']:
            path = frame_globals['__package__']
        else:
            path = os.path.basename(sys.argv[0]).split('.')[0]
    return path
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def flatten_list(l):
    """Flatten nested lists one level into a single list.

    Strings are kept whole rather than being split into characters.
    """
    flat = []
    for item in l:
        if isinstance(item, str):
            flat.append(item)
        else:
            flat.extend(item)
    return flat
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_diff(list1, list2):
    """Symmetric list difference.

    Items of either list that are absent from the other, in order:
    *list1*'s extras first, then *list2*'s.
    """
    return ([x for x in list1 if x not in list2] +
            [x for x in list2 if x not in list1])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def asym_list_diff(list1, list2):
    """Asymmetric list difference: items of *list1* missing from *list2*."""
    return [item for item in list1 if item not in list2]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def next_tokens_in_sequence(observed, current):
    """Work out which observed tokens should be emitted next.

    Walks *current*, advancing a cursor through *observed* for each word
    found there, and returns the still-unconsumed tail of *observed*.
    """
    cursor = 0
    for word in current:
        if observed[cursor:].count(word):
            found_at = observed.index(word, cursor)
            cursor = max(cursor + 1, found_at)
        # words not present ahead of the cursor do not advance it
    return observed[cursor:] if cursor < len(observed) else []
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_latex(self):
    """Return a LaTeX interval representation of [low, high].

    A degenerate interval renders as a bare number; infinite endpoints
    use open brackets with ``\\infty``.  Values that are whole at one
    decimal are printed without decimals, everything else with two.
    """
    def _whole(v):
        # True when v has no fractional part at one-decimal resolution.
        return v * 10 % 10 == 0

    if self.low == self.high:
        if _whole(self.low):
            return "{0:d}".format(int(self.low))
        return "{0:0.2f}".format(self.low)
    if self.low == -np.inf:
        left = r"(-\infty, "
    elif _whole(self.low):
        left = r"[{0:d}, ".format(int(self.low))
    else:
        left = r"[{0:0.2f}, ".format(self.low)
    if self.high == np.inf:
        right = r"\infty)"
    elif _whole(self.high):
        right = r"{0:d}]".format(int(self.high))
    else:
        right = r"{0:0.2f}]".format(self.high)
    return left + right
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main():
    """Main entry point for the `respect` command.

    Validates the username, fetches the GitHub user resource and
    dispatches according to the response status code.
    """
    args = parse_respect_args(sys.argv[1:])
    if validate_username(args['<username>']):
        print("processing...")
    else:
        print("@"+args['<username>'], "is not a valid username.")
        print("Username may only contain alphanumeric ASCII characters or "
              "dashes and cannot begin with a dash.")
        return
    try:
        r = requests.get(urljoin(GITHUB_USERS, args['<username>']))
    except ConnectionErrorException as e:
        print('Connection Error from requests. Request again, please.')
        print(e)
        # BUGFIX: without this return, `r` is unbound below and the
        # function crashed with NameError after a connection error.
        return
    if r.status_code == 404 or r.status_code == 403:
        session = login(401, args=args)
        return dispatch(args, r, session)
    elif r.status_code == 200:
        return dispatch(args, response=r)
    else:
        raise UnknownStausCodeException
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clear_caches():
    # suppress(unused-function)
    """Clear all caches, closing any open word-graph readers first."""
    for _, reader in _spellchecker_cache.values():
        reader.close()
    for cache in (_spellchecker_cache,
                  _valid_words_cache,
                  _user_dictionary_cache):
        cache.clear()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _comment_system_for_file(contents):
"""For file contents, return the comment system.""" |
if contents[0] == "#":
return FileCommentSystem(begin="#", middle="", end="", single="#")
elif contents[:2] == "/*":
return FileCommentSystem(begin="/*", middle="*", end="*/", single="//")
elif contents[:2] == "//":
return FileCommentSystem(begin="//", middle="//", end="", single="//")
elif contents[:3] == "rem":
return FileCommentSystem(begin="rem",
middle="rem",
end="",
single="rem")
else:
raise RuntimeError("Couldn't detect comment "
"system from {0}".format(contents[:3])) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _split_line_with_offsets(line):
"""Split a line by delimiter, but yield tuples of word and offset. This function works by dropping all the english-like punctuation from a line (so parenthesis preceded or succeeded by spaces, periods, etc) and then splitting on spaces. """ |
for delimiter in re.finditer(r"[\.,:\;](?![^\s])", line):
span = delimiter.span()
line = line[:span[0]] + " " + line[span[1]:]
for delimiter in re.finditer(r"[\"'\)\]\}>](?![^\.,\;:\"'\)\]\}>\s])",
line):
span = delimiter.span()
line = line[:span[0]] + " " + line[span[1]:]
for delimiter in re.finditer(r"(?<![^\.,\;:\"'\(\[\{<\s])[\"'\(\[\{<]",
line):
span = delimiter.span()
line = line[:span[0]] + " " + line[span[1]:]
# Treat hyphen separated words as separate words
line = line.replace("-", " ")
# Remove backticks
line = line.replace("`", " ")
for match in re.finditer(r"[^\s]+", line):
content = match.group(0)
if content.strip() != "":
yield (match.span()[0], content) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_dictionary_file(dictionary_path):
    """Return all words in the dictionary file at *dictionary_path* as a set.

    Results are memoised in ``_user_dictionary_cache``; a missing or
    empty path yields an (uncached) empty set.
    """
    try:
        return _user_dictionary_cache[dictionary_path]
    except KeyError:
        if dictionary_path and os.path.exists(dictionary_path):
            with open(dictionary_path, "rt") as dict_f:
                words = set(re.findall(r"(\w[\w']*\w|\w)",
                                       " ".join(dict_f.read().splitlines())))
            # BUGFIX: the cache was consulted above but never populated,
            # so every call re-read and re-parsed the file.
            _user_dictionary_cache[dictionary_path] = words
            return words
        return set()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def valid_words_set(path_to_user_dictionary=None, user_dictionary_words=None):
    """Get a set of valid words.

    If *path_to_user_dictionary* is specified, the user-dictionary words
    (passed as *user_dictionary_words*) are added to the base word set,
    both verbatim and lower-cased.  Results are cached per dictionary
    path.
    """
    try:
        return _valid_words_cache[path_to_user_dictionary]
    except KeyError:
        pass

    def _lines_of(binary_file):
        """Decode a binary file into its text lines."""
        return binary_file.read().decode("ascii").splitlines()

    words = set()
    with resource_stream("polysquarelinter", "en_US.txt") as words_file:
        words |= set("".join(l).lower() for l in _lines_of(words_file))
    if path_to_user_dictionary:
        # Keep both case-sensitive and case-insensitive variants of the
        # user-dictionary words, as they may be checked both as regular
        # words and as technical words.
        words |= set(w.lower() for w in user_dictionary_words)
        words |= user_dictionary_words
    _valid_words_cache[path_to_user_dictionary] = words
    return words
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_word_graph_file(name, file_storage, word_set):
    """Write *word_set* into a word-graph file named *name* and return a
    RAM-backed handle to it."""
    graph_file = file_storage.create_file(name)
    spelling.wordlist_to_graph_file(sorted(word_set), graph_file)
    return copy_to_ram(file_storage).open_file(name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter_nonspellcheckable_tokens(line, block_out_regexes=None):
    """Return *line* with paths, urls and emails blanked out.

    Matches are replaced by an equal number of spaces so offsets into
    the line stay valid.  Extra patterns may be supplied through
    *block_out_regexes*.
    """
    patterns = [
        r"[^\s]*:[^\s]*[/\\][^\s]*",
        r"[^\s]*[/\\][^\s]*",
        r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]+\b"
    ]
    patterns.extend(block_out_regexes or [])
    for pattern in patterns:
        for match in re.finditer(pattern, line):
            blanks = " " * (match.end() - match.start())
            line = line[:match.start()] + blanks + line[match.end():]
    return line
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _chunk_from_ranges(contents_lines,
                       start_line_index,
                       start_column_index,
                       end_line_index,
                       end_column_index):
    """Create a _ChunkInfo from a line/column range.

    :param contents_lines: the raw lines of a file.
    """
    # A single-line chunk is sliced twice from the same string, so the
    # end column must be rebased after the start column is cut off.
    if start_line_index == end_line_index:
        end_column_index -= start_column_index
    selected = contents_lines[start_line_index:end_line_index + 1]
    selected[0] = selected[0][start_column_index:]
    selected[-1] = selected[-1][:end_column_index]
    return _ChunkInfo(start_line_index, start_column_index, selected)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _token_at_col_in_line(line, column, token, token_len=None):
"""True if token is at column.""" |
if not token_len:
token_len = len(token)
remaining_len = len(line) - column
return (remaining_len >= token_len and
line[column:column + token_len] == token) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _maybe_append_chunk(chunk_info, line_index, column, contents, chunks):
    """Append a chunk built from *chunk_info* to *chunks*, if one is set.

    *chunk_info* is a (start_line, start_column) pair, or falsy when no
    chunk is pending.
    """
    if not chunk_info:
        return
    start_line, start_column = chunk_info[0], chunk_info[1]
    chunks.append(_chunk_from_ranges(contents,
                                     start_line,
                                     start_column,
                                     line_index,
                                     column))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_spellcheckable_chunks(contents, comment_system):
"""Given some contents for a file, find chunks that can be spellchecked. This applies the following rules: 1. If the comment system comments individual lines, that whole line can be spellchecked from the point of the comment 2. If a comment-start marker or triple quote is found, keep going until a comment end marker or matching triple quote is found. 3. In both cases, ignore anything in triple backticks. """ |
# Parsing is a small state machine: the current state object decides
# how far to advance and whether a finished chunk should be recorded.
state = InTextParser()
comment_system_transitions = CommentSystemTransitions(comment_system)
chunks = []

for line_index, line in enumerate(contents):
    column = 0
    line_len = len(line)
    escape_next = False

    # We hit a new line. If we were waiting until the end of the line
    # then add a new chunk in here
    (state,
     column_delta,
     chunk_info) = state.get_transition(line,
                                        line_index,
                                        0,
                                        False,
                                        comment_system_transitions)
    # Any chunk produced at this point ended on the previous line,
    # hence the line_index - 1 / previous-line-length coordinates.
    _maybe_append_chunk(chunk_info,
                        line_index - 1,
                        len(contents[line_index - 1]),
                        contents,
                        chunks)
    column += column_delta

    while column < line_len:
        # Check if the next character should be considered as escaped. That
        # only happens if we are not escaped and the current character is
        # a backslash.
        is_escaped = escape_next
        escape_next = not is_escaped and line[column] == "\\"
        (state,
         column_delta,
         chunk_info) = state.get_transition(line,
                                            line_index,
                                            column,
                                            is_escaped,
                                            comment_system_transitions)
        _maybe_append_chunk(chunk_info,
                            line_index,
                            column,
                            contents,
                            chunks)
        # The state tells us how many characters it consumed.
        column += column_delta

# Flush the final state at end-of-file so a chunk still open on the
# last line is not lost.
last_line_index = len(contents) - 1
(state,
 column_delta,
 chunk_info) = state.get_transition(contents[-1],
                                    last_line_index,
                                    len(contents[-1]),
                                    False,
                                    comment_system_transitions,
                                    eof=True)
_maybe_append_chunk(chunk_info,
                    last_line_index,
                    len(contents[last_line_index]),
                    contents,
                    chunks)

return chunks
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def spellcheckable_and_shadow_contents(contents, block_out_regexes=None):
"""For contents, split into spellcheckable and shadow parts. :contents: is a list of lines in a file. The return value is a tuple of (chunks, shadow_contents). chunks is a list of _ChunkInfo, each of which contain a region of text to be spell-checked. shadow_contents is an array of characters and integers. The characters represent nonspellcheckable regions and any region which will be subject to spellcheck is denoted by a zero in place of that character. """ |
def spellcheckable_and_shadow_contents(contents, block_out_regexes=None):
    """For contents, split into spellcheckable and shadow parts.

    :contents: is a list of lines in a file.

    The return value is a tuple of (chunks, shadow_contents). chunks is
    a list of _ChunkInfo, each of which contains a region of text to be
    spell-checked. shadow_contents is an array of characters and
    integers; the characters represent nonspellcheckable regions and
    any region subject to spellcheck is denoted by a zero in place of
    that character.
    """
    # Nothing to do for an empty file (idiomatic emptiness test instead
    # of `len(contents)`).
    if not contents:
        return ([], [])

    # The comment system is detected from the first line only.
    comment_system = _comment_system_for_file(contents[0])

    # Shadow contents excludes anything in quotes
    chunks = _find_spellcheckable_chunks(contents, comment_system)
    shadow_contents = _shadow_contents_from_chunks(contents,
                                                  chunks,
                                                  block_out_regexes)

    return (chunks, shadow_contents)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _split_into_symbol_words(sym):
"""Split a technical looking word into a set of symbols. This handles cases where technical words are separated by dots or arrows, as is the convention in many programming languages. """ |
punc = r"[\s\-\*/\+\.,:\;=\)\(\[\]\{\}<>\|\?&\^\$@]"
words = [w.strip() for w in re.split(punc, sym)]
return words |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _error_if_word_invalid(word, valid_words_dictionary, technical_words_dictionary, line_offset, col_offset):
"""Return SpellcheckError if this non-technical word is invalid.""" |
def _error_if_word_invalid(word,
                           valid_words_dictionary,
                           technical_words_dictionary,
                           line_offset,
                           col_offset):
    """Return SpellcheckError if this non-technical word is invalid."""
    plain_result = valid_words_dictionary.corrections(word.lower())

    if technical_words_dictionary:
        technical_result = technical_words_dictionary.corrections(word)
    else:
        # With no technical dictionary available, treat the technical
        # lookup as a miss so only the plain dictionary decides.
        technical_result = Dictionary.Result(False, list())

    # A hit in either dictionary makes the word acceptable.
    if plain_result.valid or technical_result.valid:
        return None

    return SpellcheckError(word,
                           line_offset,
                           col_offset,
                           plain_result.suggestions,
                           SpellcheckError.InvalidWord)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _error_if_symbol_unused(symbol_word, technical_words_dictionary, line_offset, col_offset):
"""Return SpellcheckError if this symbol is not used in the code.""" |
def _error_if_symbol_unused(symbol_word,
                            technical_words_dictionary,
                            line_offset,
                            col_offset):
    """Return SpellcheckError if this symbol is not used in the code."""
    # A wide edit distance and no required prefix makes this more of a
    # presence test than a spelling correction.
    lookup = technical_words_dictionary.corrections(symbol_word,
                                                    distance=5,
                                                    prefix=0)
    if lookup.valid:
        return None

    return SpellcheckError(symbol_word,
                           line_offset,
                           col_offset,
                           lookup.suggestions,
                           SpellcheckError.TechnicalWord)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def should_terminate_now(self, line, waiting_for):
"""Whether parsing within a comment should terminate now. This is used for comment systems where there is no comment-ending character. We need it for parsing disabled regions where we don't know where a comment block ends, but we know that a comment block could end at a line ending. It returns true if, for a given line, line is not a comment. """ |
def should_terminate_now(self, line, waiting_for):
    """Whether parsing within a comment should terminate now.

    Used for comment systems with no comment-ending character: a
    disabled region must end where a comment block could end, i.e. at a
    line that is no longer a comment. Returns True if, for a given
    line, line is not a comment.
    """
    terminator_possible = waiting_for in (ParserState.EOL, self._end)
    if terminator_possible and self._continue_regex:
        # A line failing the continuation pattern is not a comment, so
        # the comment block ends here.
        return re.match(self._continue_regex, line) is None
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_transition(self, # suppress(too-many-arguments) line, line_index, column, is_escaped, comment_system_transitions, eof=False):
"""Return a parser state, a move-ahead amount, and an append range. If this parser state should terminate and return back to the TEXT state, then return that state and also any corresponding chunk that would have been yielded as a result. """ |
def get_transition(self,  # suppress(too-many-arguments)
                   line,
                   line_index,
                   column,
                   is_escaped,
                   comment_system_transitions,
                   eof=False):
    """Return a parser state, a move-ahead amount, and an append range.

    If this parser state should terminate and return back to the TEXT
    state, then return that state and also any corresponding chunk that
    would have been yielded as a result.
    """
    # Abstract hook: concrete parser states must override this.
    message = """Cannot instantiate base ParserState"""
    raise NotImplementedError(message)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_transition(self, # suppress(too-many-arguments) line, line_index, column, is_escaped, comment_system_transitions, eof=False):
"""Get transition from InTextParser.""" |
def get_transition(self,  # suppress(too-many-arguments)
                   line,
                   line_index,
                   column,
                   is_escaped,
                   comment_system_transitions,
                   eof=False):
    """Get transition from InTextParser."""
    (next_state,
     begin_from,
     terminator) = comment_system_transitions.from_text(line,
                                                        line_index,
                                                        column,
                                                        is_escaped)

    # Still in plain text: consume a single character.
    if next_state == STATE_IN_TEXT:
        return (self, 1, None)

    # Entering a comment or quote: hand over to the matching parser and
    # jump ahead to the column where the new state begins.
    successors = {
        STATE_IN_COMMENT: InCommentParser,
        STATE_IN_QUOTE: InQuoteParser
    }
    return (successors[next_state](begin_from, terminator),
            begin_from[1] - column,
            None)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_transition(self, # suppress(too-many-arguments) line, line_index, column, is_escaped, comment_system_transitions, eof=False):
"""Get transition from DisabledParser.""" |
def get_transition(self,  # suppress(too-many-arguments)
                   line,
                   line_index,
                   column,
                   is_escaped,
                   comment_system_transitions,
                   eof=False):
    """Get transition from DisabledParser.

    Returns a (state, move-ahead, chunk) tuple like every other parser
    state. A disabled region spans from a triple-backtick fence to its
    closing fence, or to wherever the suspended comment ends.
    """
    # If we are at the beginning of a line, see if we should stop
    # processing from this point onward and get out - this will happen
    # if we reach the end of some comment block that doesn't have an
    # explicit end marker. We can't detect line endings here because we
    # want a disabled region to continue across multiple lines.
    if (column == 0 and
            comment_system_transitions.should_terminate_now(
                line,
                self._resume_waiting_for
            )):
        return (InTextParser(), 0, None)

    # Need to be a bit careful here, since we need to check what the
    # disabled parser was waiting for and disable on that, too.
    if (_token_at_col_in_line(line,
                              column,
                              "```",
                              3) and
            not _is_escaped(line, column, is_escaped)):
        # Hit the closing backtick fence, so we resume the old parser.
        return (self._resume_parser((line_index, column + 3),
                                    self._resume_waiting_for),
                3,
                None)
    elif self._resume_waiting_for != ParserState.EOL:
        wait_until_len = len(self._resume_waiting_for)
        if (_token_at_col_in_line(line,
                                  column,
                                  self._resume_waiting_for,
                                  wait_until_len) and
                not _is_escaped(line, column, is_escaped)):
            # The suspended comment's end marker appeared inside the
            # disabled region - the whole comment ends here, so skip
            # past the marker and return to plain text.
            #
            # BUGFIX: this previously computed len(self._waiting_until),
            # an attribute DisabledParser never sets (it tracks the
            # marker as self._resume_waiting_for), which would raise
            # AttributeError on this branch.
            return (InTextParser(),
                    wait_until_len,
                    None)
    elif eof:
        # We hit the end of the file and were still in a comment
        # state. Grab everything up to here.
        return (InTextParser(), 0, None)

    # Move ahead by one character otherwise
    return (self, 1, None)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_transition(self, # suppress(too-many-arguments) line, line_index, column, is_escaped, comment_system_transitions, eof=False):
"""Get transition from InCommentParser.""" |
del comment_system_transitions

if (_token_at_col_in_line(line,
                          column,
                          "```",
                          3) and
        not _is_escaped(line, column, is_escaped)):
    # Hit a disable token, so resume the last parser
    # The chunk collected so far (self._started_at) is emitted before
    # the disabled region begins.
    return (DisabledParser((line_index, column + 3),
                           self.__class__,
                           self._waiting_until), 3, self._started_at)
elif self._waiting_until != ParserState.EOL:
    wait_until_len = len(self._waiting_until)
    if (_token_at_col_in_line(line,
                              column,
                              self._waiting_until,
                              wait_until_len) and
            not _is_escaped(line, column, is_escaped)):
        # Skip ahead to end of this token
        return (InTextParser(),
                len(self._waiting_until),
                self._started_at)
    # NOTE(review): when the explicit end token is not at this column,
    # control falls through to the single-character advance below, so
    # the `eof` branch is never reached for comments with explicit end
    # markers - confirm this is the intended behaviour.
elif self._waiting_until == ParserState.EOL and column == 0:
    # We hit a new line and the state ends here. Return
    # corresponding state
    return (InTextParser(), 0, self._started_at)
elif eof:
    # We hit the end of the file and were still in a comment
    # state. Grab everything up to here.
    return (InTextParser(), 0, self._started_at)

# Move ahead by one character otherwise
return (self, 1, None)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_transition(self, # suppress(too-many-arguments) line, line_index, column, is_escaped, *args, **kwargs):
"""Get transition from InQuoteParser.""" |
def get_transition(self,  # suppress(too-many-arguments)
                   line,
                   line_index,
                   column,
                   is_escaped,
                   *args,
                   **kwargs):
    """Get transition from InQuoteParser."""
    del line_index
    del args
    del kwargs

    closing = self._waiting_until
    found_close = (_token_at_col_in_line(line,
                                         column,
                                         closing,
                                         len(closing)) and
                   not _is_escaped(line, column, is_escaped))
    # We always advance one character; an unescaped closing quote hands
    # control back to the plain-text parser (quotes yield no chunk).
    next_parser = InTextParser() if found_close else self
    return (next_parser, 1, None)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def corrections(self, word, prefix=1, distance=2):
"""Get corrections for word, if word is an invalid word. :prefix: is the number of characters the prefix of the word must have in common with the suggested corrections. :distance: is the character distance the corrections may have between the input word. This limits the number of available corrections but decreases the correction search space. The return value of this function is a Result tuple, with the :valid: member indicating whether the input word is a valid one and :suggestions: member containing a list of suggestions. """ |
def corrections(self, word, prefix=1, distance=2):
    """Get corrections for word, if word is an invalid word.

    :prefix: is the number of characters the prefix of the word must
    have in common with the suggested corrections. :distance: is the
    character distance the corrections may have from the input word.

    Returns a Result tuple whose :valid: member indicates whether the
    word is known and whose :suggestions: member lists corrections.
    """
    # Known words are valid and need no suggestions.
    if word in self._words:
        return Dictionary.Result(True, list())

    suggestions = self._corrector.suggest(word,
                                          prefix=prefix,
                                          maxdist=distance)
    return Dictionary.Result(False, suggestions)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_dom(data):
""" Creates doublelinked DOM from `data`. Args: data (str/HTMLElement):
Either string or HTML element. Returns: obj: HTMLElement containing double linked DOM. """ |
def _create_dom(data):
    """Create double-linked DOM from `data`.

    Args:
        data (str/HTMLElement): Either string or HTML element.

    Returns:
        obj: HTMLElement containing double linked DOM.
    """
    parsed = data
    # Strings are decoded and parsed first; an HTMLElement passes
    # straight through.
    if not isinstance(parsed, dhtmlparser.HTMLElement):
        parsed = dhtmlparser.parseString(
            utils.handle_encodnig(parsed)
        )

    dhtmlparser.makeDoubleLinked(parsed)
    return parsed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _locate_element(dom, el_content, transformer=None):
""" Find element containing `el_content` in `dom`. Use `transformer` function to content of all elements in `dom` in order to correctly transforming them to match them with `el_content`. Args: dom (obj):
HTMLElement tree. el_content (str):
Content of element will be picked from `dom`. transformer (fn, default None):
Transforming function. Note: `transformer` parameter can be for example simple lambda:: lambda x: x.strip() Returns: list: Matching HTMLElements. """ |
def _locate_element(dom, el_content, transformer=None):
    """Find element containing `el_content` in `dom`.

    The optional `transformer` is applied to each element's content
    before comparison (e.g. ``lambda x: x.strip()``).

    Returns:
        list: Matching HTMLElements.
    """
    # Delegate the content comparison to a matcher predicate.
    matcher = utils.content_matchs(el_content, transformer)
    return dom.find(None, fn=matcher)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _match_elements(dom, matches):
""" Find location of elements matching patterns specified in `matches`. Args: dom (obj):
HTMLElement DOM tree. matches (dict):
Structure: ``{"var": {"data": "match", ..}, ..}``. Returns: dict: Structure: ``{"var": {"data": HTMLElement_obj, ..}, ..}`` """ |
def _match_elements(dom, matches):
    """Find location of elements matching patterns specified in `matches`.

    Args:
        dom (obj): HTMLElement DOM tree.
        matches (dict): Structure: ``{"var": {"data": "match", ..}, ..}``.

    Returns:
        dict: Structure: ``{"var": {"data": HTMLElement_obj, ..}, ..}``

    Raises:
        UserWarning: When a variable cannot be located, or is found in
            more than one element.
    """
    out = {}
    for key, content in matches.items():
        pattern = content["data"].strip()

        # Multi-line patterns are compared token-wise; single-line
        # patterns on the stripped content.
        if "\n" in pattern:
            pattern = pattern.split()
            transformer = lambda x: x.strip().split()
        else:
            transformer = lambda x: x.strip()

        matching_elements = _locate_element(
            dom,
            pattern,
            transformer=transformer
        )

        not_found_msg = content.get("notfoundmsg", "").replace("$name", key)
        if not not_found_msg.strip():
            not_found_msg = "Can't locate variable '%s' with content '%s'!" % (
                key,
                pattern,
            )
        content["notfoundmsg"] = not_found_msg

        # in case of multiple elements, find only elements with proper tagname
        tagname = content.get("tagname", "").strip().lower()
        if tagname:
            # BUGFIX: materialize the filtered result. On Python 3,
            # filter() returns a lazy iterator, which breaks the len()
            # and truthiness checks below.
            matching_elements = [
                el for el in matching_elements
                if el.getTagName().strip().lower() == tagname
            ]

        if not matching_elements:
            raise UserWarning(not_found_msg)

        if len(matching_elements) > 1:
            raise UserWarning(
                "Ambigious content '%s'!" % content
                + "Content was found in multiple elements!"
            )

        out[key] = matching_elements[0]

    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _collect_paths(element):
""" Collect all possible path which leads to `element`. Function returns standard path from root element to this, reverse path, which uses negative indexes for path, also some pattern matches, like "this is element, which has neighbour with id 7" and so on. Args: element (obj):
HTMLElement instance. Returns: list: List of :class:`.PathCall` and :class:`.Chained` objects. """ |
output = []

# look for element by parameters - sometimes the ID is unique
path = vectors.el_to_path_vector(element)
root = path[0]
params = element.params if element.params else None
match = root.find(element.getTagName(), params)

# A single hit means tagname + params alone identify the element.
if len(match) == 1:
    output.append(
        PathCall("find", 0, [element.getTagName(), params])
    )

# look for element by neighbours
output.extend(path_patterns.neighbours_pattern(element))

# look for elements by patterns - element, which parent has tagname, and
# which parent has tagname ..
output.extend(path_patterns.predecesors_pattern(element, root))

index_backtrack = []
last_index_backtrack = []
params_backtrack = []
last_params_backtrack = []

# look for element by paths from root to element
# Each step records both a positive sibling index (from the start of
# the matching siblings) and a negative one (from the end), with and
# without the element's parameters.
for el in reversed(path):
    # skip root elements
    if not el.parent:
        continue

    tag_name = el.getTagName()
    match = el.parent.wfind(tag_name).childs
    index = match.index(el)

    index_backtrack.append(
        PathCall("wfind", index, [tag_name])
    )
    last_index_backtrack.append(
        PathCall("wfind", index - len(match), [tag_name])
    )

    # if element has some parameters, use them for lookup
    if el.params:
        match = el.parent.wfind(tag_name, el.params).childs
        index = match.index(el)

        params_backtrack.append(
            PathCall("wfind", index, [tag_name, el.params])
        )
        last_params_backtrack.append(
            PathCall("wfind", index - len(match), [tag_name, el.params])
        )
    else:
        params_backtrack.append(
            PathCall("wfind", index, [tag_name])
        )
        last_params_backtrack.append(
            PathCall("wfind", index - len(match), [tag_name])
        )

# The backtracks were collected leaf-to-root, so reverse them back
# into root-to-leaf order before chaining.
output.extend([
    Chained(reversed(params_backtrack)),
    Chained(reversed(last_params_backtrack)),
    Chained(reversed(index_backtrack)),
    Chained(reversed(last_index_backtrack)),
])

return output
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _is_working_path(dom, path, element):
""" Check whether the path is working or not. Aply proper search function interpreting `path` to `dom` and check, if returned object is `element`. If so, return ``True``, otherwise ``False``. Args: dom (obj):
HTMLElement DOM. path (obj):
:class:`.PathCall` Instance containing informations about path and which function it require to obtain element the path is pointing to. element (obj):
HTMLElement instance used to decide whether `path` points to correct `element` or not. Returns: bool: True if `path` correctly points to proper `element`. """ |
def i_or_none(el, i):
    """
    Return ``el[i]`` if the list is not blank, or None otherwise.

    Args:
        el (list, tuple): Any indexable object.
        i (int): Index.

    Returns:
        obj: Element at index `i` if `el` is not blank, or ``None``.
    """
    if not el:
        return None

    return el[i]

# map decoders of all paths to one dictionary to make easier to call them
path_functions = {
    "find": lambda el, index, params:
        i_or_none(el.find(*params), index),
    "wfind": lambda el, index, params:
        i_or_none(el.wfind(*params).childs, index),
    "match": lambda el, index, params:
        i_or_none(el.match(*params), index),
    "left_neighbour_tag": lambda el, index, neigh_data:
        i_or_none(
            el.find(
                neigh_data.tag_name,
                neigh_data.params,
                fn=utils.has_neigh(*neigh_data.fn_params, left=True)
            ),
            index
        ),
    "right_neighbour_tag": lambda el, index, neigh_data:
        i_or_none(
            el.find(
                neigh_data.tag_name,
                neigh_data.params,
                fn=utils.has_neigh(*neigh_data.fn_params, left=False)
            ),
            index
        ),
}

# call all decoders and see what you get from them
el = None
if isinstance(path, PathCall):
    # A single call resolves directly against the DOM root.
    el = path_functions[path.call_type](dom, path.index, path.params)
elif isinstance(path, Chained):
    # A chain is applied step by step; an empty intermediate result
    # means the whole path is broken.
    for path in path.chain:
        dom = path_functions[path.call_type](dom, path.index, path.params)
        if not dom:
            return False
    el = dom
else:
    raise UserWarning(
        "Unknown type of path parameters! (%s)" % str(path)
    )

if not el:
    return False

# test whether returned item is the item we are looking for
return el.getContent().strip() == element.getContent().strip()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def select_best_paths(examples):
""" Process `examples`, select only paths that works for every example. Select best paths with highest priority. Args: examples (dict):
Output from :func:`.read_config`. Returns: list: List of :class:`.PathCall` and :class:`.Chained` objects. """ |
def select_best_paths(examples):
    """Process `examples`, select only paths that work for every example.

    Select best paths with highest priority.

    Args:
        examples (dict): Output from :func:`.read_config`.

    Returns:
        list: List of :class:`.PathCall` and :class:`.Chained` objects.
    """
    possible_paths = {}  # {varname: [paths]}

    # collect list of all possible paths to all existing variables
    for example in examples:
        dom = _create_dom(example["html"])
        matching_elements = _match_elements(dom, example["vars"])

        for key, match in matching_elements.items():
            if key not in possible_paths:  # TODO: merge paths together?
                possible_paths[key] = _collect_paths(match)

    # leave only paths that work in all examples where they are required
    for example in examples:
        dom = _create_dom(example["html"])
        matching_elements = _match_elements(dom, example["vars"])

        for key, paths in possible_paths.items():
            if key not in matching_elements:
                continue

            # BUGFIX: evaluate eagerly with a list comprehension. The
            # previous lazy `filter()` object (Python 3) only ran its
            # predicate at sort time, after `dom` and
            # `matching_elements` had been rebound to the last example,
            # so earlier examples were never actually checked.
            possible_paths[key] = [
                path for path in paths
                if _is_working_path(dom, path, matching_elements[key])
            ]

    priorities = [
        "find",
        "left_neighbour_tag",
        "right_neighbour_tag",
        "wfind",
        "match",
        "Chained"
    ]
    # Map each call type to its rank in the priority table.
    priorities = {name: rank for rank, name in enumerate(priorities)}

    # sort all paths by priority table; unknown call types sort last
    for key in possible_paths.keys():
        possible_paths[key] = list(sorted(
            possible_paths[key],
            key=lambda x: priorities.get(x.call_type, 100)
        ))

    return possible_paths
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _assert_obj_type(pub, name="pub", obj_type=DBPublication):
""" Make sure, that `pub` is instance of the `obj_type`. Args: pub (obj):
Instance which will be checked. name (str):
Name of the instance. Used in exception. Default `pub`. obj_type (class):
Class of which the `pub` should be instance. Default :class:`.DBPublication`. Raises: InvalidType: When the `pub` is not instance of `obj_type`. """ |
def _assert_obj_type(pub, name="pub", obj_type=DBPublication):
    """Make sure that `pub` is an instance of `obj_type`.

    Args:
        pub (obj): Instance which will be checked.
        name (str): Name of the instance, used in the exception text.
        obj_type (class): Required class. Default :class:`.DBPublication`.

    Raises:
        InvalidType: When `pub` is not an instance of `obj_type`.
    """
    # Early return for the valid case keeps the raise unindented.
    if isinstance(pub, obj_type):
        return

    raise InvalidType(
        "`%s` have to be instance of %s, not %s!" % (
            name,
            obj_type.__name__,
            pub.__class__.__name__
        )
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_publication(pub):
""" Save `pub` into database and into proper indexes. Attr: pub (obj):
Instance of the :class:`.DBPublication`. Returns: obj: :class:`.DBPublication` without data. Raises: InvalidType: When the `pub` is not instance of :class:`.DBPublication`. UnindexablePublication: When there is no index (property) which can be used to index `pub` in database. """ |
# Reject anything that is not a DBPublication.
_assert_obj_type(pub)

# Hand the object to the storage handler for persistence.
_get_handler().store_object(pub)

# Return a lightweight communication object (no data payload).
return pub.to_comm(light_request=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clear_dir(self):
"""Clear the output directory of all output files.""" |
def clear_dir(self):
    """Clear the output directory of all output files."""
    # Only pickle snapshots are removed; other files are left alone.
    # NOTE(review): assumes get_filenames yields paths usable by
    # os.remove directly - confirm against output_utils.
    pickles = [name
               for name in output_utils.get_filenames(self.output_dir)
               if name.endswith('.pkl')]
    for snapshot in pickles:
        os.remove(snapshot)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_snapshot_time(self, output_every=None, t_output_every=None):
"""Determine whether or not the model's iteration number is one where the runner is expected to make an output snapshot. """ |
def is_snapshot_time(self, output_every=None, t_output_every=None):
    """Determine whether or not the model's iteration number is one
    where the runner is expected to make an output snapshot.
    """
    # A time interval takes precedence and is converted into an
    # iteration interval via the model timestep.
    # NOTE(review): `//` already floors, so round() is redundant here;
    # confirm whether true division was intended.
    interval = output_every
    if t_output_every is not None:
        interval = int(round(t_output_every // self.model.dt))
    return self.model.i % interval == 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iterate(self, n=None, n_upto=None, t=None, t_upto=None, output_every=None, t_output_every=None):
"""Run the model for a number of iterations, expressed in a number of options. Only one iteration argument should be passed. Only one output arguments should be passed. Parameters n: int Run the model for `n` iterations from its current point. n_upto: int Run the model so that its iteration number is at least `n_upto`. t: float Run the model for `t` time from its current point. t_upto: float Run the model so that its time is at least `t_upto`. output_every: int How many iterations should elapse between making model snapshots. t_upto: float How much time should elapse between making model snapshots. """ |
# Each supported argument form is reduced to an absolute iteration
# bound `n_upto`; later assignments take precedence (n over t_upto
# over t).
if t is not None:
    t_upto = self.model.t + t
if t_upto is not None:
    # NOTE(review): `//` already floors, so round() is redundant here;
    # confirm whether true division was intended.
    n_upto = int(round(t_upto // self.model.dt))
if n is not None:
    n_upto = self.model.i + n
# `<=` makes the bound inclusive, so a snapshot can still be taken at
# the final iteration number.
while self.model.i <= n_upto:
    if self.is_snapshot_time(output_every, t_output_every):
        self.make_snapshot()
    self.model.iterate()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_snapshot(self):
"""Output a snapshot of the current model state, as a pickle of the `Model` object in a file inside the output directory, with a name determined by its iteration number. """ |
filename = join(self.output_dir, '{:010d}.pkl'.format(self.model.i))
output_utils.model_to_file(self.model, filename) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_line(self, line):
"""Parser for the debugging shell. Treat everything after the first token as one literal entity. Whitespace characters between the first token and the next first non-whitespace character are preserved. For example, ' foo dicj didiw ' is parsed as ( 'foo', ' dicj didiw ' ) Returns: A tuple (cmd, args), where the args is a list that consists of one and only one string containing everything after the cmd as is. """ |
def parse_line(self, line):
    """Parser for the debugging shell.

    The first token is the command; everything after it (including
    the whitespace immediately following the command) is preserved
    verbatim as a single argument.

    Returns:
        A tuple (cmd, args), where args is a one-element list holding
        everything after cmd as is.
    """
    stripped = line.lstrip()
    # shlex tokenization is used only to identify the command word.
    command = shlex.split(stripped)[0]
    remainder = stripped[len(command):]
    return command, [remainder]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bind(self, **kwargs):
    '''
    Creates a copy of the object without the cached results and with
    the given keyword arguments as properties.
    '''
    d = dict(self.__dict__)
    # BUGFIX: iterate over a snapshot of the keys. Deleting entries
    # while iterating the live key view raises RuntimeError on
    # Python 3.
    for k in list(d.keys()):
        if k[0] == '_':
            # Drop cached/private state so the copy starts fresh.
            del d[k]
        elif k.startswith('obj_'):
            # Nested bindable objects are re-bound recursively.
            d[k] = d[k].bind(**kwargs)
    d.update(kwargs)
    return self.__class__(**d)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, name, *subkey):
""" retrieves a data item, or loads it if it is not present. """ |
def get(self, name, *subkey):
    """Retrieve a data item, or load it if it is not present.

    With no extra arguments the atomic item `name` is returned;
    otherwise the extra arguments form the subkey tuple.
    """
    # BUGFIX: `*subkey` collects into a tuple, so the original
    # comparison `subkey == []` was always False and every call was
    # routed to get_subkey. Test emptiness instead.
    if not subkey:
        return self.get_atomic(name)
    return self.get_subkey(name, tuple(subkey))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def val(self, name):
""" retrieves a value, substituting actual values for ConfigValue templates. """ |
def val(self, name):
    """Retrieve a value, substituting actual values for ConfigValue
    templates.
    """
    candidate = getattr(self, name)
    # Template objects expose retrieve_value(); resolve them against
    # this object's attribute dictionary.
    if hasattr(candidate, 'retrieve_value'):
        return candidate.retrieve_value(self.__dict__)
    return candidate
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def harvest_collection(community_name):
"""Harvest a Zenodo community's record metadata. Examples -------- You can harvest record metadata for a Zenodo community via its identifier name. For example, the identifier for LSST Data Management's Zenodo collection is ``'lsst-dm'``: ``collection`` is a :class:`~zenodio.harvest.Datacite3Collection` instance. Use its :meth:`~zenodio.harvest.Datacite3Collection.records` method to generate :class:`~zenodio.harvest.Datacite3Record` objects for individual records in the Zenodo collection. Parameters community_name : str Zenodo community identifier. Returns ------- collection : :class:`zenodio.harvest.Datacite3Collection` The :class:`~zenodio.harvest.Datacite3Collection` instance with record metadata downloaded from Zenodo. """ |
def harvest_collection(community_name):
    """Harvest a Zenodo community's record metadata.

    Parameters
    ----------
    community_name : str
        Zenodo community identifier.

    Returns
    -------
    collection : :class:`zenodio.harvest.Datacite3Collection`
        Collection instance with record metadata downloaded from Zenodo.

    Raises
    ------
    requests.HTTPError
        If the OAI-PMH endpoint responds with an error status.
    """
    url = zenodo_harvest_url(community_name)
    r = requests.get(url)
    # BUGFIX: the original accessed `r.status_code` as a bare statement
    # (a no-op), silently ignoring HTTP errors. Fail fast instead.
    r.raise_for_status()
    return Datacite3Collection.from_collection_xml(r.content)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def zenodo_harvest_url(community_name, format='oai_datacite3'):
"""Build a URL for the Zenodo Community's metadata. Parameters community_name : str Zenodo community identifier. format : str OAI-PMH metadata specification name. See https://zenodo.org/dev. Currently on ``oai_datacite3`` is supported. Returns ------- url : str OAI-PMH metadata URL. """ |
def zenodo_harvest_url(community_name, format='oai_datacite3'):
    """Build a URL for the Zenodo Community's metadata.

    Parameters
    ----------
    community_name : str
        Zenodo community identifier.
    format : str
        OAI-PMH metadata specification name (see https://zenodo.org/dev).

    Returns
    -------
    url : str
        OAI-PMH metadata URL.
    """
    # The community identifier is namespaced with a `user-` prefix in
    # the OAI-PMH set parameter.
    return ('http://zenodo.org/oai2d?verb=ListRecords&'
            'metadataPrefix={0}&set=user-{1}').format(format,
                                                      community_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _pluralize(value, item_key):
""""Force the value of a datacite3 key to be a list. ['Sick, Jonathan', 'Economou, Frossie'] Background When `xmltodict` proceses metadata, it turns XML tags into new key-value pairs whenever possible, even if the value should semantically be treated as a `list`. For example .. code-block:: xml <authors> <author>Sick, Jonathan</author> </authors Would be rendered by `xmltodict` as:: {'authors': {'author': 'Sick, Jonathan'}} While .. code-block:: xml <authors> <author>Sick, Jonathan</author> <author>Economou, Frossie</author> </authors is rendered by `xmltodict` as:: {'authors': [{'author': ['Sick, Jonathan', 'Economou, Frossie']}} This function ensures that values are *always* lists so that they can be treated uniformly. Parameters value : obj The value of a key from datacite metadata extracted by `xmltodict`. For example, `xmldict['authors']`. item_key : str Name of the tag for each item; for example, with the `'authors'` key the item key is `'author'`. Returns ------- item_values : list List of values of all items. """ |
v = value[item_key]
if not isinstance(v, list):
# Force a singular value to be a list
return [v]
else:
return v |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_xmldict(cls, xml_dict):
"""Create an `Author` from a datacite3 metadata converted by `xmltodict`. Parameters xml_dict : :class:`collections.OrderedDict` A `dict`-like object mapping XML content for a single record (i.e., the contents of the ``record`` tag in OAI-PMH XML). This dict is typically generated from :mod:`xmltodict`. """ |
def from_xmldict(cls, xml_dict):
    """Create an `Author` from datacite3 metadata converted by
    `xmltodict`.

    Parameters
    ----------
    xml_dict : :class:`collections.OrderedDict`
        `dict`-like XML content for a single record, as produced by
        :mod:`xmltodict`.
    """
    # `affiliation` is optional; forward it only when present so the
    # class default applies otherwise.
    extra = ({'affiliation': xml_dict['affiliation']}
             if 'affiliation' in xml_dict else {})
    return cls(xml_dict['creatorName'], **extra)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def published(self, for_user=None):
""" For non-staff users, return items with a published status and whose publish and expiry dates fall before and after the current date when specified. """ |
def published(self, for_user=None):
    """For non-staff users, return items with a published status and
    whose publish and expiry dates fall before and after the current
    date when specified.
    """
    from yacms.core.models import CONTENT_STATUS_PUBLISHED
    # Staff see everything, published or not.
    if for_user is not None and for_user.is_staff:
        return self.all()
    # Missing dates are treated as unrestricted.
    live_now = Q(publish_date__lte=now()) | Q(publish_date__isnull=True)
    not_expired = Q(expiry_date__gte=now()) | Q(expiry_date__isnull=True)
    return self.filter(live_now,
                       not_expired,
                       Q(status=CONTENT_STATUS_PUBLISHED))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search(self, query, search_fields=None):
""" Build a queryset matching words in the given search query, treating quoted terms as exact phrases and taking into account + and - symbols as modifiers controlling which terms to require and exclude. """ |
# ### DETERMINE FIELDS TO SEARCH ###

# Use search_fields arg if given, otherwise use search_fields
# initially configured by the manager class.
if search_fields:
    self._search_fields = search_fields_to_dict(search_fields)
if not self._search_fields:
    return self.none()

# ### BUILD LIST OF TERMS TO SEARCH FOR ###

# Remove extra spaces, put modifiers inside quoted terms.
terms = " ".join(query.split()).replace("+ ", "+") \
                               .replace('+"', '"+') \
                               .replace("- ", "-") \
                               .replace('-"', '"-') \
                               .split('"')
# Strip punctuation other than modifiers from terms and create
# terms list, first from quoted terms and then remaining words.
# (After split('"'), odd positions hold quoted phrases and even
# positions hold the unquoted residue, which is re-split into words.)
terms = [("" if t[0:1] not in "+-" else t[0:1]) + t.strip(punctuation)
         for t in terms[1::2] + "".join(terms[::2]).split()]
# Remove stop words from terms that aren't quoted or use
# modifiers, since words with these are an explicit part of
# the search query. If doing so ends up with an empty term
# list, then keep the stop words.
terms_no_stopwords = [t for t in terms if t.lower() not in
                      settings.STOP_WORDS]
get_positive_terms = lambda terms: [t.lower().strip(punctuation)
                                    for t in terms if t[0:1] != "-"]
positive_terms = get_positive_terms(terms_no_stopwords)
if positive_terms:
    terms = terms_no_stopwords
else:
    positive_terms = get_positive_terms(terms)
# Append positive terms (those without the negative modifier)
# to the internal list for sorting when results are iterated.
if not positive_terms:
    return self.none()
else:
    self._search_terms.update(positive_terms)

# ### BUILD QUERYSET FILTER ###

# Create the queryset combining each set of terms:
# excluded ("-term") must match no field, required ("+term") must
# match some field, optional terms may match any field.
excluded = [reduce(iand, [~Q(**{"%s__icontains" % f: t[1:]}) for f in
                          self._search_fields.keys()]) for t in terms
            if t[0:1] == "-"]
required = [reduce(ior, [Q(**{"%s__icontains" % f: t[1:]}) for f in
                         self._search_fields.keys()]) for t in terms
            if t[0:1] == "+"]
optional = [reduce(ior, [Q(**{"%s__icontains" % f: t}) for f in
                         self._search_fields.keys()]) for t in terms
            if t[0:1] not in "+-"]
queryset = self
if excluded:
    queryset = queryset.filter(reduce(iand, excluded))
if required:
    queryset = queryset.filter(reduce(iand, required))
# Optional terms aren't relevant to the filter if there are
# terms that are explicitly required.
elif optional:
    queryset = queryset.filter(reduce(ior, optional))
return queryset.distinct()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _clone(self, *args, **kwargs):
    """Carry the search state over to cloned querysets.

    Django builds new querysets by cloning; without this override the
    ``_search_*`` attributes set by ``search()`` would be lost on every
    subsequent ``filter``/``exclude`` call.
    """
    search_state = {attr: getattr(self, attr) for attr in
                    ("_search_terms", "_search_fields", "_search_ordered")}
    kwargs.update(search_state)
    return super(SearchableQuerySet, self)._clone(*args, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def order_by(self, *field_names):
    """Flag the queryset as explicitly ordered once a search has run.

    ``iterator`` skips relevance scoring when an explicit ordering was
    requested, so record that fact here.
    """
    # Only flip the flag once, and only if a search actually happened.
    if not self._search_ordered and self._search_terms:
        self._search_ordered = True
    return super(SearchableQuerySet, self).order_by(*field_names)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iterator(self):
    """
    Decorate each result with a ``result_count`` attribute so results
    can be sorted by number of search-term occurrences, but only when a
    search has run and no explicit ordering was applied.

    Search fields spanning model relationships (names containing
    ``"__"``) can't be matched via ``getattr`` on the result, so when a
    result has no direct matches at all and such related fields exist,
    we assume a single match and use the average weight of the related
    fields as the count.
    """
    results = super(SearchableQuerySet, self).iterator()
    if not self._search_terms or self._search_ordered:
        return results
    scored = list(results)
    for index, obj in enumerate(scored):
        occurrences = 0
        related_weights = []
        for field, weight in self._search_fields.items():
            if "__" in field:
                # Relationship-spanning field: remember its weight for
                # the fallback average below.
                related_weights.append(weight)
            for term in self._search_terms:
                value = getattr(obj, field, None)
                if value:
                    occurrences += value.lower().count(term) * weight
        if not occurrences and related_weights:
            # No direct matches - assume one match on a related field.
            occurrences = int(sum(related_weights) / len(related_weights))
        scored[index].result_count = occurrences
    return iter(scored)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_search_fields(self):
    """
    Return the search field names mapped to weights as a dict.

    Resolution order:

    1. fields supplied to ``SearchableManager.__init__`` (stored in
       ``self._search_fields``);
    2. ``search_fields`` attributes collected across the model's whole
       inheritance chain (base classes first, so subclasses win);
    3. as a last resort, every ``CharField``/``TextField`` on the model.
    """
    fields = self._search_fields.copy()
    if not fields:
        # Walk the MRO from the most distant ancestor down so that
        # subclasses override their parents' definitions.
        for klass in reversed(self.model.__mro__):
            declared = getattr(klass, "search_fields", {})
            fields.update(search_fields_to_dict(declared))
    if not fields:
        text_like = [f.name for f in self.model._meta.fields
                     if isinstance(f, (CharField, TextField))]
        fields = search_fields_to_dict(text_like)
    return fields
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contribute_to_class(self, model, name):
    """
    Reinstate manager access from abstract classes.

    Newer Django versions explicitly prevent managers from being
    accessed on abstract models, behaviour the search API relies on, so
    we re-attach a descriptor for this manager onto the model.
    """
    super(SearchableManager, self).contribute_to_class(model, name)
    descriptor = ManagerDescriptor(self)
    setattr(model, name, descriptor)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search(self, *args, **kwargs):
    """
    Proxy to the queryset's ``search`` method for the manager's model
    and, when the model is abstract, for every concrete model that
    subclasses it.

    ``for_user`` may be passed as a keyword argument; it is forwarded to
    each model's ``published`` manager method when one exists.  Results
    from all searched models are merged and returned sorted by
    ``result_count`` (the per-result match count set by the queryset's
    ``iterator``), best matches first.

    Raises ``ImproperlyConfigured`` if a model named in the
    ``SEARCH_MODEL_CHOICES`` setting cannot be loaded.
    """
    if not settings.SEARCH_MODEL_CHOICES:
        # No choices defined - build a list of leaf models (those
        # without subclasses) that inherit from Displayable.
        models = [m for m in apps.get_models()
                  if issubclass(m, self.model)]
        parents = reduce(ior, [set(m._meta.get_parent_list())
                               for m in models])
        models = [m for m in models if m not in parents]
    elif getattr(self.model._meta, "abstract", False):
        # When we're combining model subclasses for an abstract
        # model (eg Displayable), we only want to use models that
        # are represented by the ``SEARCH_MODEL_CHOICES`` setting.
        # Now this setting won't contain an exact list of models
        # we should use, since it can define superclass models such
        # as ``Page``, so we check the parent class list of each
        # model when determining whether a model falls within the
        # ``SEARCH_MODEL_CHOICES`` setting.
        search_choices = set()
        models = set()
        parents = set()
        errors = []
        for name in settings.SEARCH_MODEL_CHOICES:
            try:
                model = apps.get_model(*name.split(".", 1))
            except LookupError:
                errors.append(name)
            else:
                search_choices.add(model)
        if errors:
            raise ImproperlyConfigured("Could not load the model(s) "
                "%s defined in the 'SEARCH_MODEL_CHOICES' setting."
                % ", ".join(errors))
        for model in apps.get_models():
            # Model is actually a subclass of what we're
            # searching (eg Displayable).
            is_subclass = issubclass(model, self.model)
            # Model satisfies the search choices list - either
            # there are no search choices, model is directly in
            # search choices, or its parent is.
            this_parents = set(model._meta.get_parent_list())
            in_choices = not search_choices or model in search_choices
            in_choices = in_choices or this_parents & search_choices
            if is_subclass and (in_choices or not search_choices):
                # Add to models we'll search. Also maintain a parent
                # set, used below for further refinement of models
                # list to search.
                models.add(model)
                parents.update(this_parents)
        # Strip out any models that are superclasses of models,
        # specifically the Page model which will generally be the
        # superclass for all custom content types, since if we
        # query the Page model as well, we will get duplicate
        # results.
        models -= parents
    else:
        # Concrete model - search it alone.
        models = [self.model]
    all_results = []
    user = kwargs.pop("for_user", None)
    for model in models:
        try:
            queryset = model.objects.published(for_user=user)
        except AttributeError:
            # Model has no ``published`` manager method - fall back
            # to the plain queryset.
            queryset = model.objects.get_queryset()
        all_results.extend(queryset.search(*args, **kwargs))
    # Merge results across all searched models, best matches first.
    return sorted(all_results, key=lambda r: r.result_count, reverse=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def url_map(self, for_user=None, **kwargs):
    """
    Return a dictionary mapping URLs to published Displayable-subclass
    instances, including a fake homepage instance if none exists.

    Used in ``yacms.core.sitemaps``.  Items whose slug is an absolute
    http(s) URL are excluded.
    """
    class Home:
        title = _("Home")

    fake_home = Home()
    # Instance attribute, so calling it invokes ``home_slug()`` directly.
    setattr(fake_home, "get_absolute_url", home_slug)
    url_lookup = {fake_home.get_absolute_url(): fake_home}
    for model in apps.get_models():
        if not issubclass(model, self.model):
            continue
        published = (model.objects.published(for_user=for_user)
                                  .filter(**kwargs)
                                  .exclude(slug__startswith="http://")
                                  .exclude(slug__startswith="https://"))
        for obj in published:
            url_lookup[obj.get_absolute_url()] = obj
    return url_lookup
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_name(model_id):
    """
    Get the name for a model.

    :returns str: The model's name. If the id has no associated name,
        then "id = {ID} (no name)" is returned.
    """
    stored = _names.get(model_id)
    if stored is not None:
        return stored
    # No registered name - fall back to a descriptive placeholder.
    return 'id = %s (no name)' % str(model_id)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def define_logger_func(self, level, field_names, default=NO_DEFAULT, filters=None, include_exc_info=False):
    """Define a new logger function that logs its arguments under the
    given predefined keys.

    :param level: Log level used for every call of the returned function.
    :param field_names: Set of predefined keys.
    :param default: A default value for each key.
    :param filters: Additional filters applied to the given arguments.
    :param include_exc_info: Attach a stack trace to each log record
        (useful for the ``ERROR`` level).
    :return: A function logging the given values under the predefined
        keys at the given level.
    """
    kv_formatter = KvFormatter(field_names, default, filters)

    def _predefined_logger(*args, **kwargs):
        return self._log(level, kv_formatter(*args, **kwargs),
                         include_exc_info)

    return _predefined_logger
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def log(self, level, *args, **kwargs):
    """Delegate a log call to the underlying logger.

    :param level: log level, forwarded unchanged to ``_log_kw``.
    :param args: positional values, passed through as a tuple.
    :param kwargs: keyword values, passed through as a dict.
    :return: whatever ``_log_kw`` returns.
    """
    # Note: args/kwargs are handed over packed (tuple/dict) - _log_kw
    # does its own formatting of them.
    return self._log_kw(level, args, kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def exception(self, *args, **kwargs):
    """Delegate an exception call to the underlying logger.

    Logs at ``ERROR`` level with ``exc_info=True`` so the current
    exception's traceback is attached; intended to be called from an
    ``except`` block.
    """
    return self._log_kw(ERROR, args, kwargs, exc_info=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self):
    """Refresh ``self.states`` with the current switch values.

    Queries the device with ``port list`` and converts each digit of
    the reply into a bool; a missing/empty reply is treated as four
    switches that are all off.
    """
    reply = self.get('port list') or '0000'
    self.states = [bool(int(digit)) for digit in reply]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_correct_audience(self, audience):
    """Assert that Dataporten sends back our own client id as audience."""
    expected_client_id, _secret = self.get_key_and_secret()
    if audience != expected_client_id:
        raise AuthException('Wrong audience')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ChoiceHumanReadable(choices, choice):
    """
    Return the human readable representation for a list of choices.

    :param choices: iterable of ``(value, label)`` pairs, as used by
        Django model field choices.
    :param choice: the stored value to look up.
    :raises NoChoiceError: if ``choice`` is None.
    :raises NoChoiceMatchError: if ``choice`` is not among the values.

    @see https://docs.djangoproject.com/en/dev/ref/models/fields/#choices
    """
    # Fixed: compare against None with ``is``, not ``==`` (PEP 8; ``==``
    # can be hijacked by a custom __eq__).
    if choice is None:
        raise NoChoiceError()
    for entry in choices:
        if entry[0] == choice:
            return entry[1]
    raise NoChoiceMatchError("The choice '%s' does not exist in '%s'"
                             % (choice, ", ".join(c[0] for c in choices)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_db_prep_value(self, value, connection=None, prepared=False):
    """Return the field's value prepared for the database backend.

    Used by the default implementations of ``get_db_prep_save`` and
    ``get_db_prep_lookup``.  Empty values become None; prepared values
    pass through untouched; otherwise a list/tuple is joined into a
    single separator-delimited string.
    """
    if not value:
        return
    if prepared:
        return value
    # NOTE(review): this code base is Python 2 (``unicode``).
    assert isinstance(value, (list, tuple))
    return self.separator.join(unicode(item) for item in value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self):
    """Load the user's SDB inventory.

    Raises parseException if the inventory page cannot be parsed.
    """
    sdb = SDBInventory(self.usr)
    self.inventory = sdb
    self.forms = sdb.forms
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self):
    """Update the user's SDB inventory.

    Loops through all pages and submits an update form for every page
    whose items have changed (an item counts as changed when its remove
    attribute is greater than 0).

    Fixed: the original returned True after submitting the FIRST changed
    page, so any further changed pages were silently skipped; we now
    submit every changed page.  Also returns True (not None) when no
    page needed updating, matching the documented bool contract.

    Returns bool - True if all submissions succeeded, False otherwise.
    """
    for page in range(1, self.inventory.pages + 1):
        if not self._hasPageChanged(page):
            continue
        form = self._updateForm(page)
        form.usePin = True
        pg = form.submit()
        # A successful submission redirects back to the SDB page.
        if "Your Safety Deposit Box" not in pg.content:
            logging.getLogger("neolib.shop").exception(
                "Could not verify if SDB inventory was updated.",
                {'pg': pg})
            return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_series_url(key):
    """For internal use. Given a series key, generate a valid URL to the
    series endpoint for that key.

    :param string key: the series key
    :rtype: string"""
    # NOTE(review): Python 2 API (urlparse module, urllib.quote).
    base = urlparse.urljoin(endpoint.SERIES_ENDPOINT, 'key/')
    return urlparse.urljoin(base, urllib.quote(key))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_series(self, key=None, tags=None, attrs=None):
    """Create a new series with an optional string key. A list of tags
    and a map of attributes can also be optionally supplied.

    Fixed: ``tags=[]`` / ``attrs={}`` were mutable default arguments,
    shared across calls and mutable by callees; None sentinels replace
    them with identical effective behaviour.

    :param string key: (optional) a string key for the series
    :param list tags: (optional) the tags to create the series with
    :param dict attrs: (optional) the attributes to create the series
        with
    :rtype: :class:`tempodb.response.Response` object"""
    if tags is None:
        tags = []
    if attrs is None:
        attrs = {}
    body = protocol.make_series_key(key, tags, attrs)
    resp = self.session.post(endpoint.SERIES_ENDPOINT, body)
    return resp
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_series(self, keys=None, tags=None, attrs=None, allow_truncation=False):
    """Delete a series according to the given criteria.

    **Note:** for the key argument, the filter returns the *union* of
    those values.  For the tag and attr arguments, the filter returns
    the *intersection* of those values.

    :param keys: filter by one or more series keys
    :type keys: list or string
    :param tags: filter by one or more tags
    :type tags: list or string
    :param dict attrs: filter by one or more key-value attributes
    :param bool allow_truncation: whether to allow full deletion of a
        database. Default is False.
    :rtype: :class:`tempodb.response.Response` object"""
    filters = {
        'key': keys,
        'tag': tags,
        'attr': attrs,
        # The API expects a lowercase string ("true"/"false").
        'allow_truncation': str(allow_truncation).lower(),
    }
    query = endpoint.make_url_args(filters)
    target = '?'.join([endpoint.SERIES_ENDPOINT, query])
    return self.session.delete(target)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_series(self, key):
    """Get a series object from TempoDB given its key.

    :param string key: a string name for the series
    :rtype: :class:`tempodb.response.Response` with a
        :class:`tempodb.protocol.objects.Series` data payload"""
    return self.session.get(make_series_url(key))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.