_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def stack_trace(self, f):
    """Print a mini stack trace for a thread, starting at frame `f`."""
    frame = f
    while frame:
        hidden = not self.core.ignore_filter.is_included(frame)
        if hidden or self.settings['dbg_trepan']:
            entry = Mstack.format_stack_entry(self, (frame, frame.f_lineno))
            self.msg(" "*4 + entry)
        frame = frame.f_back
    return
"resource": ""
} |
def run(self, args):
    """Get file information.

    With no arguments, reports on the current frame's file.  Otherwise
    args[0] names the file and any remaining args select extra report
    sections: 'size', 'sha1', 'brkpts', or 'all'.
    """
    if len(args) == 0:
        if not self.proc.curframe:
            self.errmsg("No frame - no default file.")
            return False
        filename = self.proc.curframe.f_code.co_filename
    else:
        filename = args[0]
    m = filename + ' is'
    filename_cache = self.core.filename_cache
    if filename in filename_cache:
        m += " cached in debugger"
        if filename_cache[filename] != filename:
            m += ' as:'
            m = Mmisc.wrapped_lines(m, filename_cache[filename] + '.',
                                    self.settings['width'])
        else:
            m += '.'
        self.msg(m)
    else:
        # Renamed loop variable: `file` shadowed the builtin.
        matches = [cached for cached in file_list()
                   if cached.endswith(filename)]
        if len(matches) > 1:
            self.msg("Multiple files found ending filename string:")
            for match_file in matches:
                self.msg("\t%s" % match_file)
        elif len(matches) == 1:
            canonic_name = pyficache.unmap_file(matches[0])
            m += " matched debugger cache file:\n " + canonic_name
            self.msg(m)
        else:
            self.msg(m + ' not cached in debugger.')
    canonic_name = self.core.canonic(filename)
    self.msg(Mmisc.wrapped_lines('Canonic name:', canonic_name,
                                 self.settings['width']))
    for name in (canonic_name, filename):
        if name in sys.modules:
            # NOTE(review): this compares a name string against module
            # *objects*, which looks like it can never match -- confirm
            # the intent (possibly `name == v.__file__`?).
            for key in [k for k, v in list(sys.modules.items())
                        if name == v]:
                # Bug fix: msg() takes one formatted string; `key` was
                # previously passed as a stray second positional argument.
                self.msg("module: %s" % key)
    for arg in args[1:]:
        processed_arg = False
        if arg in ['all', 'size']:
            # Hoisted: size() was called twice for the same file.
            num_lines = pyficache.size(canonic_name)
            if num_lines:
                self.msg("File has %d lines." % num_lines)
            processed_arg = True
        if arg in ['all', 'sha1']:
            self.msg("SHA1 is %s." % pyficache.sha1(canonic_name))
            processed_arg = True
        if arg in ['all', 'brkpts']:
            lines = pyficache.trace_line_numbers(canonic_name)
            if lines:
                self.section("Possible breakpoint line numbers:")
                fmt_lines = columnize.columnize(lines, ljust=False,
                                                arrange_vertical=False,
                                                lineprefix=' ')
                self.msg(fmt_lines)
            processed_arg = True
        if not processed_arg:
            self.errmsg("Don't understand sub-option %s." % arg)
    return
"resource": ""
} |
def checkfuncname(b, frame):
    """Check whether we should break here because of `b.funcname`."""
    if not b.funcname:
        # Breakpoint was set by line number: break only on that exact
        # line, not when a `def` on it merely defines a function.
        return b.line == frame.f_lineno
    # Breakpoint was set via function name.
    if frame.f_code.co_name != b.funcname:
        # We're executing the `def` statement itself, not a call.
        return False
    if not b.func_first_executable_line:
        # First time entering the function: remember its first line.
        b.func_first_executable_line = frame.f_lineno
    # Break only at the function's first executable line.
    return b.func_first_executable_line == frame.f_lineno
"resource": ""
} |
def delete_breakpoint(self, bp):
    """Remove breakpoint `bp`; return True if it was registered."""
    # Clear the numbered slot first so the number is never reused live.
    self.bpbynumber[bp.number] = None
    key = (bp.filename, bp.line)
    if key not in self.bplist:
        return False
    entries = self.bplist[key]
    entries.remove(bp)
    if not entries:
        # Last breakpoint for this file:line combo went away.
        del self.bplist[key]
    return True
"resource": ""
} |
def delete_breakpoint_by_number(self, bpnum):
    """Remove a breakpoint given its breakpoint number."""
    found, errmsg, bp = self.get_breakpoint(bpnum)
    if found:
        self.delete_breakpoint(bp)
        return (True, '')
    return False, errmsg
"resource": ""
} |
def en_disable_all_breakpoints(self, do_enable=True):
    """Enable or disable every registered breakpoint."""
    endis = 'en' if do_enable else 'dis'
    active = [bp for bp in self.bpbynumber if bp]
    if not active:
        return "No breakpoints to %sable" % endis
    numbers = []
    for bp in active:
        bp.enabled = do_enable
        numbers.append(str(bp.number))
    return "Breakpoints %sabled: %s" % (endis, ", ".join(numbers))
"resource": ""
} |
def en_disable_breakpoint_by_number(self, bpnum, do_enable=True):
    """Enable or disable one breakpoint, identified by its number."""
    success, msg, bp = self.get_breakpoint(bpnum)
    if not success:
        return success, msg
    endis = 'en' if do_enable else 'dis'
    if bp.enabled == do_enable:
        # Already in the requested state: report that, change nothing.
        return (False, ('Breakpoint (%r) previously %sabled' %
                        (str(bpnum), endis,)))
    bp.enabled = do_enable
    return (True, '')
"resource": ""
} |
def delete_breakpoints_by_lineno(self, filename, lineno):
    """Remove all breakpoints at a given filename and line number.

    Returns the list of breakpoint numbers that were deleted.
    """
    key = (filename, lineno)
    if key not in self.bplist:
        return []
    # Copy before deleting: delete_breakpoint mutates the live list.
    doomed = list(self.bplist[key])
    numbers = [bp.number for bp in doomed]
    for bp in doomed:
        self.delete_breakpoint(bp)
    return numbers
"resource": ""
} |
def open(self, inp, opts=None):
    """Set what file to read from.

    `inp` may be an already-open text stream or a filename to open for
    reading.  Raises IOError for any other input type.
    """
    if isinstance(inp, io.TextIOWrapper):
        self.input = inp
    elif isinstance(inp, str):
        # Fix for the old FIXME: `'string'.__class__` is just `str`.
        self.name = inp
        self.input = open(inp, 'r')
    else:
        raise IOError("Invalid input type (%s) for %s" %
                      (inp.__class__.__name__, inp))
    return
"resource": ""
} |
def readline(self, prompt='', use_raw=None):
    """Read one line of input, without its trailing newline.

    `prompt` and `use_raw` exist only for compatibility with other
    input routines and are ignored.  Raises EOFError at end of input.
    """
    line = self.input.readline()
    if line:
        return line.rstrip("\n")
    raise EOFError
"resource": ""
} |
def confirm(self, prompt, default):
    """Ask the user to confirm a dangerous action.

    Prints `prompt` and reads a yes/no reply; returns True/False, or
    `default` if input ends (EOF) before an answer is given.
    """
    while True:
        try:
            self.write_confirm(prompt, default)
            answer = self.readline('').strip().lower()
        except EOFError:
            return default
        if answer in ('y', 'yes'):
            return True
        if answer in ('n', 'no'):
            return False
        # Anything else: re-prompt.
        self.msg("Please answer y or n.")
"resource": ""
} |
def t_whitespace(self, s):
    r'\s+'
    # The regex docstring above IS this token's pattern (SPARK-style
    # scanner convention); it must stay exactly as written.
    self.add_token('SPACE', s)
    # Advance the scan position past the matched whitespace.
    self.pos += len(s)
    pass
"resource": ""
} |
def t_number(self, s):
    r'\d+'
    # The regex docstring above IS this token's pattern (SPARK-style
    # scanner convention); it must stay exactly as written.
    pos = self.pos
    # The token carries the integer value, but the scan position advances
    # by the length of the matched text.
    self.add_token('NUMBER', int(s))
    self.pos = pos + len(s)
"resource": ""
} |
def as_future(self, query):
    """Wrap a `sqlalchemy.orm.query.Query` object into a
    `concurrent.futures.Future` so that it can be yielded.
    Parameters
    ----------
    query : sqlalchemy.orm.query.Query
        SQLAlchemy query object to execute
    Returns
    -------
    tornado.concurrent.Future
        A `Future` object wrapping the given query so that tornado can
        await/yield on it
    """
    # concurrent.futures.Future is not compatible with the "new style"
    # asyncio Future, and awaiting on such "old-style" futures does not
    # work.
    #
    # tornado includes a `run_in_executor` function to help with this
    # problem, but it's only included in version 5+. Hence, we copy a
    # little bit of code here to handle this incompatibility.
    # NOTE(review): `query` is handed straight to Executor.submit(), so it
    # must be a *callable* (e.g. a bound query method), despite the
    # docstring saying Query -- confirm with callers.
    if not self._pool:
        # Lazily create the worker pool on first use.
        self._pool = ThreadPoolExecutor(max_workers=self._max_workers)
    old_future = self._pool.submit(query)
    new_future = Future()
    # Bridge the concurrent.futures result onto the current IOLoop so it
    # becomes awaitable from tornado coroutines.
    IOLoop.current().add_future(
        old_future, lambda f: chain_future(f, new_future)
    )
    return new_future
"resource": ""
} |
def restore_original_login(request):
    """
    Restore an original login session, checking the signed session
    """
    original_session = request.session.get(la_settings.USER_SESSION_FLAG)
    # Log out the impersonated user first; this flushes the session.
    logout(request)
    if not original_session:
        return
    try:
        # The stored value is a signed user primary key; unsign() rejects
        # it when the signature is older than the configured timeout.
        original_user_pk = signer.unsign(
            original_session, max_age=timedelta(days=la_settings.USER_SESSION_DAYS_TIMESTAMP).total_seconds()
        )
        user = get_user_model().objects.get(pk=original_user_pk)
        messages.info(
            request,
            la_settings.MESSAGE_LOGIN_REVERT.format(username=user.__dict__[username_field]),
            extra_tags=la_settings.MESSAGE_EXTRA_TAGS,
        )
        # Log back in as the original user without re-storing a session flag.
        login_as(user, request, store_original_user=False)
        if la_settings.USER_SESSION_FLAG in request.session:
            del request.session[la_settings.USER_SESSION_FLAG]
    except SignatureExpired:
        # Expired signature: stay logged out rather than restoring.
        pass
"resource": ""
} |
q271815 | _load_module | test | def _load_module(path):
"""Code to load create user module. Copied off django-browserid."""
i = path.rfind(".")
module, attr = path[:i], path[i + 1 :]
try:
mod = import_module(module)
except ImportError:
raise ImproperlyConfigured("Error importing CAN_LOGIN_AS function: {}".format(module))
except ValueError:
raise ImproperlyConfigured("Error importing CAN_LOGIN_AS" " function. Is CAN_LOGIN_AS a" " string?")
try:
can_login_as = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured("Module {0} does not define a {1} " "function.".format(module, attr))
return can_login_as | python | {
"resource": ""
} |
def iterate_docs(client, expanded=False, progress=False):
    """
    Yield each document in a Luminoso project in turn. Requires a client whose
    URL points to a project.
    If expanded=True, it will include additional fields that Luminoso added in
    its analysis, such as 'terms' and 'vector'.
    Otherwise, it will contain only the fields necessary to reconstruct the
    document: 'title', 'text', and 'metadata'.
    Shows a progress bar if progress=True.
    """
    # Get total number of docs from the project record
    num_docs = client.get()['document_count']
    progress_bar = None
    try:
        if progress:
            progress_bar = tqdm(desc='Downloading documents', total=num_docs)
        # Page through the project in fixed-size batches.
        for offset in range(0, num_docs, DOCS_PER_BATCH):
            response = client.get('docs', offset=offset, limit=DOCS_PER_BATCH)
            docs = response['result']
            for doc in docs:
                # Get the appropriate set of fields for each document
                if expanded:
                    for field in UNNECESSARY_FIELDS:
                        doc.pop(field, None)
                else:
                    doc = {field: doc[field] for field in CONCISE_FIELDS}
                if progress:
                    progress_bar.update()
                yield doc
    finally:
        # Close the bar even if the consumer abandons the generator early.
        if progress:
            progress_bar.close()
"resource": ""
} |
def _main(argv):
    """
    Handle arguments for the 'lumi-download' command.
    """
    parser = argparse.ArgumentParser(
        description=DESCRIPTION,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        '-b',
        '--base-url',
        default=URL_BASE,
        help='API root url, default: %s' % URL_BASE,
    )
    parser.add_argument(
        '-e', '--expanded',
        help="Include Luminoso's analysis of each document, such as terms and"
        ' document vectors',
        action='store_true',
    )
    parser.add_argument('-t', '--token', help='API authentication token')
    parser.add_argument(
        '-s',
        '--save-token',
        action='store_true',
        help='save --token for --base-url to ~/.luminoso/tokens.json',
    )
    parser.add_argument(
        'project_id', help='The ID of the project in the Daylight API'
    )
    parser.add_argument(
        'output_file', nargs='?', default=None,
        help='The JSON lines (.jsons) file to write to'
    )
    args = parser.parse_args(argv)
    # Optionally persist the token before connecting, keyed by domain.
    if args.save_token:
        if not args.token:
            raise ValueError("error: no token provided")
        LuminosoClient.save_token(args.token,
                                  domain=urlparse(args.base_url).netloc)
    client = LuminosoClient.connect(url=args.base_url, token=args.token)
    # Scope the client to the requested project, then download its docs.
    proj_client = client.client_for_path('projects/{}'.format(args.project_id))
    download_docs(proj_client, args.output_file, args.expanded)
"resource": ""
} |
def transcode_to_stream(input_filename, date_format=None):
    """
    Read a JSON or CSV file and convert it into a JSON stream, saved in
    an anonymous temporary file that is returned rewound to the start.
    """
    stream = tempfile.TemporaryFile()
    entries = open_json_or_csv_somehow(input_filename,
                                       date_format=date_format)
    for entry in entries:
        encoded = json.dumps(entry, ensure_ascii=False).encode('utf-8')
        stream.write(encoded + b'\n')
    stream.seek(0)
    return stream
"resource": ""
} |
def open_json_or_csv_somehow(filename, date_format=None):
    """
    Deduce the format of a file, within reason.
    - If the filename ends with .csv or .txt, it's csv.
    - If the filename ends with .jsons, it's a JSON stream (conveniently the
      format we want to output).
    - If the filename ends with .json, it could be a legitimate JSON file, or
      it could be a JSON stream, following a nonstandard convention that many
      people including us are guilty of. In that case:
      - If the first line is a complete JSON document, and there is more in
        the file besides the first line, then it is a JSON stream.
      - Otherwise, it is probably really JSON.
    - If the filename does not end with .json, .jsons, or .csv, we have to
      guess whether it's still CSV or tab-separated values or something like
      that. If it's JSON, the first character would almost certainly have to
      be a bracket or a brace. If it isn't, assume it's CSV or similar.
    """
    fileformat = None
    if filename.endswith('.csv'):
        fileformat = 'csv'
    elif filename.endswith('.jsons'):
        fileformat = 'jsons'
    else:
        # Sniff the content. Use `with` so the probe handle is closed.
        with open(filename) as opened:
            line = opened.readline()
            if line[0] not in '{[' and not filename.endswith('.json'):
                fileformat = 'csv'
            elif (line.count('{') == line.count('}') and
                    line.count('[') == line.count(']')):
                # The first line is a complete JSON document.  If anything
                # but whitespace follows it, the file is a JSON stream.
                # Bug fix: this used to call read() with no size, which
                # consumed the whole remainder at once and classified a
                # one-line JSON file as a stream.
                char = opened.read(1)
                while char.isspace():
                    char = opened.read(1)
                fileformat = 'json' if char == '' else 'jsons'
            else:
                fileformat = 'json'
    if fileformat == 'json':
        # Bug fix: json.load() no longer accepts an `encoding` argument
        # (removed in Python 3.9); also close the file when done.
        with open(filename) as opened:
            stream = json.load(opened)
    elif fileformat == 'csv':
        stream = open_csv_somehow(filename)
    else:
        stream = stream_json_lines(filename)
    return _normalize_data(stream, date_format=date_format)
"resource": ""
} |
q271820 | _normalize_data | test | def _normalize_data(stream, date_format=None):
"""
This function is meant to normalize data for upload to the Luminoso
Analytics system. Currently it only normalizes dates.
If date_format is not specified, or if there's no date in a particular doc,
the the doc is yielded unchanged.
"""
for doc in stream:
if 'date' in doc and date_format is not None:
try:
doc['date'] = _convert_date(doc['date'], date_format)
except ValueError:
# ValueErrors cover the cases when date_format does not match
# the actual format of the date, both for epoch and non-epoch
# times.
logger.exception('%s does not match the date format %s;'
% (doc['date'], date_format))
yield doc | python | {
"resource": ""
} |
q271821 | _convert_date | test | def _convert_date(date_string, date_format):
"""
Convert a date in a given format to epoch time. Mostly a wrapper for
datetime's strptime.
"""
if date_format != 'epoch':
return datetime.strptime(date_string, date_format).timestamp()
else:
return float(date_string) | python | {
"resource": ""
} |
def detect_file_encoding(filename):
    """
    Guess a file's encoding with ftfy, from a sample of its first megabyte.

    ftfy's detector is limited: it only recognizes UTF-8, CESU-8, UTF-16,
    Windows-1252, and occasionally MacRoman -- but it does much better
    than chardet.
    """
    with open(filename, 'rb') as opened:
        sample = opened.read(2 ** 20)
    _, encoding = ftfy.guess_bytes(sample)
    return encoding
"resource": ""
} |
def stream_json_lines(file):
    """
    Lazily parse newline-delimited JSON, yielding one object per line.
    `file` may be a filename or an already-open (binary) file object.
    """
    if isinstance(file, string_type):
        file = open(file, 'rb')
    for raw_line in file:
        raw_line = raw_line.strip()
        if not raw_line:
            continue
        if isinstance(raw_line, bytes):
            raw_line = raw_line.decode('utf-8')
        yield json.loads(raw_line)
"resource": ""
} |
def transcode_to_utf8(filename, encoding):
    """
    Copy `filename` (in the given encoding) into a temporary file encoded
    as UTF-8, and return that file rewound to the start.
    """
    tmp = tempfile.TemporaryFile()
    # Fix: use `with` so the source file is closed (it was leaked before).
    with io.open(filename, encoding=encoding) as source:
        for line in source:
            # Drop any BOM characters at either end of the line.
            tmp.write(line.strip('\uFEFF').encode('utf-8'))
    tmp.seek(0)
    return tmp
"resource": ""
} |
def open_csv_somehow_py2(filename):
    """
    Open a CSV file using Python 2's CSV module, working around the deficiency
    where it can't handle the null bytes of UTF-16.
    """
    # NOTE(review): this is Python-2-only code -- `reader.next()` and
    # decoding str cells would fail on Python 3.
    encoding = detect_file_encoding(filename)
    if encoding.startswith('UTF-16'):
        # Python 2's csv module chokes on UTF-16's null bytes, so re-encode
        # the whole file to UTF-8 first.
        csvfile = transcode_to_utf8(filename, encoding)
        encoding = 'UTF-8'
    else:
        csvfile = open(filename, 'rU')
    # Peek at the first line to decide the delimiter, then rewind.
    line = csvfile.readline()
    csvfile.seek(0)
    if '\t' in line:
        # tab-separated
        reader = csv.reader(csvfile, delimiter='\t')
    else:
        reader = csv.reader(csvfile, dialect='excel')
    header = reader.next()
    # Normalize header cells: decode to unicode, lowercase, strip.
    header = [cell.decode(encoding).lower().strip() for cell in header]
    encode_fn = lambda x: x.decode(encoding, 'replace')
    return _read_csv(reader, header, encode_fn)
"resource": ""
} |
q271826 | _read_csv | test | def _read_csv(reader, header, encode_fn):
"""
Given a constructed CSV reader object, a header row that we've read, and
a detected encoding, yield its rows as dictionaries.
"""
for row in reader:
if len(row) == 0:
continue
row = [encode_fn(cell) for cell in row]
row_list = zip(header, row)
row_dict = dict(row_list)
if len(row_dict['text']) == 0:
continue
row_dict['text'] = unicodedata.normalize(
'NFKC', row_dict['text'].strip()
)
if row_dict.get('title') == '':
del row_dict['title']
if 'date' in row_dict:
# We handle dates further in open_json_or_csv_somehow
if row_dict['date'] == '':
del row_dict['date']
if 'subset' in row_dict:
subsets = [cell[1] for cell in row_list
if cell[1] != '' and cell[0] == 'subset']
if subsets:
row_dict['subsets'] = subsets
if 'subset' in row_dict:
del row_dict['subset']
yield row_dict | python | {
"resource": ""
} |
def main():
    """
    Command-line entry point: convert a file to a JSON stream, or verify
    a file that is already a JSON stream.
    """
    logging.basicConfig(level=logging.INFO)
    import argparse
    parser = argparse.ArgumentParser(
        description="Translate CSV or JSON input to a JSON stream, or verify "
                    "something that is already a JSON stream."
    )
    parser.add_argument(
        'input',
        help='A CSV, JSON, or JSON stream file to read.')
    parser.add_argument(
        'output', nargs='?', default=None,
        help="The filename to output to. Recommended extension is .jsons. "
             "If omitted, use standard output.")
    args = parser.parse_args()
    transcode(args.input, args.output)
"resource": ""
} |
def connect(cls, url=None, token_file=None, token=None):
    """
    Returns an object that makes requests to the API, authenticated
    with a saved or specified long-lived token, at URLs beginning with
    `url`.
    If no URL is specified, or if the specified URL is a path such as
    '/projects' without a scheme and domain, the client will default to
    https://analytics.luminoso.com/api/v5/.
    If neither token nor token_file are specified, the client will look
    for a token in $HOME/.luminoso/tokens.json. The file should contain
    a single json dictionary of the format
    `{'root_url': 'token', 'root_url2': 'token2', ...}`.
    """
    if url is None:
        url = '/'
    # Absolute URLs keep their own root; bare paths are appended to the
    # default API base.
    if url.startswith('http'):
        root_url = get_root_url(url)
    else:
        url = URL_BASE + '/' + url.lstrip('/')
        root_url = URL_BASE
    if token is None:
        # Fall back to the saved-token file, keyed by the root URL's domain.
        token_file = token_file or get_token_filename()
        try:
            with open(token_file) as tf:
                token_dict = json.load(tf)
        except FileNotFoundError:
            raise LuminosoAuthError('No token file at %s' % token_file)
        try:
            token = token_dict[urlparse(root_url).netloc]
        except KeyError:
            raise LuminosoAuthError('No token stored for %s' % root_url)
    session = requests.session()
    session.auth = _TokenAuth(token)
    return cls(session, url)
"resource": ""
} |
def save_token(token, domain='analytics.luminoso.com', token_file=None):
    """
    Take a long-lived API token and store it to a local file. Long-lived
    tokens can be retrieved through the UI.

    `domain` is the API domain the token is valid for; `token_file` is
    where to store it (default: the standard token filename).  Existing
    tokens for other domains are preserved.
    """
    token_file = token_file or get_token_filename()
    if os.path.exists(token_file):
        # Fix: close the file after reading (it was leaked before).
        with open(token_file) as f:
            saved_tokens = json.load(f)
    else:
        saved_tokens = {}
    saved_tokens[domain] = token
    directory, filename = os.path.split(token_file)
    if directory and not os.path.exists(directory):
        os.makedirs(directory)
    with open(token_file, 'w') as f:
        json.dump(saved_tokens, f)
"resource": ""
} |
def _request(self, req_type, url, **kwargs):
    """
    Make a request via the `requests` module. If the result has an HTTP
    error status, convert that to a Python exception.
    """
    logger.debug('%s %s' % (req_type, url))
    result = self.session.request(req_type, url, **kwargs)
    try:
        result.raise_for_status()
    except requests.HTTPError:
        # Prefer the JSON-decoded error body when the server sent one;
        # otherwise fall back to the raw text.
        error = result.text
        try:
            error = json.loads(error)
        except ValueError:
            pass
        # Map HTTP status codes onto the library's exception hierarchy.
        if result.status_code in (401, 403):
            error_class = LuminosoAuthError
        elif result.status_code in (400, 404, 405):
            error_class = LuminosoClientError
        elif result.status_code >= 500:
            error_class = LuminosoServerError
        else:
            error_class = LuminosoError
        raise error_class(error)
    return result
"resource": ""
} |
def delete(self, path='', **params):
    """
    Make a DELETE request to the given path and return the JSON-decoded
    result.  Keyword parameters become URL parameters.

    DELETE requests ask to delete the object represented by this URL.
    """
    url = ensure_trailing_slash(self.url + path.lstrip('/'))
    return self._json_request('delete', url,
                              params=jsonify_parameters(params))
"resource": ""
} |
def wait_for_build(self, interval=5, path=None):
    """
    A convenience method designed to inform you when a project build has
    completed. It polls the API every `interval` seconds until there is
    not a build running. At that point, it returns the "last_build_info"
    field of the project record if the build succeeded, and raises a
    LuminosoError with the field as its message if the build failed.
    If a `path` is not specified, this method will assume that its URL is
    the URL for the project. Otherwise, it will use the specified path
    (which should be "/projects/<project_id>/").
    """
    path = path or ''
    start = time.time()
    next_log = 0
    while True:
        response = self.get(path)['last_build_info']
        if not response:
            raise ValueError('This project is not building!')
        # A set stop_time means the build finished (one way or the other).
        if response['stop_time']:
            if response['success']:
                return response
            else:
                raise LuminosoError(response)
        elapsed = time.time() - start
        if elapsed > next_log:
            # NOTE(review): this logs `next_log` (the threshold just
            # crossed), not the measured `elapsed`, so the reported
            # seconds are approximate -- confirm that's intended.
            logger.info('Still waiting (%d seconds elapsed).', next_log)
            next_log += 120
        time.sleep(interval)
"resource": ""
} |
def get_root_url(url, warn=True):
    """
    Get the "root URL" for a URL, as described in the LuminosoClient
    documentation: scheme + domain + '/api/v4'.
    """
    parsed = urlparse(url)
    if not parsed.scheme:
        # A relative URL gives us no scheme/domain to build a root from.
        raise ValueError('Please supply a full URL, beginning with http:// '
                         'or https:// .')
    root_url = '%s://%s/api/v4' % (parsed.scheme, parsed.netloc)
    # Issue a warning if the path didn't already start with /api/v4.
    if warn and not parsed.path.startswith('/api/v4'):
        logger.warning('Using %s as the root url' % root_url)
    return root_url
"resource": ""
} |
def save_token(self, token_file=None):
    """
    Obtain the user's long-lived API token and save it in a local file.
    If the user has no long-lived API token, one will be created.
    Returns the token that was saved.
    """
    tokens = self._json_request('get', self.root_url + '/user/tokens/')
    long_lived = [token['type'] == 'long_lived' for token in tokens]
    if any(long_lived):
        dic = tokens[long_lived.index(True)]
    else:
        # User doesn't have a long-lived token, so create one.
        dic = self._json_request('post', self.root_url + '/user/tokens/')
    token = dic['token']
    token_file = token_file or get_token_filename()
    if os.path.exists(token_file):
        # Fix: close the file after reading (it was leaked before).
        with open(token_file) as f:
            saved_tokens = json.load(f)
    else:
        saved_tokens = {}
    # Tokens are keyed by the API domain they belong to.
    saved_tokens[urlparse(self.root_url).netloc] = token
    directory, filename = os.path.split(token_file)
    if directory and not os.path.exists(directory):
        os.makedirs(directory)
    with open(token_file, 'w') as f:
        json.dump(saved_tokens, f)
    return token
"resource": ""
} |
def _json_request(self, req_type, url, **kwargs):
    """
    Make a request of the specified type and expect a JSON object in
    response.
    If the result has an 'error' value, raise a LuminosoAPIError with
    its contents. Otherwise, return the contents of the 'result' value.
    """
    response = self._request(req_type, url, **kwargs)
    try:
        json_response = response.json()
    except ValueError:
        # The server sent a non-JSON body; surface it in the logs and
        # point the caller at get_raw() for plain-text endpoints.
        logger.error("Received response with no JSON: %s %s" %
                     (response, response.content))
        raise LuminosoError('Response body contained no JSON. '
                            'Perhaps you meant to use get_raw?')
    if json_response.get('error'):
        raise LuminosoAPIError(json_response.get('error'))
    return json_response['result']
"resource": ""
} |
def post_data(self, path, data, content_type, **params):
    """
    Make a POST request to the given path, with `data` as its raw body,
    and return the JSON-decoded result.

    The content_type must reflect the kind of data being sent, often
    `application/json`.  Unlike other POST requests, keyword parameters
    become URL parameters here, because the body is already in use.
    This is used by the Luminoso API to upload new documents in JSON
    format.
    """
    url = ensure_trailing_slash(self.url + path.lstrip('/'))
    headers = {'Content-Type': content_type}
    return self._json_request('post', url,
                              params=jsonify_parameters(params),
                              data=data,
                              headers=headers)
"resource": ""
} |
def change_path(self, path):
    """
    Return a new LuminosoClient for a subpath of this one, reusing this
    client's authenticated session (no need to `.connect` again).

    A path starting with `/` is resolved from the root_url; otherwise it
    is appended to the current url.  For example:

        newclient = client.change_path('projects/myaccount/myproject_id')
        project_area = newclient.change_path('/projects/myaccount')

    The original client remains usable after splitting off sub-clients.
    """
    if path.startswith('/'):
        new_url = self.root_url + path
    else:
        new_url = self.url + path
    return self.__class__(self.session, new_url)
"resource": ""
} |
def _get_default_account(self):
    """
    Get the ID of an account you can use to access projects.
    """
    root_client = self.__class__(self.session, self.root_url)
    account_info = root_client.get('/accounts/')
    default = account_info['default_account']
    if default is not None:
        return default
    # No explicit default: pick the first non-public account.
    valid_accounts = [acct['account_id'] for acct in account_info['accounts']
                      if acct['account_id'] != 'public']
    if len(valid_accounts) == 0:
        raise ValueError("Can't determine your default URL. "
                         "Please request a specific URL or ask "
                         "Luminoso for support.")
    return valid_accounts[0]
"resource": ""
} |
def documentation(self):
    """
    Get the documentation that the server sends for the API.
    """
    root_client = self.__class__(self.session, self.root_url)
    return root_client.get_raw('/')
"resource": ""
} |
def wait_for(self, job_id, base_path=None, interval=5):
    """
    Wait for an asynchronous task to finish.
    Unlike the thin methods elsewhere on this object, this one is actually
    specific to how the Luminoso API works. This will poll an API
    endpoint to find out the status of the job numbered `job_id`,
    repeating every 5 seconds (by default) until the job is done. When
    the job is done, it will return an object representing the result of
    that job.
    In the Luminoso API, requests that may take a long time return a
    job ID instead of a result, so that your code can continue running
    in the meantime. When it needs the job to be done to proceed, it can
    use this method to wait.
    The base URL where it looks for that job is by default `jobs/id/`
    under the current URL, assuming that this LuminosoClient's URL
    represents a project. You can specify a different URL by changing
    `base_path`.
    If the job failed, will raise a LuminosoError with the job status
    as its message.
    """
    if base_path is None:
        base_path = 'jobs/id'
    path = '%s%d' % (ensure_trailing_slash(base_path), job_id)
    start = time.time()
    next_log = 0
    while True:
        response = self.get(path)
        # A set stop_time means the job finished (success or failure).
        if response['stop_time']:
            if response['success']:
                return response
            else:
                raise LuminosoError(response)
        elapsed = time.time() - start
        if elapsed > next_log:
            # NOTE(review): this logs `next_log` (the threshold just
            # crossed), not the measured `elapsed` -- the reported
            # seconds are approximate; confirm that's intended.
            logger.info('Still waiting (%d seconds elapsed).', next_log)
            next_log += 120
        time.sleep(interval)
"resource": ""
} |
def get_raw(self, path, **params):
    """Return the raw text body of a GET request to `path`.

    Mostly useful for endpoints that serve plain text, such as the
    API documentation.
    """
    full_url = ensure_trailing_slash(self.url + path.lstrip('/'))
    response = self._request('get', full_url, params=params)
    return response.text
"resource": ""
} |
q271842 | _print_csv | test | def _print_csv(result):
"""Print a JSON list of JSON objects in CSV format."""
if type(result) is not list:
raise TypeError("output not able to be displayed as CSV.")
first_line = result[0]
w = csv.DictWriter(sys.stdout, fieldnames=sorted(first_line.keys()))
w.writeheader()
for line in result:
w.writerow(line) | python | {
"resource": ""
} |
q271843 | _read_params | test | def _read_params(input_file, json_body, p_params):
"""Read parameters from input file, -j, and -p arguments, in that order."""
params = {}
try:
if input_file:
params.update(json.load(input_file))
if json_body is not None:
params.update(json.loads(json_body))
except ValueError as e:
raise ValueError("input is not valid JSON: %s" % e)
try:
params.update({p.split('=', 1)[0]: p.split('=', 1)[1] for p in p_params})
except IndexError:
raise ValueError("--param arguments must have key=value format")
return params | python | {
"resource": ""
} |
q271844 | _simplify_doc | test | def _simplify_doc(doc):
"""
Limit a document to just the three fields we should upload.
"""
# Mutate a copy of the document to fill in missing fields
doc = dict(doc)
if 'text' not in doc:
raise ValueError("The document {!r} has no text field".format(doc))
return {
'text': doc['text'],
'metadata': doc.get('metadata', []),
'title': doc.get('title', '')
} | python | {
"resource": ""
} |
def create_project_with_docs(
    client, docs, language, name, account=None, progress=False
):
    """Create and build a Luminoso project from an iterator of documents.

    Uploads `docs` in batches, starts a build, then polls every ten
    seconds until the build reports success or failure.

    Returns the final project status record.
    Raises LuminosoServerError if the build fails.
    """
    description = 'Uploaded using lumi-upload at {}'.format(time.asctime())
    if account is not None:
        proj_record = client.post(
            'projects',
            name=name,
            language=language,
            description=description,
            account_id=account,
        )
    else:
        proj_record = client.post(
            'projects', name=name, language=language, description=description
        )
    proj_id = proj_record['project_id']
    proj_client = client.client_for_path('projects/' + proj_id)
    try:
        progress_bar = tqdm(desc='Uploading documents') if progress else None
        for batch in _batches(docs, BATCH_SIZE):
            docs_to_upload = [_simplify_doc(doc) for doc in batch]
            proj_client.post('upload', docs=docs_to_upload)
            if progress:
                # Bug fix: count the documents actually uploaded; the final
                # batch may hold fewer than BATCH_SIZE documents.
                progress_bar.update(len(docs_to_upload))
    finally:
        if progress:
            progress_bar.close()
    print('The server is building project {!r}.'.format(proj_id))
    proj_client.post('build')
    while True:
        time.sleep(10)
        proj_status = proj_client.get()
        build_info = proj_status['last_build_info']
        if 'success' in build_info:
            if not build_info['success']:
                raise LuminosoServerError(build_info['reason'])
            return proj_status
"resource": ""
} |
def upload_docs(
    client, input_filename, language, name, account=None, progress=False
):
    """Create a project from the JSON-lines documents in `input_filename`.

    `client` must point at the root of the API.
    """
    return create_project_with_docs(
        client,
        iterate_json_lines(input_filename),
        language,
        name,
        account,
        progress=progress,
    )
"resource": ""
} |
def _main(argv):
    """
    Handle arguments for the 'lumi-upload' command.
    """
    parser = argparse.ArgumentParser(
        description=DESCRIPTION,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        '-b',
        '--base-url',
        default=URL_BASE,
        help='API root url, default: %s' % URL_BASE,
    )
    parser.add_argument(
        '-a',
        '--account-id',
        default=None,
        help='Account ID that should own the project, if not the default',
    )
    parser.add_argument(
        '-l',
        '--language',
        default='en',
        help='The language code for the language the text is in. Default: en',
    )
    parser.add_argument('-t', '--token', help="API authentication token")
    parser.add_argument(
        '-s',
        '--save-token',
        action='store_true',
        help='save --token for --base-url to ~/.luminoso/tokens.json',
    )
    parser.add_argument(
        'input_filename',
        help='The JSON-lines (.jsons) file of documents to upload',
    )
    parser.add_argument(
        'project_name',
        nargs='?',
        default=None,
        help='What the project should be called',
    )
    args = parser.parse_args(argv)
    if args.save_token:
        if not args.token:
            raise ValueError("error: no token provided")
        # Persist the token keyed by the API host so later runs can omit -t.
        LuminosoClient.save_token(args.token,
                                  domain=urlparse(args.base_url).netloc)
    client = LuminosoClient.connect(url=args.base_url, token=args.token)
    name = args.project_name
    if name is None:
        # Project name is optional on the command line; prompt for one.
        name = input('Enter a name for the project: ')
        if not name:
            print('Aborting because no name was provided.')
            return
    result = upload_docs(
        client,
        args.input_filename,
        args.language,
        name,
        account=args.account_id,
        progress=True,
    )
    print(
        'Project {!r} created with {} documents'.format(
            result['project_id'], result['document_count']
        )
    )
"resource": ""
} |
def upload_stream(stream, server, account, projname, language=None,
                  username=None, password=None,
                  append=False, stage=False):
    """
    Given a file-like object containing a JSON stream, upload it to
    Luminoso with the given account name and project name.

    With `append=True`, documents are added to an existing project of
    that name instead of a new one. With `stage=True`, documents are
    uploaded without triggering a recalculation.
    """
    client = LuminosoClient.connect(server,
                                    username=username, password=password)
    if not append:
        # If we're not appending to an existing project, create new project.
        info = client.post('/projects/' + account, name=projname)
        project_id = info['project_id']
        print('New project ID:', project_id)
    else:
        projects = client.get('/projects/' + account, name=projname)
        if len(projects) == 0:
            print('No such project exists!')
            return
        if len(projects) > 1:
            print('Warning: Multiple projects with name "%s". ' % projname,
                  end='')
        # Name collisions are resolved by taking the first match.
        project_id = projects[0]['project_id']
        print('Using existing project with id %s.' % project_id)
    project = client.change_path('/projects/' + account + '/' + project_id)
    counter = 0
    # Upload in batches of 1000 documents to bound request size.
    for batch in batches(stream, 1000):
        counter += 1
        documents = list(batch)
        project.upload('docs', documents)
        print('Uploaded batch #%d' % (counter))
    if not stage:
        # Calculate the docs into the assoc space.
        print('Calculating.')
        kwargs = {}
        if language is not None:
            kwargs = {'language': language}
        job_id = project.post('docs/recalculate', **kwargs)
        project.wait_for(job_id)
"resource": ""
} |
def upload_file(filename, server, account, projname, language=None,
                username=None, password=None,
                append=False, stage=False, date_format=None):
    """Upload the documents in `filename` to a Luminoso project.

    The file may contain JSON, a JSON stream, or CSV; it is transcoded
    into a JSON stream before being handed to `upload_stream`.
    """
    transcoded = transcode_to_stream(filename, date_format)
    docs = stream_json_lines(transcoded)
    upload_stream(docs, server, account, projname, language=language,
                  username=username, password=password,
                  append=append, stage=stage)
"resource": ""
} |
def main():
    """
    Handle command line arguments, to upload a file to a Luminoso project
    as a script.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('filename')
    parser.add_argument('account')
    parser.add_argument('project_name')
    parser.add_argument(
        '--append',
        help=("If append flag is used, upload documents to existing project, "
              "rather than creating a new project."),
        action="store_true"
    )
    parser.add_argument(
        '-s', '--stage',
        help="If stage flag is used, just upload docs, don't recalculate.",
        action="store_true"
    )
    parser.add_argument(
        '-a', '--api-url',
        help="Specify an alternate API url",
        default=URL_BASE
    )
    parser.add_argument(
        '-l', '--language',
        help=("Two-letter language code to use when recalculating (e.g. 'en' "
              "or 'ja')")
    )
    parser.add_argument(
        '-u', '--username', default=None,
        help="username (defaults to your username on your computer)"
    )
    parser.add_argument(
        '-p', '--password', default=None,
        help="password (you can leave this out and type it in later)"
    )
    parser.add_argument(
        '-d', '--date-format', default='iso',
        help=("format string for parsing dates, following "
              "http://strftime.org/. Default is 'iso', which is "
              "'%%Y-%%m-%%dT%%H:%%M:%%S+00:00'. Other shortcuts are 'epoch' "
              "for epoch time or 'us-standard' for '%%m/%%d/%%y'")
    )
    args = parser.parse_args()
    # Implement some human-understandable shortcuts for date_format
    date_format_lower = args.date_format.lower()
    if date_format_lower == 'iso':
        date_format = '%Y-%m-%dT%H:%M:%S+00:00'
    elif date_format_lower in ['unix', 'epoch']:
        date_format = 'epoch'
    elif date_format_lower == 'us-standard':
        date_format = '%m/%d/%y'
    else:
        # Any other value is passed straight through as a strftime format.
        date_format = args.date_format
    upload_file(args.filename, args.api_url, args.account, args.project_name,
                language=args.language,
                username=args.username, password=args.password,
                append=args.append, stage=args.stage,
                date_format=date_format)
"resource": ""
} |
def from_user_creds(cls, username, password, url=URL_BASE):
    """Log in with a username and password and wrap the resulting token.

    Posts the credentials to the login endpoint and builds an auth
    object from the short-lived token in the response.
    Raises LuminosoLoginError when the login request is rejected.
    """
    session = requests.session()
    login_url = url.rstrip('/') + '/user/login/'
    token_resp = session.post(login_url,
                              data={'username': username,
                                    'password': password})
    if token_resp.status_code != 200:
        # Prefer the structured error from the JSON body; fall back to
        # the raw response text if it isn't valid JSON.
        error = token_resp.text
        try:
            error = json.loads(error)['error']
        except (KeyError, ValueError):
            pass
        raise LuminosoLoginError(error)
    return cls(token_resp.json()['result']['token'])
"resource": ""
} |
def login(self):
    """Set http session."""
    # Lazily create the requests session on first use.
    if self._session is None:
        self._session = requests.session()
    # adding fake user-agent header
    # NOTE(review): indentation was lost in extraction; this header update
    # is assumed to run on every login, not only when the session is first
    # created -- confirm against upstream pylinky.
    self._session.headers.update({'User-agent': str(UserAgent().random)})
    return self._post_login_page()
"resource": ""
} |
def _post_login_page(self):
    """Login to enedis.

    Submits the credentials form and verifies the auth cookie was set.
    Raises PyLinkyError if the form cannot be submitted or the cookie is
    missing (wrong credentials).
    """
    data = {
        'IDToken1': self.username,
        'IDToken2': self.password,
        # The realm must be base64-encoded, per the login form contract.
        'SunQueryParamsString': base64.b64encode(b'realm=particuliers'),
        'encoded': 'true',
        'gx_charset': 'UTF-8'
    }
    try:
        self._session.post(LOGIN_URL,
                           data=data,
                           allow_redirects=False,
                           timeout=self._timeout)
    except OSError:
        raise PyLinkyError("Can not submit login form")
    # Successful login sets the 'iPlanetDirectoryPro' SSO cookie.
    if 'iPlanetDirectoryPro' not in self._session.cookies:
        raise PyLinkyError("Login error: Please check your username/password.")
    return True
"resource": ""
} |
def _get_data(self, p_p_resource_id, start_date=None, end_date=None):
    """Fetch one consumption graph from the Enedis portal.

    Posts the date range to the data endpoint identified by
    `p_p_resource_id` and returns the decoded 'graphe' payload.

    Raises PyLinkyError on network failure, empty or undecodable
    responses, maintenance redirects, or an error status in the payload.
    """
    data = {
        '_' + REQ_PART + '_dateDebut': start_date,
        '_' + REQ_PART + '_dateFin': end_date
    }
    params = {
        'p_p_id': REQ_PART,
        'p_p_lifecycle': 2,
        'p_p_state': 'normal',
        'p_p_mode': 'view',
        'p_p_resource_id': p_p_resource_id,
        'p_p_cacheability': 'cacheLevelPage',
        'p_p_col_id': 'column-1',
        'p_p_col_pos': 1,
        'p_p_col_count': 3
    }
    try:
        raw_res = self._session.post(DATA_URL,
                                     data=data,
                                     params=params,
                                     allow_redirects=False,
                                     timeout=self._timeout)
        # The portal sometimes answers the first request with a redirect
        # (session refresh); retry once in that case.
        if 300 <= raw_res.status_code < 400:
            raw_res = self._session.post(DATA_URL,
                                         data=data,
                                         params=params,
                                         allow_redirects=False,
                                         timeout=self._timeout)
    except OSError as e:
        raise PyLinkyError("Could not access enedis.fr: " + str(e))
    # Bug fix: use equality instead of `is` -- identity comparison against
    # a string literal is implementation-dependent and wrong here.
    if raw_res.text == "":
        raise PyLinkyError("No data")
    if 302 == raw_res.status_code and "/messages/maintenance.html" in raw_res.text:
        raise PyLinkyError("Site in maintenance")
    try:
        json_output = raw_res.json()
    except (OSError, json.decoder.JSONDecodeError, simplejson.errors.JSONDecodeError) as e:
        raise PyLinkyError("Impossible to decode response: " + str(e) + "\nResponse was: " + str(raw_res.text))
    if json_output.get('etat').get('valeur') == 'erreur':
        raise PyLinkyError("Enedis.fr answered with an error: " + str(json_output))
    return json_output.get('graphe')
"resource": ""
} |
def fetch_data(self):
    """Refresh the cached data for every supported period from Enedis."""
    for period in (HOURLY, DAILY, MONTHLY, YEARLY):
        self._data[period] = self.get_data_per_period(period)
"resource": ""
} |
def prepare(self):
    """ Load the view on first load """
    # The rendered View is cached on the handler class; build it only once.
    if self.__class__.view:
        return
    #: Load the View class from the dotted view name
    with enaml.imports():
        View = pydoc.locate(self.page.view)
    assert View, "Failed to import View: {}".format(self.page.view)
    #: Set initial view properties
    self.__class__.view = View(
        site=self.site,
        page=self.page,
        request=self.request,
    )
"resource": ""
} |
def initialize(self):
    """ Load the view on first load could also load based on session, group, etc..
    """
    # Reuse the class-cached view, rebinding it to this handler/request.
    if self.__class__.view:
        self.view.handler = self
        self.view.request = self.request
        return
    #: Load the View class from the dotted view name
    with enaml.imports():
        from views.index import View
    #: Set initial view properties
    self.__class__.view = View(
        company=current_company,
        request=self.request,
        handler=self,
    )
def get(self, *args, **kwargs):
    """Serve the page: upgrade websocket requests, else render the view."""
    if not self.is_websocket():
        # Plain HTTP request: render the cached view to HTML.
        self.write(self.view.render())
        return
    # Websocket handshake: defer to the websocket implementation.
    return super(DemoHandler, self).get(*args, **kwargs)
"resource": ""
} |
def on_message(self, message):
    """ When enaml.js sends a message """
    #: Decode message
    change = tornado.escape.json_decode(message)
    #: Get the owner ID
    ref = change.get('ref')
    if not ref:
        return
    #: Get the server side representation of the node
    #: If found will return the View declaration node
    node = self.view.xpath('//*[@ref="{}"]'.format(ref), first=True)
    if node is None:
        return
    #: Handle the event
    if change.get('type') and change.get('name'):
        if change['type'] == 'event':
            #: Trigger the event
            trigger = getattr(node, change['name'])
            trigger()
        if change['type'] == 'update':
            #: Trigger the update by mirroring the client value onto the node
            setattr(node, change['name'], change['value'])
"resource": ""
} |
q271860 | Site._update_menus | test | def _update_menus(self,change):
""" When pages change, update the menus"""
menus = {}
#: Get all links
links = [p.link for p in self.pages if p.link] + self.links
#: Put all links in the correct menu
for link in links:
for menu in link.menus:
if menu not in menus:
menus[menu] = []
menus[menu].append(link)
#: Update the menus
for name,menu in menus.items():
k = '{}_menu'.format(name)
if hasattr(self,k):
setattr(self,k,menu) | python | {
"resource": ""
} |
def _default_handlers(self):
    """ Generate the handlers for this site """
    # Serve the bundled static assets from alongside this module.
    static_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "static"))
    urls = [
        (r"/static/(.*)", cyclone.web.StaticFileHandler, {"path": static_path}),
    ]
    # Wire each page's handler class back to the site and page it serves.
    for p in self.pages:
        handler = p.handler
        handler.site = self
        handler.page = p
        urls.append((p.link.url, handler))
    return urls
def on_message(self, message):
    """ When we get an event from js, lookup the node and invoke the
    action on the enaml node.
    """
    change = json.loads(message)
    log.debug(f'Update from js: {change}')
    # Lookup the node
    ref = change.get('ref')
    if not ref:
        return
    nodes = self.viewer.xpath('//*[@ref=$ref]', ref=ref)
    if not nodes:
        return  # Unknown node
    node = nodes[0]
    # Trigger the change on the enaml node
    if change.get('type') and change.get('name'):
        if change['type'] == 'event':
            # Fire the named event on the node.
            trigger = getattr(node, change['name'])
            trigger()
        elif change['type'] == 'update':
            # Trigger the update by mirroring the client value onto the node.
            setattr(node, change['name'], change['value'])
    else:
        log.warning(f"Unhandled event {self} {node}: {change}")
"resource": ""
} |
def on_dom_modified(self, change):
    """Forward an enaml DOM change to the browser over the websocket."""
    log.debug(f'Update from enaml: {change}')
    payload = json.dumps(change['value'])
    self.write_message(payload)
} |
def create_widget(self):
    """ Create the toolkit widget for the proxy object.

    This method is called during the top-down pass, just before the
    'init_widget()' method is called. This method should create the
    toolkit widget and assign it to the 'widget' attribute.
    """
    # The "widget" is an XML element appended under the parent's element.
    self.widget = SubElement(self.parent_widget(), self.declaration.tag)
"resource": ""
} |
def init_widget(self):
    """ Initialize the state of the toolkit widget.

    This method is called during the top-down pass, just after the
    'create_widget()' method is called. This method should init the
    state of the widget. The child widgets will not yet be created.
    """
    widget = self.widget
    d = self.declaration
    #: Save ref id
    # atomref keeps a weak handle, so the cache does not prolong lifetimes.
    ref = d.ref
    CACHE[ref] = atomref(self)
    widget.set('ref', ref)
    if d.text:
        self.set_text(d.text)
    if d.tail:
        self.set_tail(d.tail)
    if d.style:
        self.set_style(d.style)
    if d.cls:
        self.set_cls(d.cls)
    if d.attrs:
        self.set_attrs(d.attrs)
    if d.id:
        widget.set('id', d.id)
    if d.draggable:
        self.set_draggable(d.draggable)
    # Set any attributes that may be defined
    for name, member in d.members().items():
        if not member.metadata:
            continue
        meta = member.metadata
        # Exclude any attr tags
        if not (meta.get('d_member') and meta.get('d_final')):
            continue
        # Skip any items with attr=false
        elif not meta.get('attr', True):
            continue
        elif isinstance(member, Event):
            # Events are wired elsewhere, never rendered as attributes.
            continue
        value = getattr(d, name)
        if value:
            self.set_attribute(name, value)
"resource": ""
} |
def destroy(self):
    """ A reimplemented destructor.

    This destructor will clear the reference to the toolkit widget
    and set its parent to None.
    """
    widget = self.widget
    if widget is not None:
        # Detach the element from its parent in the DOM tree.
        parent = widget.getparent()
        if parent is not None:
            parent.remove(widget)
        del self.widget
    d = self.declaration
    # Drop the ref-cache entry; it may already be gone.
    try:
        del CACHE[d.ref]
    except KeyError:
        pass
    super(WebComponent, self).destroy()
"resource": ""
} |
def child_added(self, child):
    """ Handle the child added event from the declaration.

    This handler will insert the child toolkit widget in the correct
    position. Subclasses which need more control should reimplement this
    method.
    """
    super(WebComponent, self).child_added(child)
    if child.widget is not None:
        # Use insert to put in the correct spot
        for i, c in enumerate(self.children()):
            if c == child:
                self.widget.insert(i, child.widget)
                break
"resource": ""
} |
def child_removed(self, child):
    """ Handle the child removed event from the declaration.

    This handler will unparent the child toolkit widget. Subclasses
    which need more control should reimplement this method.
    """
    super(WebComponent, self).child_removed(child)
    if child.widget is not None:
        # Delete by index so the matching DOM element is removed.
        for i, c in enumerate(self.children()):
            if c == child:
                del self.widget[i]
                break
"resource": ""
} |
def child_widgets(self):
    """ Get the child toolkit widgets for this object.

    Returns
    -------
    result : iterable of QObject
        The child widgets defined for this object; children with no
        widget are skipped.
    """
    return (c.widget for c in self.children() if c.widget is not None)
"resource": ""
} |
def set_attribute(self, name, value):
    """ Generic setter for attributes without an explicit handler.

    True renders as a boolean attribute (name="name"), False removes
    the attribute entirely, and any other value is stringified.
    """
    widget = self.widget
    if value is False:
        del widget.attrib[name]
    elif value is True:
        widget.set(name, name)
    else:
        widget.set(name, str(value))
"resource": ""
} |
def _update_proxy(self, change):
    """ Update the proxy widget when the Widget data
    changes.
    """
    #: Try default handler
    if change['type'] == 'update' and self.proxy_is_active:
        # Prefer a specific set_<name> handler on the proxy; otherwise
        # fall back to the generic attribute setter.
        handler = getattr(self.proxy, 'set_' + change['name'], None)
        if handler is not None:
            handler(change['value'])
        else:
            self.proxy.set_attribute(change['name'], change['value'])
        self._notify_modified(change)
"resource": ""
} |
def _notify_modified(self, change):
    """Forward a change to the websocket client via the root Html node.

    Only dispatches when this node is attached to an Html root;
    otherwise the change is dropped.
    """
    root = self.root_object()
    if isinstance(root, Html):
        # Fix: the previous version bound an unused local (`name`);
        # build the payload directly instead.
        root.modified({
            'ref': self.ref,
            'type': change['type'],
            'name': change['name'],
            'value': change['value'],
        })
"resource": ""
} |
def xpath(self, query, **kwargs):
    """Return declarations of the proxy nodes matching the xpath `query`."""
    matches = self.proxy.find(query, **kwargs)
    return [match.declaration for match in matches]
"resource": ""
} |
def prepare(self, **kwargs):
    """Apply `kwargs` as attributes, then make sure the node is
    initialized and its proxy active, ready for rendering."""
    for attr, value in kwargs.items():
        setattr(self, attr, value)
    if not self.is_initialized:
        self.initialize()
    if not self.proxy_is_active:
        self.activate_proxy()
"resource": ""
} |
def init_widget(self):
    """ Initialize the widget with the source. """
    d = self.declaration
    if d.source:
        # A source string takes precedence over normal initialization.
        self.set_source(d.source)
    else:
        super(RawComponent, self).init_widget()
"resource": ""
} |
def set_source(self, source):
    """ Set the source by parsing the source and inserting it into the
    component.
    """
    self.widget.clear()
    html = etree.HTML(source)
    # etree.HTML wraps the fragment; graft the children of its first
    # child here (presumably the <body> element -- TODO confirm).
    self.widget.extend(html[0])
    # Clear removes everything so it must be reinitialized
    super(RawComponent, self).init_widget()
"resource": ""
} |
def _observe_mode(self, change):
    """ If the mode changes. Refresh the items.
    """
    block = self.block
    if block and self.is_initialized and change['type'] == 'update':
        if change['oldvalue'] == 'replace':
            # Content discarded by 'replace' cannot be restored.
            raise NotImplementedError
        # Detach our children from the referenced block before refresh.
        for c in self.children:
            block.children.remove(c)
            c.set_parent(None)
        self.refresh_items()
"resource": ""
} |
def _observe_block(self, change):
    """ A change handler for the 'objects' list of the Include.

    If the object is initialized objects which are removed will be
    unparented and objects which are added will be reparented. Old
    objects will be destroyed if the 'destroy_old' flag is True.
    """
    if self.is_initialized and change['type'] == 'update':
        # Detach our children from the previously-referenced block.
        old_block = change['oldvalue']
        for c in self.children:
            old_block.children.remove(c)
            c.set_parent(None)
        self.refresh_items()
"resource": ""
} |
def _observe__children(self, change):
    """ When the children of the block change. Update the referenced
    block.
    """
    if not self.is_initialized or change['type'] != 'update':
        return
    block = self.block
    new_children = change['value']
    old_children = change['oldvalue']
    # Destroy children that were dropped; merely unparent the survivors
    # so they can be reinserted below.
    for c in old_children:
        if c not in new_children and not c.is_destroyed:
            c.destroy()
        else:
            c.set_parent(None)
    if block:
        # This block is inserting into another block
        before = None
        if self.mode == 'replace':
            block.children = []
        if self.mode == 'prepend' and block.children:
            # Insert ahead of the block's current first child.
            before = block.children[0]
        block.insert_children(before, new_children)
    else:
        # This block is a placeholder
        self.parent.insert_children(self, new_children)
"resource": ""
} |
def read(*pathcomponents):
    """Read the contents of a file located relative to setup.py"""
    path = join(abspath(dirname(__file__)), *pathcomponents)
    with open(path) as handle:
        return handle.read()
"resource": ""
} |
def error(msg, exit_code):
    """
    Report `msg` on stderr with a usage hint, then exit with `exit_code`.
    """
    text = "%s\ntry 'mongotail --help' for more information\n" % msg
    sys.stderr.write(text)
    sys.stderr.flush()
    exit(exit_code)
"resource": ""
} |
def error_parsing(msg="unknown options"):
    """
    Report a command-line parsing error on stderr and exit with EINVAL.
    """
    template = "Error parsing command line: %s\ntry 'mongotail --help' for more information\n"
    sys.stderr.write(template % msg)
    sys.stderr.flush()
    exit(EINVAL)
"resource": ""
} |
def get_product_by_name(self, name):
    '''
    Return the first menu item whose name matches `name`.

    Matching is case-insensitive but the name must be spelt correctly.

    :param string name: The name of the item.
    :raises StopIteration: Raises exception if no item is found.
    :return: An item object matching the search.
    :rtype: Item
    '''
    wanted = name.lower()
    return next(item for item in self.items if item.name.lower() == wanted)
"resource": ""
} |
def new_session(self, session):
    '''
    Clear out the current session on the remote and setup a new one.

    :return: A response from having expired the current session.
    :rtype: requests.Response
    '''
    # Expire the server-side session before swapping in the new one.
    response = self.__get('/Home/SessionExpire')
    self.session = update_session_headers(session)
    return response
"resource": ""
} |
def reset_store(self):
    '''
    Clears out the current store and gets a cookie. Set the cross site
    request forgery token for each subsequent request.

    :return: A response having cleared the current store.
    :rtype: requests.Response
    '''
    response = self.__get('/Store/Reset')
    # Echo the XSRF cookie back as a header on all later requests.
    token = self.session.cookies['XSRF-TOKEN']
    self.session.headers.update({'X-XSRF-TOKEN': token})
    return response
"resource": ""
} |
def get_stores(self, search_term):
    '''
    Search for dominos pizza stores using a search term.

    :param string search: Search term.
    :return: A list of nearby stores matching the search term.
    :rtype: list
    '''
    response = self.__get('/storefindermap/storesearch',
                          params={'SearchText': search_term})
    return Stores(response.json())
"resource": ""
} |
def set_delivery_system(self, store, postcode, fulfilment_method=FULFILMENT_METHOD.DELIVERY):
    '''
    Set local cookies by initialising the delivery system on the remote.
    Requires a store ID and a delivery postcode.

    :param Store store: Store id.
    :param string postcode: A postcode.
    :return: A response having initialised the delivery system.
    :rtype: requests.Response
    '''
    # The API expects the fulfilment method as a lowercase string.
    method = 'delivery' if fulfilment_method == FULFILMENT_METHOD.DELIVERY else 'collection'
    params = {
        'fulfilmentMethod': method,
        'postcode': postcode,
        'storeid': store.store_id
    }
    return self.__post('/Journey/Initialize', json=params)
"resource": ""
} |
def get_menu(self, store):
    '''
    Retrieve the menu from the selected store.

    :param Store store: A store.
    :return: The store menu.
    :rtype: Menu
    '''
    params = {
        # Fall back to the collection-only menu when delivery is unavailable.
        'collectionOnly': not store.delivery_available,
        'menuVersion': store.menu_version,
        'storeId': store.store_id,
    }
    response = self.__get('/ProductCatalog/GetStoreCatalog', params=params)
    return Menu(response.json())
"resource": ""
} |
def add_item_to_basket(self, item, variant=VARIANT.MEDIUM, quantity=1):
    '''
    Add an item to the current basket.

    :param Item item: Item from menu.
    :param int variant: Item SKU id. Ignored if the item is a side.
    :param int quantity: The quantity of item to be added.
    :return: A response having added an item to the current basket,
        or None for unknown item types.
    :rtype: requests.Response
    '''
    if item.type == 'Pizza':
        return self.add_pizza_to_basket(item, variant, quantity)
    if item.type == 'Side':
        return self.add_side_to_basket(item, quantity)
    # Unknown item types are silently ignored.
    return None
"resource": ""
} |
def add_pizza_to_basket(self, item, variant=VARIANT.MEDIUM, quantity=1):
    '''
    Add a pizza to the current basket.

    :param Item item: Item from menu.
    :param int variant: Item SKU id. Some defaults are defined in the VARIANT enum.
    :param int quantity: The quantity of pizza to be added.
    :return: A response having added a pizza to the current basket.
    :rtype: requests.Response
    '''
    item_variant = item[variant]
    # Bug fix: set/list .update() returns None, so the previous code always
    # sent `ingredients: null`. Build the augmented ingredient list
    # explicitly instead. (36 and 42 appear to be standard base
    # ingredients -- TODO confirm against the live API.)
    ingredients = list(item_variant['ingredients']) + [36, 42]
    params = {
        'stepId': 0,
        'quantity': quantity,
        'sizeId': variant,
        'productId': item.item_id,
        'ingredients': ingredients,
        'productIdHalfTwo': 0,
        'ingredientsHalfTwo': [],
        'recipeReferrer': 0
    }
    return self.__post('/Basket/AddPizza', json=params)
"resource": ""
} |
def add_side_to_basket(self, item, quantity=1):
    '''
    Add a side to the current basket.

    :param Item item: Item from menu.
    :param int quantity: The quantity of side to be added.
    :return: A response having added a side to the current basket.
    :rtype: requests.Response
    '''
    # Sides always use the "personal" SKU.
    sku = item[VARIANT.PERSONAL]['productSkuId']
    payload = {
        'productSkuId': sku,
        'quantity': quantity,
        'ComplimentaryItems': []
    }
    return self.__post('/Basket/AddProduct', json=payload)
"resource": ""
} |
def remove_item_from_basket(self, idx):
    '''
    Remove an item from the current basket.

    :param int idx: Basket item id.
    :return: A response having removed an item from the current basket.
    :rtype: requests.Response
    '''
    payload = {'basketItemId': idx, 'wizardItemDelete': False}
    return self.__post('/Basket/RemoveBasketItem', json=payload)
"resource": ""
} |
def set_payment_method(self, method=PAYMENT_METHOD.CASH_ON_DELIVERY):
    '''
    Select the payment method going to be used to make a purchase.

    :param int method: Payment method id.
    :return: A response having set the payment option.
    :rtype: requests.Response
    '''
    return self.__post('/PaymentOptions/SetPaymentMethod',
                       json={'paymentMethod': method})
"resource": ""
} |
def process_payment(self):
    '''
    Proceed with payment using the payment method selected earlier.

    :return: A response having processed the payment.
    :rtype: requests.Response
    '''
    # NOTE(review): this sends the whole cookie jar as the request
    # verification token -- presumably the server extracts the token
    # cookie; confirm this is intentional.
    params = {
        '__RequestVerificationToken': self.session.cookies,
        'method': 'submit'
    }
    return self.__post('/PaymentOptions/Proceed', json=params)
"resource": ""
} |
def __get(self, path, **kwargs):
    '''
    Make a HTTP GET request to the Dominos UK API with the given parameters
    for the current session.

    :param string path: The API endpoint path.
    :params list kwargs: A list of arguments.
    :return: A response from the Dominos UK API.
    :rtype: response.Response
    '''
    verb = self.session.get
    return self.__call_api(verb, path, **kwargs)
"resource": ""
} |
def __post(self, path, **kwargs):
    '''
    Make a HTTP POST request to the Dominos UK API with the given
    parameters for the current session.

    :param string path: The API endpoint path.
    :params list kwargs: A list of arguments.
    :return: A response from the Dominos UK API.
    :rtype: response.Response
    '''
    verb = self.session.post
    return self.__call_api(verb, path, **kwargs)
"resource": ""
} |
def __call_api(self, verb, path, **kwargs):
    '''
    Make a HTTP request to the Dominos UK API with the given parameters for
    the current session.

    :param verb func: HTTP method on the session.
    :param string path: The API endpoint path.
    :params list kwargs: A list of arguments.
    :return: A response from the Dominos UK API.
    :rtype: response.Response
    '''
    response = verb(self.__url(path), **kwargs)
    if response.status_code == 200:
        return response
    raise ApiError('{}: {}'.format(response.status_code, response))
"resource": ""
} |
def append_item(self, item):
    """
    Add an item to the end of the menu before the exit item

    :param MenuItem item: The item to be added
    """
    # Temporarily pull the exit item so the new item lands before it.
    did_remove = self.remove_exit()
    item.menu = self
    self.items.append(item)
    if did_remove:
        self.add_exit()
    if self.screen:
        # Grow the curses pad if the menu no longer fits, then redraw.
        max_row, max_cols = self.screen.getmaxyx()
        if max_row < 6 + len(self.items):
            self.screen.resize(6 + len(self.items), max_cols)
        self.draw()
"resource": ""
} |
def add_exit(self):
    """
    Append the exit item if it is not already last.

    Used to make sure there aren't multiple exit items.

    :return: True if item needed to be added, False otherwise
    :rtype: bool
    """
    if not self.items or self.items[-1] is self.exit_item:
        return False
    self.items.append(self.exit_item)
    return True
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.