| code (strings, length 51–2.34k) | docstring (strings, length 11–171) |
|---|---|
def local_variable_action(self, text, loc, var):
    """Parser action executed after recognising a local variable declaration.

    Records the parse position, optionally emits debug output, then inserts
    the variable into the symbol table and returns its slot index.
    """
    exshared.setpos(loc, text)
    if DEBUG > 0:
        print("LOCAL_VAR:", var, var.name, var.type)
        if DEBUG == 2:
            self.symtab.display()
        if DEBUG > 2:
            return
    slot = self.shared.function_vars
    self.shared.function_vars = slot + 1
    return self.symtab.insert_local_var(var.name, var.type, slot)
def handle_existing_user(self, provider, user, access, info):
    """Log the user in and redirect to the configured post-login URL."""
    login(self.request, user)
    target = self.get_login_redirect(provider, user, access)
    return redirect(target)
def _make_nested_list(self, gen):
    """Recursively convert a (possibly nested) generator into nested lists.

    Produces a structure suitable for ``numpy.array``. ``None`` items are
    kept as-is; non-string iterables are recursed into.
    """
    out = []
    for item in gen:
        if item is None:
            out.append(None)
        elif not is_string_like(item) and is_generator_like(item):
            out.append(self._make_nested_list(item))
        else:
            out.append(item)
    return out
def stats(args):
    """Create stats from the analysis: summarise SAM alignments and write them out."""
    logger.info("Reading sequeces")
    data = parse_ma_file(args.ma)
    logger.info("Get sequences from sam")
    is_align = _read_sam(args.sam)
    is_json, is_db = _read_json(args.json)
    summary = _summarise_sam(data, is_align, is_json, is_db)
    out_file = os.path.join(args.out, "stats_align.dat")
    _write_suma(summary, out_file)
    logger.info("Done")
def on_failure(self, exc, task_id, args, kwargs, einfo):
    """Increase the task's penalty (capped at MAX_PENALTY) and reset its run counter."""
    cache_key = self._get_cache_key(args, kwargs)
    _, penalty = cache.get(cache_key, (0, 0))
    if penalty < self.MAX_PENALTY:
        penalty += 1
        logger.debug('The task %s is penalized and will be executed on %d run.' % (self.name, penalty))
    # Store (counter, penalty) with the counter reset to the penalty value.
    cache.set(cache_key, (penalty, penalty), self.CACHE_LIFETIME)
    return super(PenalizedBackgroundTask, self).on_failure(exc, task_id, args, kwargs, einfo)
def com_google_fonts_check_ligature_carets(ttFont, ligature_glyphs):
if ligature_glyphs == -1:
yield FAIL, Message("malformed", "Failed to lookup ligatures."
" This font file seems to be malformed."
" For more info, read:"
" https://github.com"
"/googlefonts/fontbakery/issues/1596")
elif "GDEF" not in ttFont:
yield WARN, Message("GDEF-missing",
("GDEF table is missing, but it is mandatory"
" to declare it on fonts that provide ligature"
" glyphs because the caret (text cursor)"
" positioning for each ligature must be"
" provided in this table."))
else:
lig_caret_list = ttFont["GDEF"].table.LigCaretList
if lig_caret_list is None:
missing = set(ligature_glyphs)
else:
missing = set(ligature_glyphs) - set(lig_caret_list.Coverage.glyphs)
if lig_caret_list is None or lig_caret_list.LigGlyphCount == 0:
yield WARN, Message("lacks-caret-pos",
("This font lacks caret position values for"
" ligature glyphs on its GDEF table."))
elif missing:
missing = "\n\t- ".join(missing)
yield WARN, Message("incomplete-caret-pos-data",
("This font lacks caret positioning"
" values for these ligature glyphs:"
f"\n\t- {missing}\n\n "))
else:
yield PASS, "Looks good!" | Are there caret positions declared for every ligature? |
def create_experiment_summary():
temperature_list = struct_pb2.ListValue()
temperature_list.extend(TEMPERATURE_LIST)
materials = struct_pb2.ListValue()
materials.extend(HEAT_COEFFICIENTS.keys())
return summary.experiment_pb(
hparam_infos=[
api_pb2.HParamInfo(name='initial_temperature',
display_name='Initial temperature',
type=api_pb2.DATA_TYPE_FLOAT64,
domain_discrete=temperature_list),
api_pb2.HParamInfo(name='ambient_temperature',
display_name='Ambient temperature',
type=api_pb2.DATA_TYPE_FLOAT64,
domain_discrete=temperature_list),
api_pb2.HParamInfo(name='material',
display_name='Material',
type=api_pb2.DATA_TYPE_STRING,
domain_discrete=materials)
],
metric_infos=[
api_pb2.MetricInfo(
name=api_pb2.MetricName(
tag='temperature/current/scalar_summary'),
display_name='Current Temp.'),
api_pb2.MetricInfo(
name=api_pb2.MetricName(
tag='temperature/difference_to_ambient/scalar_summary'),
display_name='Difference To Ambient Temp.'),
api_pb2.MetricInfo(
name=api_pb2.MetricName(
tag='delta/scalar_summary'),
display_name='Delta T')
]
) | Returns a summary proto buffer holding this experiment. |
def should_exclude(self, filename) -> bool:
    """Return True when *filename* matches any of the configured skip globs."""
    return any(
        self.filename_matches_glob(filename, pattern)
        for pattern in self.skip_globs
    )
def add_crosshair_to_image(fname, opFilename):
im = Image.open(fname)
draw = ImageDraw.Draw(im)
draw.line((0, 0) + im.size, fill=(255, 255, 255))
draw.line((0, im.size[1], im.size[0], 0), fill=(255, 255, 255))
del draw
im.save(opFilename) | convert an image by adding a cross hair |
def _wrapped(self):
assignments = tuple(
a for a in functools.WRAPPER_ASSIGNMENTS if a != '__name__' and a != '__module__')
@functools.wraps(self.func, assigned=assignments)
def wrapper(*args):
return self(*args)
wrapper.__name__ = self._name
wrapper.__module__ = (self.func.__module__ if hasattr(self.func, '__module__')
else self.func.__class__.__module__)
wrapper.func = self.func
wrapper.returnType = self.returnType
wrapper.evalType = self.evalType
wrapper.deterministic = self.deterministic
wrapper.asNondeterministic = functools.wraps(
self.asNondeterministic)(lambda: self.asNondeterministic()._wrapped())
return wrapper | Wrap this udf with a function and attach docstring from func |
def pause(message='Press any key to continue . . . '):
if message is not None:
print(message, end='')
sys.stdout.flush()
getch()
print() | Prints the specified message if it's not None and waits for a keypress. |
def valid_status(*valid):
    """Decorator asserting that ``self.status`` is one of the *valid* states.

    The wrapped method raises ``protocol.ProtocolError`` when invoked in any
    other state.
    """
    def decorator(func):
        @functools.wraps(func)
        def _valid_status(self, *args, **kwargs):
            if self.status in valid:
                return func(self, *args, **kwargs)
            raise protocol.ProtocolError(
                "`%s` called while in state: %s, valid: (%s)" % (
                    func.__name__, self.status, ",".join(map(str, valid))))
        return _valid_status
    return decorator
def generate_cdef():
include_libc_path = path.join(HERE, 'fake_libc_include')
include_vulkan_path = path.join(HERE, 'vulkan_include')
out_file = path.join(HERE, path.pardir, 'vulkan', 'vulkan.cdef.h')
header = path.join(include_vulkan_path, 'vulkan.h')
command = ['cpp',
'-std=c99',
'-P',
'-nostdinc',
'-I' + include_libc_path,
'-I' + include_vulkan_path,
'-o' + out_file,
'-DVK_USE_PLATFORM_XCB_KHR',
'-DVK_USE_PLATFORM_WAYLAND_KHR',
'-DVK_USE_PLATFORM_ANDROID_KHR',
'-DVK_USE_PLATFORM_WIN32_KHR',
'-DVK_USE_PLATFORM_XLIB_KHR',
header]
subprocess.run(command, check=True) | Generate the cdef output file |
def _line_shift(x:Tensor, mask:bool=False):
"Shift the line i of `x` by p-i elements to the left, is `mask` puts 0s on the diagonal."
bs,nh,n,p = x.size()
x_pad = torch.cat([x.new_zeros(bs,nh,n,1), x], dim=3)
x_shift = x_pad.view(bs,nh,p + 1,n)[:,:,1:].view_as(x)
if mask: x_shift.mul_(torch.tril(x.new_ones(n,p), p-n)[None,None,])
return x_shift | Shift the line i of `x` by p-i elements to the left, is `mask` puts 0s on the diagonal. |
async def login(self, email: str, password: str) -> bool:
login_resp = await self._request(
'post',
API_URL_USER,
json={
'version': '1.0',
'method': 'Signin',
'param': {
'Email': email,
'Password': password,
'CaptchaCode': ''
},
'sourcetype': 0
})
_LOGGER.debug('Login response: %s', login_resp)
if login_resp.get('Code') != 0:
return False
self.account_id = login_resp['Json']['gid']
return True | Login to the profile. |
def send_message() -> None:
if not _msg_queue:
return
msg = json.dumps(_msg_queue)
_msg_queue.clear()
for conn in module.connections:
conn.write_message(msg) | Send message via WS to all client connections. |
def validate(self):
if not isinstance(self.fields, dict):
raise TypeError(u'Expected dict fields, got: {} {}'.format(
type(self.fields).__name__, self.fields))
for key, value in six.iteritems(self.fields):
validate_safe_string(key)
if not isinstance(value, Expression):
raise TypeError(
u'Expected Expression values in the fields dict, got: '
u'{} -> {}'.format(key, value)) | Ensure that the ConstructResult block is valid. |
def setup_failures(self, gremlins):
    """Install every gremlin described by the *gremlins* spec, then push the rules."""
    assert isinstance(gremlins, dict) and 'gremlins' in gremlins
    for spec in gremlins['gremlins']:
        self.setup_failure(**spec)
    self.push_rules()
def _save_percolator(self):
index = Content.search_objects.mapping.index
query_filter = self.get_content().to_dict()
q = {}
if "query" in query_filter:
q = {"query": query_filter.get("query", {})}
else:
return
es.index(
index=index,
doc_type=".percolator",
body=q,
id=self.es_id
) | saves the query field as an elasticsearch percolator |
def _get_django_queryset(self):
prefetches = []
for field, fprefetch in self.prefetches.items():
has_query = hasattr(fprefetch, 'query')
qs = fprefetch.query.queryset if has_query else None
prefetches.append(
Prefetch(field, queryset=qs)
)
queryset = self.queryset
if prefetches:
queryset = queryset.prefetch_related(*prefetches)
return queryset | Return Django QuerySet with prefetches properly configured. |
def create_copy_without_data(G):
    """Return a copy of graph G with the same nodes but no attribute data.

    Bug fix: the original iterated over the nodes of the freshly created,
    *empty* graph ``H`` (``H.nodes_iter()``), so it always returned an empty
    graph. Iterate over ``G``'s nodes instead.
    """
    H = nx.Graph()
    # Adding bare node ids gives each node a fresh, empty attribute dict.
    H.add_nodes_from(G.nodes())
    return H
def func_dump(func):
    """Serialize a user-defined function into (code, defaults, closure)."""
    raw = marshal.dumps(func.__code__)
    code = raw.decode('raw_unicode_escape')
    closure = None
    if func.__closure__:
        closure = tuple(cell.cell_contents for cell in func.__closure__)
    return code, func.__defaults__, closure
def url(self, name):
    """Return a direct-serving URL for the blobstore file *name*."""
    gs_key = blobstore.create_gs_key('/gs' + name)
    return images.get_serving_url(gs_key)
def load_bytes(buf, num, pos):
    """Return a tuple of (*num* bytes of *buf* starting at *pos*, new position).

    Raises BadRarFile when the requested range runs past the end of *buf*.
    """
    end = pos + num
    if end > len(buf):
        raise BadRarFile('cannot load bytes')
    return buf[pos:end], end
def collect_lockfile_dependencies(lockfile_data):
    """Convert the lockfile mapping {name: version} to the dependencies schema."""
    return {
        name: {
            'source': 'example-package-manager',
            'installed': {'name': version},
        }
        for name, version in lockfile_data.items()
    }
def notblocked(page):
for blocked in PAGES_TO_BLOCK:
if blocked[0] != '*':
blocked = '*' + blocked
rx = re.compile(blocked.replace('*', '[^$]*'))
if rx.match(page):
return False
return True | Determine if given url is a page that should be in sitemap. |
def _show_one(audio_file):
"given an audio file, print out the artist, title and some audio attributes of the song"
print 'File: ', audio_file
pytrack = track.track_from_filename(audio_file)
print 'Artist: ', pytrack.artist if hasattr(pytrack, 'artist') else 'Unknown'
print 'Title: ', pytrack.title if hasattr(pytrack, 'title') else 'Unknown'
print 'Track ID: ', pytrack.id
print 'Tempo: ', pytrack.tempo
print 'Energy: %1.3f %s' % (pytrack.energy, _bar(pytrack.energy))
if not pytrack.valence:
pytrack = track.track_from_filename(audio_file, force_upload=True)
print 'Valence: %1.3f %s' % (pytrack.valence, _bar(pytrack.valence))
print 'Acousticness: %1.3f %s' % (pytrack.acousticness, _bar(pytrack.acousticness))
print | given an audio file, print out the artist, title and some audio attributes of the song |
def map_to_linear(self, with_stocks: bool=False):
    """Flatten the per-account-class trees into one linear list of display rows."""
    rows = []
    for account_class in self.model.classes:
        rows.extend(self.__get_ac_tree(account_class, with_stocks))
    return rows
def ecp_pot_str(pot):
am = pot['angular_momentum']
amchar = lut.amint_to_char(am)
rexponents = pot['r_exponents']
gexponents = pot['gaussian_exponents']
coefficients = pot['coefficients']
point_places = [0, 10, 33]
s = 'Potential: {} potential\n'.format(amchar)
s += 'Type: {}\n'.format(pot['ecp_type'])
s += write_matrix([rexponents, gexponents, *coefficients], point_places)
return s | Return a string representing the data for an ECP potential |
def copy_settings(self, settings_module):
    """Copy the source file of *settings_module* into model_dir/settings.py."""
    src = inspect.getsourcefile(settings_module)
    dst = os.path.join(self.model_dir, 'settings.py')
    shutil.copyfile(src, dst)
def send_error_explain(self, code, message=None, headers=None, content_type=None):
"do not use directly"
if headers is None:
headers = {}
if code in self.responses:
if message is None:
message = self.responses[code][0]
explain = self.responses[code][1]
else:
explain = ""
if message is None:
message = ""
if not isinstance(headers, dict):
headers = {}
if not content_type:
if self._cmd and self._cmd.content_type:
content_type = self._cmd.content_type
else:
content_type = self._DEFAULT_CONTENT_TYPE
if self._cmd and self._cmd.charset:
charset = self._cmd.charset
else:
charset = DEFAULT_CHARSET
headers['Content-type'] = "%s; charset=%s" % (content_type, charset)
data = self._mk_error_explain_data(code, message, explain)
self.end_response(self.build_response(code, data, headers)) | do not use directly |
async def expose(self):
app_facade = client.ApplicationFacade.from_connection(self.connection)
log.debug(
'Exposing %s', self.name)
return await app_facade.Expose(self.name) | Make this application publicly available over the network. |
def write_to_directory(self, dataset_info_dir):
if self.features:
self.features.save_metadata(dataset_info_dir)
if self.redistribution_info.license:
with tf.io.gfile.GFile(self._license_filename(dataset_info_dir),
"w") as f:
f.write(self.redistribution_info.license)
with tf.io.gfile.GFile(self._dataset_info_filename(dataset_info_dir),
"w") as f:
f.write(self.as_json) | Write `DatasetInfo` as JSON to `dataset_info_dir`. |
def available(self):
if not self.adb_server_ip:
return bool(self._adb)
try:
adb_devices = self._adb_client.devices()
try:
if any([self.host in dev.get_serial_no() for dev in adb_devices]):
if not self._available:
self._available = True
return True
if self._available:
logging.error('ADB server is not connected to the device.')
self._available = False
return False
except RuntimeError:
if self._available:
logging.error('ADB device is unavailable; encountered an error when searching for device.')
self._available = False
return False
except RuntimeError:
if self._available:
logging.error('ADB server is unavailable.')
self._available = False
return False | Check whether the ADB connection is intact. |
def _initialize_initial_state_fluents(self):
state_fluents = self.rddl.domain.state_fluents
initializer = self.rddl.instance.init_state
self.initial_state_fluents = self._initialize_pvariables(
state_fluents,
self.rddl.domain.state_fluent_ordering,
initializer)
return self.initial_state_fluents | Returns the initial state-fluents instantiated. |
def lerp(self, a, t):
    """Linear interpolation from self to a: self + (a - self) * t."""
    delta = a.minus(self)
    return self.plus(delta.times(t))
def steady_connection(self):
    """Create and return a steady, non-persistent PyGreSQL connection."""
    return SteadyPgConnection(self._maxusage, self._setsession,
                              self._closeable, *self._args, **self._kwargs)
def _trigger_rpc(self, device_info):
method = device_info.rpc_trigger
if isinstance(method, devices.RPCTriggerViaSWI):
self._jlink.memory_write32(method.register, [1 << method.bit])
else:
raise HardwareError("Unknown RPC trigger method", method=method) | Trigger an RPC in a device specific way. |
def toString(value):
if isinstance(value, basestring):
return value
elif type(value) in [np.string_, np.str_]:
return str(value)
elif type(value) == np.unicode_:
return unicode(value)
else:
raise TypeError("Could not convert %s to string type" % type(value)) | Convert a value to a string, if possible. |
def run_apidoc(_):
current_directory = os.path.abspath(os.path.dirname(__file__))
output_path = os.path.join(current_directory, 'source')
cmd_path = 'sphinx-apidoc'
if hasattr(sys, 'real_prefix'):
cmd_path = os.path.abspath(os.path.join(sys.prefix, 'bin', 'sphinx-apidoc'))
main([cmd_path, '-e', '-o', output_path, '../cinder_data', '--force']) | Heler function for run apidoc as part of the build. |
def group_callback(self, iocb):
    """Callback invoked when a child iocb completes.

    Scans every member; if all have completed, mark this group COMPLETED
    and trigger it. Otherwise wait for the next child callback.
    """
    if _debug: IOGroup._debug("group_callback %r", iocb)
    # NOTE(review): the loop variable shadows the `iocb` parameter.
    for iocb in self.ioMembers:
        if not iocb.ioComplete.isSet():
            if _debug: IOGroup._debug(" - waiting for child: %r", iocb)
            break
    else:
        # for/else: runs only when no member broke out, i.e. all are complete
        if _debug: IOGroup._debug(" - all children complete")
        self.ioState = COMPLETED
        self.trigger()
def nested_srcmdl_xml(self, **kwargs):
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
localpath = NameFactory.nested_srcmdl_xml_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the file name for source model xml files of nested sources |
def _apply_replace_backrefs(m, repl=None, flags=0):
if m is None:
raise ValueError("Match is None!")
else:
if isinstance(repl, ReplaceTemplate):
return repl.expand(m)
elif isinstance(repl, (str, bytes)):
return _bregex_parse._ReplaceParser().parse(m.re, repl, bool(flags & FORMAT)).expand(m) | Expand with either the `ReplaceTemplate` or compile on the fly, or return None. |
def main(arguments):
    """Main entry point for smatch score calculation.

    Reads two AMR files from ``arguments.f``, scores each pair, and prints
    precision/recall (when ``-pr`` was given) plus the F-score.

    Bug fix: the loop and the file close calls used the unrelated module
    global ``args`` instead of the ``arguments`` parameter, breaking any
    caller that did not also set that global.
    """
    global verbose
    global veryVerbose
    global iteration_num
    global single_score
    global pr_flag
    global match_triple_dict
    # number of restarts for the hill-climbing search
    iteration_num = arguments.r + 1
    if arguments.ms:
        single_score = False
    if arguments.v:
        verbose = True
    if arguments.vv:
        veryVerbose = True
    if arguments.pr:
        pr_flag = True
    floatdisplay = "%%.%df" % arguments.significant
    for (precision, recall, best_f_score) in score_amr_pairs(arguments.f[0], arguments.f[1],
                                                             justinstance=arguments.justinstance,
                                                             justattribute=arguments.justattribute,
                                                             justrelation=arguments.justrelation):
        if pr_flag:
            print("Precision: " + floatdisplay % precision)
            print("Recall: " + floatdisplay % recall)
        print("F-score: " + floatdisplay % best_f_score)
    arguments.f[0].close()
    arguments.f[1].close()
def _starts_with_vowel(self, letter_group: str) -> bool:
    """Check whether *letter_group* begins with a vowel (False for empty input)."""
    if not letter_group:
        return False
    return self._contains_vowels(letter_group[0])
def _hide(self):
    """Hide the tray icon and detach both click handlers."""
    self._icon.set_visible(False)
    for handler in (self._conn_left, self._conn_right):
        self._icon.disconnect(handler)
    self._conn_left = self._conn_right = None
def api_delete(service, file_id, owner_token):
service += 'api/delete/%s' % file_id
r = requests.post(service, json={'owner_token': owner_token, 'delete_token': owner_token})
r.raise_for_status()
if r.text == 'OK':
return True
return False | Delete a file already uploaded to Send |
async def set(self, key, value):
if not check_dht_value_type(value):
raise TypeError(
"Value must be of type int, float, bool, str, or bytes"
)
log.info("setting '%s' = '%s' on network", key, value)
dkey = digest(key)
return await self.set_digest(dkey, value) | Set the given string key to the given value in the network. |
def remove_readonly(func, path, _):
    """Clear the read-only bit on *path*, then retry *func* (e.g. os.remove)."""
    os.chmod(path, stat.S_IWRITE)  # make the file writable again
    func(path)
def flush_records(self):
    """Send every buffered record over the socket and empty the buffer."""
    payload = b"".join(record.raw_stateful() for record in self.buffer_out)
    self.socket.send(payload)
    self.buffer_out = []
def decode_mail_header(value, default_charset='us-ascii'):
try:
headers = decode_header(value)
except email.errors.HeaderParseError:
return str_decode(str_encode(value, default_charset, 'replace'), default_charset)
else:
for index, (text, charset) in enumerate(headers):
logger.debug("Mail header no. {index}: {data} encoding {charset}".format(
index=index,
data=str_decode(text, charset or 'utf-8', 'replace'),
charset=charset))
try:
headers[index] = str_decode(text, charset or default_charset,
'replace')
except LookupError:
headers[index] = str_decode(text, default_charset, 'replace')
return ''.join(headers) | Decode a header value into a unicode string. |
def sheets(self):
data = Dict()
for src in [src for src in self.zipfile.namelist() if 'xl/worksheets/' in src]:
name = os.path.splitext(os.path.basename(src))[0]
xml = self.xml(src)
data[name] = xml
return data | return the sheets of data. |
def update_continuously(records, update_interval=600):
    """Update every record in *records* every *update_interval* seconds.

    Runs forever. Individual update failures (API or request errors) are
    deliberately swallowed so that one bad record cannot stop the loop.
    """
    while True:
        for record in records:
            try:
                record.update()
            except (ApiError, RequestException):
                # best-effort: skip this record until the next cycle
                pass
        time.sleep(update_interval)
def write_json_or_yaml(file_path, content):
    """Serialize *content* as JSON or YAML (chosen by file extension) into *file_path*."""
    serialized = get_json_or_yaml(file_path, content)
    with codecs.open(file_path, "w", "utf-8") as handle:
        handle.write(serialized)
def _slice_generator(self, index):
    """Yield the blobs selected by the slice object *index*."""
    for position in range(*index.indices(len(self))):
        yield self.get_blob(position)
def _commit_run_log(self):
    """Persist the current run log through the backend."""
    message = 'Committing run log for job {0}'.format(self.name)
    logger.debug(message)
    self.backend.commit_log(self.run_log)
def collect(self):
    """Yield every metric from every collector registered in this registry."""
    # Snapshot under the lock so collectors can register/unregister concurrently.
    with self._lock:
        snapshot = copy.copy(self._collector_to_names)
    for registered in snapshot:
        yield from registered.collect()
def skip(self, steps=1):
    """Advance the token stream by *steps* tokens."""
    remaining = steps
    while remaining > 0:
        self.next_token()
        remaining -= 1
def com_google_fonts_check_valid_glyphnames(ttFont):
if ttFont.sfntVersion == b'\x00\x01\x00\x00' and ttFont.get(
"post") and ttFont["post"].formatType == 3.0:
yield SKIP, ("TrueType fonts with a format 3.0 post table contain no"
" glyph names.")
else:
import re
bad_names = []
for _, glyphName in enumerate(ttFont.getGlyphOrder()):
if glyphName in [".null", ".notdef", ".ttfautohint"]:
continue
if not re.match(r'^(?![.0-9])[a-zA-Z._0-9]{1,31}$', glyphName):
bad_names.append(glyphName)
if len(bad_names) == 0:
yield PASS, "Glyph names are all valid."
else:
from fontbakery.utils import pretty_print_list
yield FAIL, ("The following glyph names do not comply"
" with naming conventions: {}\n\n"
" A glyph name may be up to 31 characters in length,"
" must be entirely comprised of characters from"
" the following set:"
" A-Z a-z 0-9 .(period) _(underscore). and must not"
" start with a digit or period."
" There are a few exceptions"
" such as the special character \".notdef\"."
" The glyph names \"twocents\", \"a1\", and \"_\""
" are all valid, while \"2cents\""
" and \".twocents\" are not."
"").format(pretty_print_list(bad_names)) | Glyph names are all valid? |
def rollback(self):
if self.contents['rollback'] is None:
raise ValueError("No operation to roll back on refpkg")
future_msg = self.contents['log'][0]
rolledback_log = self.contents['log'][1:]
rollforward = copy.deepcopy(self.contents)
rollforward.pop('rollback')
self.contents = self.contents['rollback']
self.contents['log'] = rolledback_log
self.contents['rollforward'] = [future_msg, rollforward]
self._sync_to_disk() | Revert the previous modification to the refpkg. |
def decode_b64(data):
    """Base64-decode a text string, returning the decoded payload as text."""
    return base64.b64decode(data.encode('utf-8')).decode('utf-8')
def assert_numbers_almost_equal(self, actual_val, expected_val, allowed_delta=0.0001,
                                failure_message='Expected numbers to be within {} of each other: "{}" and "{}"'):
    """Assert that two numbers differ by at most *allowed_delta*."""
    # NOTE(review): `unicode` is Python 2 only; under Python 3 this raises
    # NameError -- confirm the project still targets Python 2.
    assertion = lambda: abs(expected_val - actual_val) <= allowed_delta
    self.webdriver_assert(assertion, unicode(failure_message).format(allowed_delta, actual_val, expected_val))
def run(self):
self.toplevel.protocol("WM_DELETE_WINDOW", self.__on_window_close)
self.toplevel.mainloop() | Ejecute the main loop. |
def __insert_frond_LF(d_w, d_u, dfs_data):
    """Insert frond (d_w, d_u) into the left-side frond group and update bookkeeping."""
    dfs_data['LF'].append((d_w, d_u))
    dfs_data['FG']['l'] += 1          # one more frond on the left side
    dfs_data['last_inserted_side'] = 'LF'
def _copy_settings_file(source, destination, name):
if os.path.exists(destination):
try:
ch = six.moves.input(
'File %s already exists, overwrite? y/[n]):' % destination)
if ch not in ('Y', 'y'):
return
except KeyboardInterrupt:
return
filepath = os.path.dirname(destination)
if not os.path.exists(filepath):
os.makedirs(filepath)
print('Copying default %s to %s' % (name, destination))
shutil.copy(source, destination)
os.chmod(destination, 0o664) | Copy a file from the repo to the user's home directory. |
def _merge_summary(in_files, out_file, data):
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for i, in_file in enumerate(in_files):
with open(in_file) as in_handle:
for j, line in enumerate(in_handle):
if j == 0:
if i == 0:
out_handle.write(line)
else:
out_handle.write(line)
return out_file | Create one big summary file for disambiguation from multiple splits. |
def create(self, name, *args, **kwargs):
resource_name = self._resource_name(name)
log.info(
"Creating {} '{}'...".format(self._model_name, resource_name))
resource = self.collection.create(*args, name=resource_name, **kwargs)
self._ids.add(resource.id)
return resource | Create an instance of this resource type. |
def _get_total_read_size(self):
    """Bytes of event data to consume per read (one event when read_size is unset)."""
    count = self.read_size if self.read_size else 1
    return EVENT_SIZE * count
def _repr(self, obj, level):
try:
obj_repr = type(obj).__repr__
except Exception:
obj_repr = None
def has_obj_repr(t):
r = t.__repr__
try:
return obj_repr == r
except Exception:
return obj_repr is r
for t, prefix, suffix, comma in self.collection_types:
if isinstance(obj, t) and has_obj_repr(t):
return self._repr_iter(obj, level, prefix, suffix, comma)
for t, prefix, suffix, item_prefix, item_sep, item_suffix in self.dict_types:
if isinstance(obj, t) and has_obj_repr(t):
return self._repr_dict(obj, level, prefix, suffix,
item_prefix, item_sep, item_suffix)
for t in self.string_types:
if isinstance(obj, t) and has_obj_repr(t):
return self._repr_str(obj, level)
if self._is_long_iter(obj):
return self._repr_long_iter(obj)
return self._repr_other(obj, level) | Returns an iterable of the parts in the final repr string. |
def _isdst(dt):
    """Return True if *dt* falls in DST for the local timezone.

    The date is projected onto the current year before asking the C library,
    so historical dates use the current DST rules.

    Bug fix: ``dt.replace(year=...)`` raises ValueError for Feb 29 when the
    current year is not a leap year; fall back to Feb 28 in that case.
    """
    # Exact-type check on purpose: datetime is a subclass of date and must
    # NOT be converted here.
    if type(dt) == datetime.date:
        dt = datetime.datetime.combine(dt, datetime.datetime.min.time())
    year = datetime.datetime.now().year
    try:
        dtc = dt.replace(year=year)
    except ValueError:
        # Feb 29 mapped into a non-leap year: use Feb 28 instead.
        dtc = dt.replace(month=2, day=28, year=year)
    return time.localtime(dtc.timestamp()).tm_isdst == 1
def _structure(msg, fp=None, level=0, include_default=False):
    """Debugging aid: print the MIME content-type tree of *msg* to *fp*."""
    out = sys.stdout if fp is None else fp
    indent = ' ' * (level * 4)
    print(indent + msg.get_content_type(), end='', file=out)
    if include_default:
        print(' [%s]' % msg.get_default_type(), file=out)
    else:
        print(file=out)
    if msg.is_multipart():
        for part in msg.get_payload():
            _structure(part, out, level + 1, include_default)
def _init_records(self, record_types):
    """Initialize each record type on this form, recording ids that succeed."""
    known = self._my_map['recordTypeIds']
    for record_type in record_types:
        type_id = str(record_type)
        # Only attempt initialization for types we have not seen yet.
        if type_id not in known and self._init_record(type_id):
            known.append(type_id)
def dom_id(self):
    """Dict of CLBs keyed by DOM ID (populated lazily on first access)."""
    key = 'DOMID'
    if key not in self._by:
        self._populate(by=key)
    return self._by[key]
def exit(self):
    """Clean shutdown: reset servo pins to OUTPUT and close the serial port."""
    for pin in getattr(self, 'digital', []):
        if pin.mode == SERVO:
            pin.mode = OUTPUT
    if hasattr(self, 'sp'):
        self.sp.close()
def deploy_file(self,
                file_name,
                calc_md5=True,
                calc_sha1=True,
                parameters=None):
    """Upload the given file to this path.

    Bug fixes: the original used a mutable default ``parameters={}`` (shared
    across calls), and left ``md5``/``sha1`` unbound (NameError) whenever the
    corresponding ``calc_*`` flag was False. Both now default to None.
    """
    if parameters is None:
        parameters = {}
    md5 = md5sum(file_name) if calc_md5 else None
    sha1 = sha1sum(file_name) if calc_sha1 else None
    target = self
    if self.is_dir():
        # Deploying into a directory: keep the source file's basename.
        target = self / pathlib.Path(file_name).name
    with open(file_name, 'rb') as fobj:
        target.deploy(fobj, md5, sha1, parameters)
def display_animation(anim, **kwargs):
from IPython.display import HTML
return HTML(anim_to_html(anim, **kwargs)) | Display the animation with an IPython HTML object |
def render_tooltip(self, tooltip, obj):
if self.tooltip_attr:
val = getattr(obj, self.tooltip_attr)
elif self.tooltip_value:
val = self.tooltip_value
else:
return False
setter = getattr(tooltip, TOOLTIP_SETTERS.get(self.tooltip_type))
if self.tooltip_type in TOOLTIP_SIZED_TYPES:
setter(val, self.tooltip_image_size)
else:
setter(val)
return True | Render the tooltip for this column for an object |
def handle_response(self, response, **kwargs):
num_401s = kwargs.pop('num_401s', 0)
if not self.cbt_binding_tried and self.send_cbt:
cbt_application_data = _get_channel_bindings_application_data(response)
if cbt_application_data:
try:
self.cbt_struct = kerberos.channelBindings(application_data=cbt_application_data)
except AttributeError:
self.cbt_struct = None
self.cbt_binding_tried = True
if self.pos is not None:
response.request.body.seek(self.pos)
if response.status_code == 401 and num_401s < 2:
_r = self.handle_401(response, **kwargs)
log.debug("handle_response(): returning %s", _r)
log.debug("handle_response() has seen %d 401 responses", num_401s)
num_401s += 1
return self.handle_response(_r, num_401s=num_401s, **kwargs)
elif response.status_code == 401 and num_401s >= 2:
log.debug("handle_response(): returning 401 %s", response)
return response
else:
_r = self.handle_other(response)
log.debug("handle_response(): returning %s", _r)
return _r | Takes the given response and tries kerberos-auth, as needed. |
def cio_close(cio):
OPENJPEG.opj_cio_close.argtypes = [ctypes.POINTER(CioType)]
OPENJPEG.opj_cio_close(cio) | Wraps openjpeg library function cio_close. |
def getDarkCurrentAverages(exposuretimes, imgs):
    """Return (exposure times, averaged image per exposure time).

    Groups *imgs* by identical exposure time and averages each group.
    """
    # sortForSameExpTime groups images that share an exposure time
    x, imgs_p = sortForSameExpTime(exposuretimes, imgs)
    s0, s1 = imgs[0].shape
    # NOTE(review): `imgs` is rebound here, shadowing the parameter (the dtype
    # on the right-hand side is still read from the original images).
    imgs = np.empty(shape=(len(x), s0, s1),
                    dtype=imgs[0].dtype)
    # `i` is a row view into the new array, so `i[:] = ...` fills it in place.
    for i, ip in zip(imgs, imgs_p):
        if len(ip) == 1:
            i[:] = ip[0]
        else:
            i[:] = averageSameExpTimes(ip)
    return x, imgs
def download(url, filename, overwrite = False):
from requests import get
from os.path import exists
debug('Downloading ' + unicode(url) + '...')
data = get(url)
if data.status_code == 200:
if not exists(filename) or overwrite:
f = open(filename, 'wb')
f.write(data.content)
f.close()
return True
return False | Downloads a file via HTTP. |
def show(*args, **kw):
if len(kw):
raise TypeError('unexpected keyword argument: %r' % list(kw))
if args:
for arg in args:
print(envget(arg))
else:
listVars(prefix=" ", equals="=") | Print value of IRAF or OS environment variables. |
def after_processing(eng, objects):
super(InvenioProcessingFactory, InvenioProcessingFactory)\
.after_processing(eng, objects)
if eng.has_completed:
eng.save(WorkflowStatus.COMPLETED)
else:
eng.save(WorkflowStatus.HALTED)
db.session.commit() | Process to update status. |
def toStr(self):
    """Return the CSV as one string: the legend line followed by each data line."""
    parts = [self.strLegend] + [line.toStr() for line in self.lines]
    return self.lineSeparator.join(parts)
def __create_session(username=None, password=None):
config = Config()
if not username or not password:
username = config.username
password = config.password
payload = {
"username": username,
"password": password,
}
session_resp = requests.post("https://users.{}/sessions/".format(config.host), json=payload)
if session_resp.status_code == 403:
raise errors.ResourceError("bad user credentials")
return session_resp.cookies["session"] | grabs the configuration, and makes the call to Authentise to create the session |
def redis_version(self):
    """Redis server version as a tuple of up to three ints (cached after first call)."""
    if not hasattr(self, '_redis_version'):
        raw = self.connection.info().get('redis_version')
        self._redis_version = tuple(int(part) for part in raw.split('.')[:3])
    return self._redis_version
def _search_keys(text, keyserver, user=None):
gpg = _create_gpg(user)
if keyserver:
_keys = gpg.search_keys(text, keyserver)
else:
_keys = gpg.search_keys(text)
return _keys | Helper function for searching keys from keyserver |
def reload(self, d=None):
    """Reload this model from *d* when given, else re-fetch by id from the database.

    Does nothing when neither a dict nor an id is available.
    """
    if d:
        self.clear()
        self.update(d)
    elif self.id:
        fresh = self.by_id(self._id)
        self.clear()
        self.update(fresh)
def BinaryBool(pred):
    """Lift a two-argument predicate into the DSL as a Bool subclass.

    The returned class holds a fixed reference value; calling an instance
    compares each element of the input data against that value with
    ``pred`` and returns True on the first match, False otherwise.
    """
    class Predicate(Bool):
        def __init__(self, value, ignore_case=False):
            # Normalize the reference value once when comparing caselessly.
            self.value = caseless(value) if ignore_case else value
            self.ignore_case = ignore_case

        def __call__(self, data):
            # Accept scalars by treating them as single-element lists.
            if not isinstance(data, list):
                data = [data]
            for d in data:
                try:
                    if pred(caseless(d) if self.ignore_case else d, self.value):
                        return True
                except Exception:
                    # Incomparable elements simply do not match.  The
                    # original bare `except:` would also have swallowed
                    # KeyboardInterrupt/SystemExit.
                    pass
            return False
    return Predicate
def in6_getscope(addr):
    """Return the scope of an IPv6 address.

    One of IPV6_ADDR_GLOBAL, IPV6_ADDR_LINKLOCAL, IPV6_ADDR_SITELOCAL or
    IPV6_ADDR_LOOPBACK, or -1 when the scope cannot be determined.
    """
    if in6_isgladdr(addr) or in6_isuladdr(addr):
        return IPV6_ADDR_GLOBAL
    if in6_islladdr(addr):
        return IPV6_ADDR_LINKLOCAL
    if in6_issladdr(addr):
        return IPV6_ADDR_SITELOCAL
    if in6_ismaddr(addr):
        # Multicast addresses encode their scope in the address itself.
        if in6_ismgladdr(addr):
            return IPV6_ADDR_GLOBAL
        if in6_ismlladdr(addr):
            return IPV6_ADDR_LINKLOCAL
        if in6_ismsladdr(addr):
            return IPV6_ADDR_SITELOCAL
        if in6_ismnladdr(addr):
            return IPV6_ADDR_LOOPBACK
        return -1
    if addr == '::1':
        return IPV6_ADDR_LOOPBACK
    return -1
def _draw_swap_cv(self, board_image, swap):
    """Add a white tile border to indicate the swap.

    ``swap`` is a pair of (row, col) board positions; the rectangle drawn
    is the pixel-space bounding box covering both tiles.
    """
    tile_h, tile_w = self._TILE_SHAPE[0:2]
    (row_a, col_a), (row_b, col_b) = swap
    top = tile_h * min(row_a, row_b)
    bottom = tile_h * (max(row_a, row_b) + 1)
    left = tile_w * min(col_a, col_b)
    right = tile_w * (max(col_a, col_b) + 1)
    # 4px-thick white outline marking the swapped tiles.
    data.cv2.rectangle(board_image, (left, top), (right, bottom),
                       color=(255, 255, 255), thickness=4)
def dct2(input, K=13):
    """Convert a log-power-spectrum to MFCC using the normalized DCT-II.

    ``input`` is an (nframes, N) array; the result has shape (nframes, K).
    """
    nframes, N = input.shape
    # DCT-II basis with frequency step pi/N, scaled by 2/N afterwards.
    basis = dctmat(N, K, numpy.pi / N, False)
    return numpy.dot(input, basis) * (2.0 / N)
async def _get_subscriptions(self) -> Tuple[Set[Text], Text]:
    """List the webhook subscriptions currently active.

    Queries the subscriptions endpoint and, for the 'page' object,
    returns the set of subscribed field names together with the
    registered callback URL.  Returns ``(set(), '')`` when no page
    subscription is present.
    """
    url, params = self._get_subscriptions_endpoint()
    get = self.session.get(url, params=params)
    async with get as r:
        # Validate the response before parsing the JSON body.
        await self._handle_fb_response(r)
        data = await r.json()
    for scope in data['data']:
        if scope['object'] == 'page':
            return (
                set(x['name'] for x in scope['fields']),
                scope['callback_url'],
            )
    return set(), ''
def clear_data(self):
    """Drop the buffered audio samples and reset the counters, freeing memory."""
    self.log(u"Clear audio_data")
    # Zero the bookkeeping and release the sample buffer itself.
    self.__samples_capacity = self.__samples_length = 0
    self.__samples = None
def to_json(self, depth=-1, **kwargs):
    """Return a JSON representation of the object.

    ``depth`` is forwarded to ``to_dict`` (default -1); extra keyword
    arguments are passed through to ``json.dumps``.
    """
    payload = self.to_dict(depth=depth, ordered=True)
    return json.dumps(payload, **kwargs)
def send(self):
    """Attempt delivery through the recipient's domain MX records.

    Tries every MX host in turn and returns True on the first successful
    delivery; returns False when all hosts fail.  In verbose mode
    (logging below WARN) the error is re-raised instead, to aid debugging.
    """
    for mx in self.mxrecords:
        # The try sits INSIDE the loop so that a failing MX host does not
        # abort the whole attempt; the original wrapped the loop and never
        # tried the remaining records after the first failure.
        try:
            logging.info('Connecting to {} {}...'.format(mx, self.port))
            server = smtplib.SMTP(mx, self.port)
            server.set_debuglevel(logging.root.level < logging.WARN)
            server.sendmail(
                self.sender, [self.recipient], self.message.as_string())
            server.quit()
            return True
        except Exception as e:
            logging.error(e)
            if (isinstance(e, IOError)
                    and e.errno in (errno.ENETUNREACH, errno.ECONNREFUSED)):
                logging.error(
                    'Please check that port {} is open'.format(self.port))
            if logging.root.level < logging.WARN:
                raise
    return False
def absent(name=None, canonical=None, **api_opts):
    """Ensure the CNAME with the given name or canonical name is removed.

    Returns a standard Salt state dict with ``name``, ``result``,
    ``comment`` and ``changes`` keys.
    """
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
    obj = __salt__['infoblox.get_cname'](name=name, canonical=canonical, **api_opts)
    if not obj:
        # Nothing to do: the record is already gone.
        ret['result'] = True
        ret['comment'] = 'infoblox already removed'
        return ret
    if __opts__['test']:
        # Dry run: report what would change without deleting anything.
        ret['result'] = None
        ret['changes'] = {'old': obj, 'new': 'absent'}
        return ret
    if __salt__['infoblox.delete_cname'](name=name, canonical=canonical, **api_opts):
        ret['result'] = True
        ret['changes'] = {'old': obj, 'new': 'absent'}
        return ret
    # Deletion failed.  The original fell off the end and returned None,
    # which is not a valid state return; report the failure instead.
    ret['comment'] = 'infoblox failed to delete cname'
    return ret
def update_req(req):
    """Update a requirement object to pin the latest PyPI version.

    Returns ``(req, update_info)`` where ``update_info`` is a tuple of
    ``(name, current_version, newest_version)`` when the requirement was
    changed, or ``None`` when it was left untouched (unnamed requirement,
    hidden package, range-pinned requirement, or already up to date).
    """
    if not req.name:
        return req, None
    info = get_package_info(req.name)
    # Hidden packages are intentionally withdrawn from PyPI listings.
    if info['info'].get('_pypi_hidden'):
        print('{} is hidden on PyPI and will not be updated.'.format(req))
        return req, None
    # A range pin (e.g. >=1.0,<2.0) expresses deliberate intent; keep it.
    if _is_pinned(req) and _is_version_range(req):
        print('{} is pinned to a range and will not be updated.'.format(req))
        return req, None
    newest_version = _get_newest_version(info)
    # NOTE(review): assumes at most one specifier matters here — the first
    # one from the set is taken as "current".  TODO confirm for multi-spec.
    current_spec = next(iter(req.specifier)) if req.specifier else None
    current_version = current_spec.version if current_spec else None
    new_spec = Specifier(u'=={}'.format(newest_version))
    # Compare via the private _spec tuple to ignore object identity.
    if not current_spec or current_spec._spec != new_spec._spec:
        req.specifier = new_spec
        update_info = (
            req.name,
            current_version,
            newest_version)
        return req, update_info
    return req, None
def query_pre_approvals(self, initial_date, final_date, page=None,
                        max_results=None):
    """Query pre-approvals by date range, following pagination.

    Keeps requesting pages until the backend reports the last one (or
    omits paging data) and returns the accumulated pre-approvals list.
    """
    collected = []
    while True:
        chunk = self._consume_query_pre_approvals(
            initial_date, final_date, page, max_results)
        collected.extend(chunk.pre_approvals)
        finished = (chunk.current_page is None
                    or chunk.total_pages is None
                    or chunk.current_page == chunk.total_pages)
        if finished:
            return collected
        page = chunk.current_page + 1
def generate_report(book_url):
    """Generate the HTML report content for the portfolio in ``book_url``.

    Renders one template row per stock valued as of today, then splices
    the rows into the main page template.
    """
    shares_no = None   # kept in locals() for the template — TODO confirm used
    avg_price = None   # kept in locals() for the template — TODO confirm used
    stock_template = templates.load_jinja_template("stock_template.html")
    stock_rows = ""
    # BUG FIX: the original passed `datetime.today().date` — the bound
    # method itself, not a date.  Call it, and hoist the loop-invariant.
    for_date = datetime.today().date()
    with piecash.open_book(book_url, readonly=True, open_if_lock=True) as book:
        all_stocks = portfoliovalue.get_all_stocks(book)
        for stock in all_stocks:
            model = portfoliovalue.get_stock_model_from(book, stock, for_date)
            stock_rows += stock_template.render(model)
    template = templates.load_jinja_template("template.html")
    result = template.render(**locals())
    return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.