code | docstring |
|---|---|
def relpath(path):
return os.path.normpath(
os.path.join(os.path.abspath(os.path.dirname(__file__)), path)
) | Path helper, gives you a path relative to this file |
def data_type(data, grouped=False, columns=None, key_on='idx', iter_idx=None):
if iter_idx:
return Data.from_mult_iters(idx=iter_idx, **data)
if pd:
if isinstance(data, (pd.Series, pd.DataFrame)):
return Data.from_pandas(data, grouped=grouped, columns=columns,
... | Data type check for automatic import |
def generate_file_name(self):
if not self.project:
raise UnderDefined("project name not given")
out_data_dir = prms.Paths.outdatadir
project_dir = os.path.join(out_data_dir, self.project)
file_name = "cellpy_batch_%s.json" % self.name
self.file_name = os.path.join(pro... | generate a suitable file name for the experiment |
def _gregorian_to_ssweek(date_value):
"Sundaystarting-week year, week and day for the given Gregorian calendar date"
yearStart = _ssweek_year_start(date_value.year)
weekNum = ((date_value - yearStart).days) // 7 + 1
dayOfWeek = date_value.weekday()+1
    return (date_value.year, weekNum, dayOfWeek) | Sunday-starting week year, week and day for the given Gregorian calendar date |
def run_continuous(self, scale):
if scale == self.scale_in:
raise ValueError("The scale must be different from the input scale")
elif scale < self.scale_in:
scale_min = scale
scale_max = self.scale_in
elif scale > self.scale_in:
scale_max = scale
... | Return a continuous solution to the RGE as `RGsolution` instance. |
def refreshDetails( self ):
tree = self.uiRecordsTREE
tree.blockSignals(True)
tree.setRecordSet(self.records())
tree.blockSignals(False) | Refreshes the results for the details view of the browser. |
def BrushForNode( self, node, depth=0 ):
if node == self.selectedNode:
color = wx.SystemSettings_GetColour(wx.SYS_COLOUR_HIGHLIGHT)
elif node == self.highlightedNode:
color = wx.Colour( red=0, green=255, blue=0 )
else:
color = self.adapter.background_color(nod... | Create brush to use to display the given node |
def datetime64_to_datetime(dt):
dt64 = np.datetime64(dt)
ts = (dt64 - np.datetime64('1970-01-01T00:00:00')) / np.timedelta64(1, 's')
return datetime.datetime.utcfromtimestamp(ts) | convert numpy's datetime64 to datetime |
def kwds(self):
return _kwds(base=self.base, item=self.item,
leng=self.leng, refs=self.refs,
both=self.both, kind=self.kind, type=self.type) | Return all attributes as keywords dict. |
def handle_keypress(self):
if self.numpress == 2:
self.sig_double_tab_pressed.emit(True)
            self.numpress = 0 | When hitting Tab, handles whether it was a single or double press |
def xAxisIsMinor(self):
return min(self.radius.x, self.radius.y) == self.radius.x | Returns True if the minor axis is parallel to the X axis, boolean. |
def _createFromRDD(self, rdd, schema, samplingRatio):
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio, names=schema)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, t... | Create an RDD for DataFrame from an existing RDD, returns the RDD and schema. |
def _old_run_cmd(self, cmd):
try:
proc = subprocess.Popen(
cmd,
shell=True,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
data = proc.communicate()
return data[0], data[1], proc.returncode
... | Cleanly execute the command string |
def _process_pair(first_fn, second_fn, error_protocol):
ebook = None
metadata = None
if _is_meta(first_fn) and not _is_meta(second_fn):
logger.debug(
"Parsed: '%s' as meta, '%s' as data." % (first_fn, second_fn)
)
metadata, ebook = first_fn, second_fn
elif not _is_met... | Look at given filenames, decide which is what and try to pair them. |
def add(self, item):
        if item not in self.items:
self.items.append(item) | Add an item to the set. |
def _timeout(self, timeout, handler, *args, **kw):
t = spawn_thread(target=handler, args=args, kw=kw)
t.daemon = True
t.start()
t.join(timeout)
if not t.is_alive():
if t.exc_info:
return t.exc_info
return t.result
else:
... | Controls the time allocated for the execution of a method |
def com_google_fonts_check_name_copyright_length(ttFont):
from fontbakery.utils import get_name_entries
failed = False
for notice in get_name_entries(ttFont, NameID.COPYRIGHT_NOTICE):
notice_str = notice.string.decode(notice.getEncoding())
if len(notice_str) > 500:
failed = True
yield FAIL... | Length of copyright notice must not exceed 500 characters. |
def _compute_weight(powers, wg):
pow1 = (powers[0], 0)
pow2 = (0, powers[1])
cal1 = _compute_value(pow1, wg)
cal2 = _compute_value(pow2, wg)
return cal1 * cal2 | Return the weight corresponding to given powers. |
def find_project_by_short_name(short_name, pbclient, all=None):
try:
response = pbclient.find_project(short_name=short_name, all=all)
check_api_error(response)
if (len(response) == 0):
msg = '%s not found! You can use the all=1 argument to \
search in all the s... | Return project by short_name. |
def type_and_times(type_: str, start: Timestamp, end: Timestamp, probability: Number = None) -> str:
if not type_:
return ''
if type_ == 'BECMG':
return f"At {start.dt.hour or 'midnight'} zulu becoming"
ret = f"From {start.dt.hour or 'midnight'} to {end.dt.hour or 'midnight'} zulu,"
if p... | Format line type and times into the beginning of a spoken line string |
def CheckBlobsExist(self, blob_ids):
result = {}
for blob_id in blob_ids:
result[blob_id] = blob_id in self.blobs
        return result | Checks if given blobs exist. |
def check_bundler(self):
def get_config(name):
return name if self.config('bundler.' + name) else ''
from pkg_resources import Requirement, resource_filename
relative_path = os.path.join('PyGitUp', 'check-bundler.rb')
bundler_script = resource_filename(Requirement.parse(... | Run the bundler check. |
def template(args):
" Add or remove templates from site. "
site = Site(args.PATH)
if args.ACTION == "add":
return site.add_template(args.TEMPLATE)
return site.remove_template(args.TEMPLATE) | Add or remove templates from site. |
def should_copy_file(remote_storage, path, prefixed_path, source_storage):
if has_matching_etag(
remote_storage, source_storage, path, prefixed_path):
logger.info("%s: Skipping based on matching file hashes" % path)
return False
destroy_etag(path)
logger.info("%s: Hashes did not ... | Returns True if the file should be copied, otherwise False. |
def _get_command(classes):
commands = {}
setup_file = os.path.join(
os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')),
'setup.cfg')
for line in open(setup_file, 'r'):
for cl in classes:
if cl in line:
                commands[cl] = line.split(' = ')[0].str... | Associates each command class with its command based on setup.cfg |
def show_plain_text(self, text):
self.switch_to_plugin()
self.switch_to_plain_text()
self.set_plain_text(text, is_code=False) | Show text in plain mode |
def validate(method):
@wraps(method)
def mod_run(self, rinput):
self.validate_input(rinput)
result = method(self, rinput)
self.validate_result(result)
return result
return mod_run | Decorate run method, inputs and outputs are validated |
def append(self, entry):
if not self.is_appendable(entry):
raise ValueError('entry not appendable')
self.data += entry.data | Append an entry to self |
def search(self, term=None, category=None, pages=1, url=search_url,
sort=None, order=None):
if not self.current_url:
self.current_url = url
if self.current_url == Search.base_url:
results = self._get_results(self.current_url)
self._add_results(results)
else:
search = self._format_search(term, cate... | Search a given URL for torrent results. |
def feedback_form_url(project, page):
return FEEDBACK_FORM_FMT.format(pageid=quote("{}: {}".format(project, page))) | Create a URL for feedback on a particular page in a project. |
def build(target_python, requirements):
if not requirements:
return
testapp = 'setup_testapp_python2.py'
android_sdk_home = os.environ['ANDROID_SDK_HOME']
android_ndk_home = os.environ['ANDROID_NDK_HOME']
if target_python == TargetPython.python3:
testapp = 'setup_testapp_python3_sqli... | Builds an APK given a target Python and a set of requirements. |
def transform(row, table):
'Extract links from "project" field and remove HTML from all'
data = row._asdict()
data["links"] = " ".join(extract_links(row.project))
for key, value in data.items():
if isinstance(value, six.text_type):
data[key] = extract_text(value)
return data | Extract links from "project" field and remove HTML from all |
def _serialiseExistingJob(self, jobGraph, jobStore, returnValues):
self._serialiseJobGraph(jobGraph, jobStore, returnValues, False)
jobGraph.command = None
assert len(jobGraph.stack) >= 4
combinedChildren = jobGraph.stack[-1] + jobGraph.stack[-3]
combinedFollowOns = jobGraph.stac... | Serialise an existing job. |
def read(filename, paramfile, bdfdir, scan):
filename = os.path.abspath(filename)
scans = ps.read_scans(filename, bdfdir=bdfdir)
logger.info('Scans, Target names:')
logger.info('%s' % str([(ss, scans[ss]['source']) for ss in scans]))
logger.info('Example pipeline:')
state = rt.set_pipeline(filen... | Simple parse and return metadata for pipeline for first scan |
def efetch(self, db, id):
db = db.lower()
xml = self._qs.efetch({'db': db, 'id': str(id)})
doc = le.XML(xml)
if db in ['gene']:
return EntrezgeneSet(doc)
if db in ['nuccore', 'nucest', 'protein']:
return GBSet(doc)
if db in ['pubmed']:
... | query the efetch endpoint |
def createDocument(self, initDict = None) :
if initDict is not None :
return self.createDocument_(initDict)
else :
if self._validation["on_load"] :
self._validation["on_load"] = False
return self.createDocument_(self.defaultDocument)
        ... | Create and return a document populated with the defaults or with the values in initDict |
def flush(self):
self.__flushLevel(0)
if self.__lastImport is not None:
self.imports.append(self.__lastImport) | Flushes the collected information |
def send(self, msg):
with self._pub_lock:
self.publish.send_string(msg)
return self | Send the given message. |
def register_route(self, app):
if "url" not in self.params["options"]:
raise Exception("Component does not have a URL property")
if not hasattr(self.route_func, "__call__"):
raise Exception("No app route function supplied")
app.add_url_rule(self.params["options"]["url"],
... | Register the api route function with the app. |
def _build_fields(self):
declared_fields = self.solr._send_request('get', ADMIN_URL)
result = decoder.decode(declared_fields)
self.field_list = self._parse_fields(result, 'fields')
self._dynamic_field_regexes = []
for wc_pattern in self._parse_fields(result, 'dynamicFields'):
... | Builds a list of valid fields |
def changes_found(self):
if self.dest is None:
warnings.warn("dest directory not found!")
if self.src is None:
warnings.warn("src directory not found!")
if self.src is None or self.dest is None:
return False
dest_mtime = -1
src_mtime = os.path.... | Returns True if the target folder is older than the source folder. |
def _string_generator(descriptor, max_length=0, limit=0):
'Helper to create a string generator'
vals = list(values.get_strings(max_length, limit))
return gen.IterValueGenerator(descriptor.name, vals) | Helper to create a string generator |
def pretty_printer_factory(p_todolist, p_additional_filters=None):
p_additional_filters = p_additional_filters or []
printer = PrettyPrinter()
printer.add_filter(PrettyPrinterNumbers(p_todolist))
for ppf in p_additional_filters:
printer.add_filter(ppf)
printer.add_filter(PrettyPrinterColorFi... | Returns a pretty printer suitable for the ls and dep subcommands. |
def OnPadIntCtrl(self, event):
self.attrs["pad"] = event.GetValue()
post_command_event(self, self.DrawChartMsg) | Pad IntCtrl event handler |
def keys(self, namespace, prefix=None, limit=None, offset=None):
params = [namespace]
query = 'SELECT key FROM gauged_keys WHERE namespace = %s'
if prefix is not None:
query += ' AND key LIKE %s'
params.append(prefix + '%')
if limit is not None:
query ... | Get keys from a namespace |
def clear_cache(self):
self.logger.debug("Clearing cache")
if os.path.isdir(self.songcache_dir):
for filename in os.listdir(self.songcache_dir):
file_path = os.path.join(self.songcache_dir, filename)
try:
if os.path.isfile(file_path):
... | Removes all files from the songcache dir |
def from_data(cls, data):
if len(data) == 0:
raise ValueError("data array is empty.")
dim_x, dim_y = len(data[0][0]), len(data[0][1])
dataset = cls(dim_x, dim_y)
for x, y in data:
assert len(x) == dim_x and len(y) == dim_y
dataset.add_xy(x, y)
    ... | Create a dataset from an array of data, inferring the dimension from the datapoint |
def dispatch_operation(self, operation, request, path_args):
try:
for middleware in self.middleware.pre_dispatch:
middleware(request, path_args)
resource = operation(request, path_args)
for middleware in self.middleware.post_dispatch:
resource ... | Dispatch and handle exceptions from operation. |
def install_PMK(self):
self.pmk = PBKDF2HMAC(
algorithm=hashes.SHA1(),
length=32,
salt=self.ssid.encode(),
iterations=4096,
backend=default_backend(),
).derive(self.passphrase.encode()) | Compute and install the PMK |
def scoped_timeline(self, *id, **kwargs):
self._validate_loaded()
params = {'user_id': id}
params.update(kwargs)
resource = self.SCOPED_TIMELINE.format(id=self.id)
response = Request(self.client, 'get', resource, params=params).perform()
return response.body['data'] | Returns the most recent promotable Tweets created by the specified Twitter user. |
def string_matches_sans_whitespace(self, str1, str2_fuzzy_whitespace):
    str2_fuzzy_whitespace = re.sub(r'\s+', r'\s*', str2_fuzzy_whitespace)
return re.search(str2_fuzzy_whitespace, str1) is not None | Check if two strings match, modulo their whitespace. |
def register_or_check(klass, finish, mean, between, refresh_presision, configuration):
m, created = klass.objects.get_or_create(finish=finish, configuration=configuration)
if created:
m.mean=mean
m.between=between
m.refresh_presision=refresh_presision
m.save()
else:
diff = abs(float(m.mean) - mean)... | Return the active configurations. |
def dump_engines(target=sys.stderr):
print("Available templating engines:", file=target)
width = max(len(engine) for engine in engines.engines)
for handle, engine in sorted(engines.engines.items()):
        description = engine.__doc__.split('\n', 1)[0]
print(" %-*s - %s" % (width, handle, descri... | Print successfully imported templating engines. |
def channel_is_closed(
self,
participant1: Address,
participant2: Address,
block_identifier: BlockSpecification,
channel_identifier: ChannelID,
) -> bool:
try:
channel_state = self._get_channel_state(
participant1=partic... | Returns true if the channel is in a closed state, false otherwise. |
def ungroupslice(groups,gslice):
'this is a helper for contigsub.'
'coordinate transform: takes a match from seqingroups() and transforms to ungrouped coordinates'
eltsbefore=0
for i in range(gslice[0]): eltsbefore+=len(groups[i])-1
x=eltsbefore+gslice[1]; return [x-1,x+gslice[2]-1] | this is a helper for contigsub. |
def _get_enum(self, source, bitarray):
raw_value = self._get_raw(source, bitarray)
value_desc = source.find('item', {'value': str(raw_value)}) or self._get_rangeitem(source, raw_value)
return {
source['shortcut']: {
'description': source.get('description'),
... | Get enum value, based on the data in XML |
def resize(self, new_size):
if new_size == len(self):
return
else:
self._saved = LimitedSizeDict(size_limit=2**5)
new_arr = zeros(new_size, dtype=self.dtype)
if len(self) <= new_size:
new_arr[0:len(self)] = self
else:
... | Resize self to new_size |
def warn_on_deprecated_args(self, args):
if getattr(args, "private", None) is not None and \
(os.path.exists(os.path.join(args.private, "setup.py")) or
os.path.exists(os.path.join(args.private, "pyproject.toml"))
):
if not getattr(args, "use_setup_py"... | Print warning messages for any deprecated arguments that were passed. |
def hash(self):
h = hash_pandas_object(self, index=True)
return hashlib.md5(h.values.tobytes()).hexdigest() | Generate a hash value. |
def delete(self):
res = self.rest_client.session.delete(self.rest_self)
_handle_http_errors(res) | Delete this application configuration. |
def decompress(self, value):
if value:
return [value.get(field.name, None) for field in self.fields]
    return [field.field.initial for field in self.fields] | Retrieve each field value or provide the initial values |
def storage(self):
annotation = self.get_annotation()
if annotation.get(ATTACHMENTS_STORAGE) is None:
annotation[ATTACHMENTS_STORAGE] = OOBTree()
return annotation[ATTACHMENTS_STORAGE] | A storage which keeps configuration settings for attachments |
def fullName(self):
if self.parentName and self.name:
return self.parentName + '_' + self.name
return self.name or self.parentName | A full name, intended to uniquely identify a parameter |
def __insert_frond_RF(d_w, d_u, dfs_data):
dfs_data['RF'].append( (d_w, d_u) )
dfs_data['FG']['r'] += 1
dfs_data['last_inserted_side'] = 'RF' | Encapsulates the process of inserting a frond uw into the right side frond group. |
def _create_tc_dirs(self):
tc_log_path = self.profile.get('args', {}).get('tc_log_path')
if tc_log_path is not None and not os.path.isdir(tc_log_path):
os.makedirs(tc_log_path)
tc_out_path = self.profile.get('args', {}).get('tc_out_path')
if tc_out_path is not None and not os... | Create app directories for logs and data files. |
def write(self, learn:Learner, trn_batch:Tuple, val_batch:Tuple, iteration:int, tbwriter:SummaryWriter)->None:
"Writes training and validation batch images to Tensorboard."
self._write_for_dstype(learn=learn, batch=val_batch, iteration=iteration, tbwriter=tbwriter, ds_type=DatasetType.Valid)
sel... | Writes training and validation batch images to Tensorboard. |
def _is_at_ref_end(self, nucmer_hit):
hit_coords = nucmer_hit.ref_coords()
return hit_coords.end >= nucmer_hit.ref_length - self.ref_end_tolerance | Returns True iff the hit is "close enough" to the end of the reference sequence |
def validate(self):
if not isinstance(self.location, Location):
raise TypeError(u'Expected Location location, got: {} {}'.format(
type(self.location).__name__, self.location))
if self.location.field:
raise ValueError(u'Expected location to point to a vertex, '
... | Validate that the ContextFieldExistence is correctly representable. |
def saltpath():
salt_path = os.path.abspath(os.path.join(__file__, os.path.pardir))
return {'saltpath': os.path.dirname(salt_path)} | Return the path of the salt module |
def find_min_required(path):
found_min_required = ''
for filename in glob.glob(os.path.join(path, '*.tf')):
with open(filename, 'r') as stream:
tf_config = hcl.load(stream)
if tf_config.get('terraform', {}).get('required_version'):
found_min_required = tf_config.g... | Inspect terraform files and find minimum version. |
def _service_heartbeat_if_due(self):
due = False
if self._service_registry:
if not hasattr(self, "status_info"):
due = True
else:
d = doublethink.utcnow() - self.status_info["last_heartbeat"]
due = d.total_seconds() > self.HEARTBEAT... | Sends service registry heartbeat if due |
def _escapeText(text):
output = ""
index = 0
match = reCharsToEscape.search(text, index)
while match:
output = output + text[index:match.start()] + '\\' + text[match.start()]
index = match.end()
match = reCharsToEscape.search(text, index)
output = output + text[index:]
return output | Adds backslash-escapes to property value characters that need them. |
def sub_split_values(self, sub):
for i, arr in enumerate(self.splits):
self.split_map[i] = [sub.get(x, x) for x in arr]
for split in self.surrogates:
split.sub_split_values(sub) | Substitutes the splits with other values into the split_map |
def rollback(self):
self._state_machine.transition_to_rollback()
for action in reversed(self._executed_actions):
try:
self.execute_with_retries(action, lambda a: a.rollback())
except:
pass
self._state_machine.transition_to_rollback_complete... | Call rollback on executed actions. |
def _eta_from_phi(self):
self.eta = scipy.ndarray(N_NT - 1, dtype='float')
etaprod = 1.0
for w in range(N_NT - 1):
self.eta[w] = 1.0 - self.phi[w] / etaprod
etaprod *= self.eta[w]
_checkParam('eta', self.eta, self.PARAMLIMITS, self.PARAMTYPES) | Update `eta` using current `phi`. |
def as_data_frame(self) -> pandas.DataFrame:
header_gene = {}
header_multiplex = {}
headr_transitions = {}
for gene in self.influence_graph.genes:
header_gene[gene] = repr(gene)
header_multiplex[gene] = f"active multiplex on {gene!r}"
        headr_transitions... | Create a pandas DataFrame representation of the resource table. |
def Publish(self, request, context):
LOG.debug("Publish called")
try:
self.plugin.publish(
[Metric(pb=m) for m in request.Metrics],
ConfigMap(pb=request.Config)
)
return ErrReply()
except Exception as err:
msg = "mes... | Dispatches the request to the plugins publish method |
def log_error(self, error, message, detail=None, strip=4):
"Add an error message and optional user message to the error list"
if message:
msg = message + ": " + error
else:
msg = error
tb = traceback.format_stack()
if sys.version_info >= (3, 0):
... | Add an error message and optional user message to the error list |
def base36encode(number):
ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyz"
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(ALPHABET):
return sign + ALPHABET[number]
while number != 0:
number, i = divmod(number, len(ALPHABET))... | Converts an integer into a base36 string. |
def dataframe(self, dtype=False, parse_dates=True, *args, **kwargs):
import pandas as pd
rg = self.row_generator
t = self.resolved_url.get_resource().get_target()
if t.target_format == 'csv':
return self.read_csv(dtype, parse_dates, *args, **kwargs)
try:
        ... | Return a pandas DataFrame from the resource |
def cfg_lldp_interface(self, protocol_interface, phy_interface=None):
if phy_interface is None:
phy_interface = protocol_interface
self.create_attr_obj(protocol_interface, phy_interface)
ret = self.pub_lldp.enable_lldp(protocol_interface)
attr_obj = self.get_attr_obj(protocol... | Cfg LLDP on interface and create object. |
def __NormalizeStartEnd(self, start, end=None):
if end is not None:
if start < 0:
raise exceptions.TransferInvalidError(
'Cannot have end index with negative start index ' +
'[start=%d, end=%d]' % (start, end))
elif start >= self.to... | Normalizes start and end values based on total size. |
def ping(self, timeout=0, **kwargs):
def rand_id(size=8, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
payload = rand_id()
self.ws.ping(payload)
opcode, data = self.recv_raw(timeout, [websocket.ABNF.OPCODE_PONG], **kw... | THIS DOES NOT WORK, UWSGI DOES NOT RESPOND TO PINGS |
def cost_zerg_corrected(self) -> "Cost":
if self.race == Race.Zerg and Attribute.Structure.value in self.attributes:
return Cost(
self._proto.mineral_cost - 50,
self._proto.vespene_cost,
self._proto.build_time
)
else:
re... | This returns 25 for extractor and 200 for spawning pool instead of 75 and 250 respectively |
def remove(in_bam):
if utils.file_exists(in_bam):
utils.remove_safe(in_bam)
if utils.file_exists(in_bam + ".bai"):
        utils.remove_safe(in_bam + ".bai") | remove bam file and the index if it exists |
def location(self) -> Optional[PostLocation]:
loc = self._field("location")
if self._location or not loc:
return self._location
location_id = int(loc['id'])
if any(k not in loc for k in ('name', 'slug', 'has_public_page', 'lat', 'lng')):
        loc = self._context.get_js... | If the Post has a location, returns PostLocation namedtuple with fields 'id', 'lat', 'lng' and 'name'. |
def normalize_date_format(date_str, default_time="00:00:00"):
ret_code, ret_data = check_date_str_format(date_str, default_time)
if ret_code != RET_OK:
return ret_code, ret_data
    return RET_OK, ret_data.strftime("%Y-%m-%d %H:%M:%S") | normalize the format of the date |
def compile(self, node, *args, **kwargs):
if isinstance(node, WhereNode):
return where_node_as_ldap(node, self, self.connection)
    return super(SQLCompiler, self).compile(node, *args, **kwargs) | Parse a WhereNode to an LDAP filter string. |
def subscribe(self, id, name, port):
sub = gntp.core.GNTPSubscribe()
sub.add_header('Subscriber-ID', id)
sub.add_header('Subscriber-Name', name)
sub.add_header('Subscriber-Port', port)
if self.password:
sub.set_password(self.password, self.passwordHash)
self.add_origin_info(sub)
self.subscribe_hook(sub... | Send a Subscribe request to a remote machine |
def _get_prtflds(self):
ntflds = self.gosubdag.prt_attr['flds']
prt_flds = self.kws.get('prt_flds')
if prt_flds:
return prt_flds.intersection(ntflds)
exclude = set()
if self.gosubdag.relationships:
exclude.add('level')
return set(f for f in ntflds ... | Get print fields for GO header. |
def _create_binary_mathfunction(name, doc=""):
def _(col1, col2):
sc = SparkContext._active_spark_context
if isinstance(col1, Column):
arg1 = col1._jc
elif isinstance(col1, basestring):
arg1 = _create_column_from_name(col1)
else:
arg1 = float(col1)... | Create a binary mathfunction by name |
def compose_capability(base, *classes):
if _debug: compose_capability._debug("compose_capability %r %r", base, classes)
if not issubclass(base, Collector):
raise TypeError("base must be a subclass of Collector")
for cls in classes:
if not issubclass(cls, Capability):
raise TypeEr... | Create a new class starting with the base and adding capabilities. |
def _evaluate(self):
if not self.__retrieved:
self._elements = self._retrieve_revisions()
self.__retrieved = True
return super(RevisionCursor, self)._evaluate() | Lazily retrieves, caches, and returns the list of record _revisions |
def comp_listing(request, directory_slug=None):
context = {}
working_dir = settings.COMPS_DIR
if directory_slug:
working_dir = os.path.join(working_dir, directory_slug)
dirnames = []
templates = []
items = os.listdir(working_dir)
templates = [x for x in items if os.path.splitext(x)[1... | Output the list of HTML templates and subdirectories in the COMPS_DIR |
def add_function(self, func):
for block in func.blocks:
self.add_obj(block.addr, block) | Add a function `func` and all blocks of this function to the blanket. |
def _find(self, spec):
if spec.template_path is not None:
return spec.template_path
dir_path, file_name = self._find_relative(spec)
locator = self.loader._make_locator()
if dir_path is None:
path = locator.find_object(spec, self.loader.search_dirs, file_name=file_... | Find and return the path to the template associated to the instance. |
def download_gcs_file(path, out_fname=None, prefix_filter=None):
url = posixpath.join(GCS_BUCKET, path)
if prefix_filter:
url += "?prefix=%s" % prefix_filter
stream = bool(out_fname)
resp = requests.get(url, stream=stream)
if not resp.ok:
raise ValueError("GCS bucket inaccessible")
if out_fname:
... | Download a file from GCS, optionally to a file. |
def usn_v4_record(header, record):
length, major_version, minor_version = header
fields = V4_RECORD.unpack_from(record, RECORD_HEADER.size)
raise NotImplementedError('Not implemented') | Extracts USN V4 record information. |
def completedefault(self, text, line, *_):
tokens = line.split()
try:
before = tokens[-2]
complete = before.lower() in ("from", "update", "table", "into")
if tokens[0].lower() == "dump":
complete = True
if complete:
return [... | Autocomplete table names in queries |
def _round(num):
deci = num - math.floor(num)
if deci > 0.8:
return int(math.floor(num) + 1)
else:
return int(math.floor(num)) | A custom rounding function that's a bit more 'strict'. |