code stringlengths 51 2.34k | docstring stringlengths 11 171 |
|---|---|
def OnSecondaryCheckbox(self, event):
    """Top checkbox event handler.

    Mirrors the checkbox state into the "top" and "right" attrs, then
    posts a chart-redraw command event.
    """
    # NOTE(review): both edges are toggled from the one checkbox — confirm intended.
    self.attrs["top"] = event.IsChecked()
    self.attrs["right"] = event.IsChecked()
    post_command_event(self, self.DrawChartMsg)
def genderize(name, api_token=None):
    """Fetch gender for a first name from genderize.io.

    Parameters
    ----------
    name : str
        First name to look up.
    api_token : str, optional
        genderize.io API key, sent as the ``apikey`` query parameter.

    Returns
    -------
    tuple
        ``(gender, accuracy)`` where ``accuracy`` is the reported
        probability as an integer percentage, or ``None`` when the API
        returned no probability.

    Raises
    ------
    requests.HTTPError
        If the API still responds with an error status after retries.
    """
    GENDERIZE_API_URL = "https://api.genderize.io/"
    TOTAL_RETRIES = 10
    MAX_RETRIES = 5
    SLEEP_TIME = 0.25
    STATUS_FORCELIST = [502]  # retry only on bad-gateway responses
    params = {
        'name': name
    }
    if api_token:
        params['apikey'] = api_token
    session = requests.Session()
    retries = urllib3.util.Retry(total=TOTAL_RETRIES,
                                 connect=MAX_RETRIES,
                                 status=MAX_RETRIES,
                                 status_forcelist=STATUS_FORCELIST,
                                 backoff_factor=SLEEP_TIME,
                                 raise_on_status=True)
    session.mount('http://', requests.adapters.HTTPAdapter(max_retries=retries))
    session.mount('https://', requests.adapters.HTTPAdapter(max_retries=retries))
    r = session.get(GENDERIZE_API_URL, params=params)
    r.raise_for_status()
    result = r.json()
    gender = result['gender']
    prob = result.get('probability', None)
    # BUG FIX: a probability of exactly 0.0 is falsy, so `if prob` mapped it
    # to None; compare against None so 0.0 yields an accuracy of 0.
    acc = int(prob * 100) if prob is not None else None
    return gender, acc
def arc_distance(theta_1, phi_1,
                 theta_2, phi_2):
    """Calculate the pairwise arc distance between points on a sphere.

    All angles are in radians.  Uses the numerically stable atan2 form
    of the haversine formula and returns the central angle(s).
    """
    half_dtheta = (theta_2 - theta_1) / 2.0
    half_dphi = (phi_2 - phi_1) / 2.0
    hav = (np.sin(half_dtheta) ** 2
           + np.cos(theta_1) * np.cos(theta_2) * np.sin(half_dphi) ** 2)
    return 2.0 * np.arctan2(np.sqrt(hav), np.sqrt(1.0 - hav))
def _tar_and_copy(src_dir, target_dir):
    """Tar and gzip ``src_dir`` and copy the archive to GCS ``target_dir``.

    Returns the final ``gs://`` destination path of the uploaded tarball.
    """
    # Normalize so the "%s/%s" joins below don't produce double slashes.
    src_dir = src_dir.rstrip("/")
    target_dir = target_dir.rstrip("/")
    tmp_dir = tempfile.gettempdir().rstrip("/")
    src_base = os.path.basename(src_dir)
    # Archive the directory contents (excluding .git) into the temp dir.
    shell_run(
        "tar --exclude=.git -zcf {tmp_dir}/{src_base}.tar.gz -C {src_dir} .",
        src_dir=src_dir,
        src_base=src_base,
        tmp_dir=tmp_dir)
    final_destination = "%s/%s.tar.gz" % (target_dir, src_base)
    shell_run(
        ("gsutil cp {tmp_dir}/{src_base}.tar.gz "
         "{final_destination}"),
        tmp_dir=tmp_dir,
        src_base=src_base,
        final_destination=final_destination)
    return final_destination
def list_address(self, domain):
    """Get the list of addresses of a single domain.

    Returns an empty list when the domain has no A records.
    """
    try:
        response = self.get('/REST/ARecord/%s/%s' % (
            self.zone, domain))
    except self.NotFoundError:
        # No A records — treat as empty rather than an error.
        return []
    addresses = response.content['data']
    return [Address.from_url(self, uri) for uri in addresses]
def list(region, profile):
    """List all the CloudFormation stacks in the given region.

    Builds the environment config from the CLI arguments (falling back
    to the auto-detected region) and exits 0 on success, 1 on failure.
    """
    environment = {'region': region if region else find_myself()}
    if profile:
        environment['profile'] = profile
    ini_data = {'environment': environment}
    sys.exit(0 if start_list(ini_data) else 1)
def _constructClient(client_version, username, user_domain, password, project_name, project_domain,
                     auth_url):
    """Return a novaclient from the given args.

    When either domain argument is supplied, authenticate using
    domain-scoped options; otherwise use the domain-less form.
    """
    loader = loading.get_plugin_loader('password')
    if user_domain is not None or project_domain is not None:
        auth = loader.load_from_options(auth_url=auth_url, username=username, user_domain_name=user_domain,
                                        password=password, project_name=project_name, project_domain_name=project_domain)
    else:
        auth = loader.load_from_options(auth_url=auth_url, username=username,
                                        password=password, project_name=project_name)
    sess = session.Session(auth=auth)
    return client.Client(client_version, session=sess)
def _is_convertible_to_index(other):
if isinstance(other, TimedeltaIndex):
return True
elif (len(other) > 0 and
other.inferred_type not in ('floating', 'mixed-integer', 'integer',
'mixed-integer-float', 'mixed')):
return True
return False | return a boolean whether I can attempt conversion to a TimedeltaIndex |
def create(self, request):
    """Create a new document based on the given request's JSON body.

    Timestamps the document (created_at == updated_at) before insert and
    returns the serialized insert result: 201 on success, 400 when the
    result carries every error field.
    """
    document = self.collection(request.json)
    document.created_at = datetime.utcnow()
    document.updated_at = document.created_at
    created = document.insert()
    return Response(
        response=serialize(created),
        status=(
            # NOTE(review): `all` means 400 only when *every* error key is
            # present — confirm `any` was not intended here.
            201 if not all(
                key in created for key in [
                    'error_code', 'error_type', 'error_message'
                ]
            ) else 400
        )
    )
def _data_is_binary(self, data):
    """Check if the data contains binary components.

    Recursively inspects list items and dict values; returns True as
    soon as any nested item is a ``six.binary_type`` (``bytes`` on
    Python 3).
    """
    if isinstance(data, six.binary_type):
        return True
    if isinstance(data, list):
        # any() short-circuits on the first binary item, unlike the
        # previous eager reduce() over a fully materialized list.
        return any(self._data_is_binary(item) for item in data)
    if isinstance(data, dict):
        return any(self._data_is_binary(item)
                   for item in six.itervalues(data))
    return False
def package_data(pkg, root_list):
    """Generic function to find package_data for ``pkg`` under each root.

    Walks every root directory below the package and collects file
    paths relative to the package directory.

    Returns a dict mapping ``pkg`` to the list of relative paths.
    """
    found = []
    for root in root_list:
        top = os.path.join(pkg, root)
        for dirpath, _dirnames, filenames in os.walk(top):
            found.extend(
                os.path.relpath(os.path.join(dirpath, name), pkg)
                for name in filenames)
    return {pkg: found}
def do_complete(self, code, cursor_pos):
    """Method called on autocompletion requests.

    Completes magic names when the token at the cursor is a magic,
    otherwise completes SPARQL keyword names.  Returns a Jupyter
    completion reply dict, or implicitly None when nothing matches.
    """
    self._klog.info("{%s}", code[cursor_pos:cursor_pos+10])
    token, start = token_at_cursor(code, cursor_pos)
    tkn_low = token.lower()
    if is_magic(token, start, code):
        matches = [k for k in magics.keys() if k.startswith(tkn_low)]
    else:
        matches = [sparql_names[k] for k in sparql_names
                   if k.startswith(tkn_low)]
    self._klog.debug("token={%s} matches={%r}", token, matches)
    # NOTE(review): falls through to None when there are no matches —
    # confirm the kernel protocol accepts that instead of an empty reply.
    if matches:
        return {'status': 'ok',
                'cursor_start': start,
                'cursor_end': start+len(token),
                'matches': matches}
def as_html(self, path=""):
    """Return a rendering of the current state in HTML.

    Raises StateError for unknown paths.  "_href" values are turned
    into anchors, and JSON indentation is preserved via &nbsp; entities.
    """
    if path not in self.top_level_links:
        raise StateError("Unknown path")
    # FIX: these assignments had no right-hand side (a syntax error);
    # TODO(review): restore the original header/footer markup if known.
    header = ""
    footer = ""
    rep = self.get_state_repr(path)

    def make_links(rep):
        # Recursively replace "_href" values with HTML anchors.
        for e, v in rep.items():
            if e == "_href":
                rep[e] = '<a href=%s>%s</a>' % (v, v)
            elif type(v) == dict:
                make_links(v)

    make_links(rep)
    rep_str_lines = json.dumps(rep, indent=4).split("\n")
    buf = []
    for l in rep_str_lines:
        num_spaces = len(l) - len(l.lstrip())
        # FIX: emit non-breaking spaces so the indentation survives in
        # HTML; the previous line rebuilt the string unchanged (a no-op).
        buf.append("&nbsp;" * num_spaces + l[num_spaces:])
    return "%s%s%s" % (header, "<br>\n".join(buf), footer)
def _filter(self, text):
    """Filter out URLs and/or e-mail addresses from ``text``.

    Each removal is applied only when the corresponding instance flag
    (``urls`` / ``emails``) is set.
    """
    for enabled, pattern in ((self.urls, RE_LINK), (self.emails, RE_MAIL)):
        if enabled:
            text = pattern.sub('', text)
    return text
def create_update():
    """Create the pyparsing grammar for the 'update' statement.

    Grammar shape: UPDATE <table> <update expr> [KEYS IN ...] [WHERE ...]
    [USING ...] [RETURNS NONE|ALL|UPDATED OLD|NEW] [THROTTLE ...].
    """
    update = upkey("update").setResultsName("action")
    returns, none, all_, updated, old, new = map(
        upkey, ["returns", "none", "all", "updated", "old", "new"]
    )
    # RETURNS clause: RETURNS NONE | (ALL|UPDATED) (OLD|NEW)
    return_ = returns + Group(
        none | (all_ + old) | (all_ + new) | (updated + old) | (updated + new)
    ).setResultsName("returns")
    return (
        update
        + table
        + update_expr
        + Optional(keys_in)
        + Optional(where)
        + Optional(using)
        + Optional(return_)
        + Optional(throttle)
    )
def delete_where_unique(cls, ip, object_id, location):
    """Delete the record matching (ip, object_id, location).

    Returns True when a matching record was found and deleted, or None
    when no record matched.
    """
    match = cls.where_unique(ip, object_id, location)
    if match is None:
        return None
    match.delete()
    return True
def host_to_ips(host):
    """Return a list of IP addresses of a given hostname, or None if not found.

    Includes both IPv4 and IPv6 results; any resolution failure yields
    None rather than raising.
    """
    try:
        infos = socket.getaddrinfo(host, 0, socket.AF_UNSPEC,
                                   socket.SOCK_STREAM)
    except Exception:
        return None
    # BUG FIX: the previous loop appended unconditionally, so an entry of
    # an unexpected family would append a stale (or undefined) `ip`.
    # Only INET/INET6 sockaddrs carry the address in position 0.
    ips = [sockaddr[0]
           for family, _socktype, _proto, _canonname, sockaddr in infos
           if family in (socket.AF_INET, socket.AF_INET6)]
    return ips or None
def options(self, url, url_params=empty.dict, headers=empty.dict, timeout=None, **params):
    """Calls the service at the specified URL using the "OPTIONS" method."""
    # NOTE(review): `url_params` is accepted but never forwarded — confirm.
    return self.request('OPTIONS', url=url, headers=headers, timeout=timeout, **params)
def diff_op(self):
    """The diffusion operator calculated from the data.

    Uses the landmark operator for landmark graphs, densifying sparse
    results.  Raises NotFittedError when fit() has not been called.
    """
    if self.graph is not None:
        if isinstance(self.graph, graphtools.graphs.LandmarkGraph):
            diff_op = self.graph.landmark_op
        else:
            diff_op = self.graph.diff_op
        if sparse.issparse(diff_op):
            diff_op = diff_op.toarray()
        return diff_op
    else:
        raise NotFittedError("This PHATE instance is not fitted yet. Call "
                             "'fit' with appropriate arguments before "
                             "using this method.")
def _check_file(self):
    """Checks watched file modification time and permission changes.

    Stops the poll timer when the editor widget has been destroyed,
    notifies on deletion or external change, and syncs read-only state.
    """
    try:
        self.editor.toPlainText()
    except RuntimeError:
        # Underlying Qt widget was deleted; stop polling.
        self._timer.stop()
        return
    if self.editor and self.editor.file.path:
        if not os.path.exists(self.editor.file.path) and self._mtime:
            self._notify_deleted_file()
        else:
            mtime = os.path.getmtime(self.editor.file.path)
            if mtime > self._mtime:
                self._mtime = mtime
                self._notify_change()
            writeable = os.access(self.editor.file.path, os.W_OK)
            self.editor.setReadOnly(not writeable)
def write_summary_cnts(self, go_ids):
    """Write summary of level and depth counts for specific GO ids."""
    records = [self.obo.get(go_id) for go_id in go_ids]
    self._write_summary_cnts(self.get_cnts_levels_depths_recs(records))
def shorten_go_name_all(self, name):
    """Shorten GO name for tables in papers, supplements, and plots."""
    shortened = self.replace_greek(name)
    return shortened.replace("MHC class I", "MHC-I")
def source(self):
    """The source element this document element was created from.

    Falls back up the parent chain; if no ancestor has a source, wraps
    self in a Location.
    """
    if self._source is not None:
        return self._source
    if self.parent is not None:
        return self.parent.source
    return Location(self)
def col2hue(r, g, b):
    """Return hue value (degrees in [0, 360)) corresponding to given RGB color."""
    # Hexagonal-to-circular hue via atan2 of the chroma projection.
    # `round2` is a project helper — presumably round-half-away; verify.
    return round2(180 / pi * atan2(sqrt(3) * (g - b), 2 * r - g - b) + 360) % 360
def _label_desc(self, label, desc, label_color=''):
return self.BRIGHT + label_color + label + self.RESET + desc | Generic styler for a line consisting of a label and description. |
def build_queue(action, action_space, build_queue_id):
    """Cancel a unit in the build queue.

    ``action_space`` is accepted for interface uniformity and ignored.
    """
    del action_space
    action.action_ui.production_panel.unit_index = build_queue_id
def _make_diff(self, correct, given):
    """Return the intermediate representation of the diff.

    Runs a line-based diff-match-patch pass (chars stand in for lines),
    cleans it up semantically, and converts to the mdiff format.
    """
    dmp = DMP()
    dmp.Diff_Timeout = 4
    text1, text2, array = dmp.diff_linesToChars(correct, given)
    diffs = dmp.diff_main(text1, text2)
    dmp.diff_cleanupSemantic(diffs)
    dmp.diff_charsToLines(diffs, array)
    return list(dmp_to_mdiff(diffs))
def setup(self, settings):
    """Setup redis and tldextract.

    Exits the process when the Redis connection cannot be established.
    """
    self.extract = tldextract.TLDExtract()
    self.redis_conn = redis.Redis(host=settings['REDIS_HOST'],
                                  port=settings['REDIS_PORT'],
                                  db=settings.get('REDIS_DB'))
    try:
        # info() forces a round-trip to verify connectivity.
        self.redis_conn.info()
        self.logger.debug("Connected to Redis in ZookeeperHandler")
    except ConnectionError:
        self.logger.error("Failed to connect to Redis in ZookeeperHandler")
        sys.exit(1)
def _process_settings(**kwargs):
    """Apply user supplied Django QUERYCOUNT settings into QC_SETTINGS.

    Invoked for setting-changed signals; ignores settings other than
    QUERYCOUNT (and the legacy QUERYCOUNT_THRESHOLDS fallback).
    """
    setting_name = kwargs.get('setting', None)
    if setting_name is not None and setting_name != 'QUERYCOUNT':
        return
    # Legacy flat setting, superseded by QUERYCOUNT['THRESHOLDS'] below.
    if getattr(settings, 'QUERYCOUNT_THRESHOLDS', False):
        QC_SETTINGS['THRESHOLDS'] = settings.QUERYCOUNT_THRESHOLDS
    if not getattr(settings, 'QUERYCOUNT', False):
        return
    if 'DISPLAY_DUPLICATES' in settings.QUERYCOUNT:
        duplicate_settings = settings.QUERYCOUNT['DISPLAY_DUPLICATES']
        if duplicate_settings is not None:
            duplicate_settings = int(duplicate_settings)
        QC_SETTINGS['DISPLAY_DUPLICATES'] = duplicate_settings
    for key in ['THRESHOLDS',
                'IGNORE_REQUEST_PATTERNS',
                'IGNORE_SQL_PATTERNS',
                'IGNORE_PATTERNS',
                'RESPONSE_HEADER']:
        if key in settings.QUERYCOUNT:
            QC_SETTINGS[key] = settings.QUERYCOUNT[key]
def group2commdct(commdct, glist):
    """Add group and idf-object names to each commdct item.

    Mutates ``commdct`` in place and returns it for convenience.
    """
    for (group_name, obj_name), item in zip(glist, commdct):
        item[0]['group'] = group_name
        item[0]['idfobj'] = obj_name
    return commdct
def index_column(self, index_name, table, column):
    """Called by interfaces to index a specific column in a table.

    Commits on success; an OperationalError (typically "index exists")
    is reported and skipped.
    """
    cursor = self.get_cursor()
    try:
        # NOTE(review): identifiers are interpolated into the SQL string;
        # only safe if index_name/table/column are trusted, not user input.
        cursor.execute(
            'CREATE INDEX {0} on {1}({2})'.format(index_name, table, column))
    except sqlite3.OperationalError as error:
        print(error)
        print('Skipping index creation and assuming it exists already')
    else:
        self.conn.commit()
def declares_namespace_package(filename):
    """Walk the file's AST and determine if it declares a namespace package.

    True when the file contains a single-argument call to something
    named ``declare_namespace`` whose argument is the name ``__name__``.
    """
    import ast
    with open(filename) as fp:
        tree = ast.parse(fp.read(), filename)
    for node in ast.walk(tree):
        if not isinstance(node, ast.Call) or len(node.args) != 1:
            continue
        func = node.func
        if isinstance(func, ast.Attribute) and func.attr != 'declare_namespace':
            continue
        if isinstance(func, ast.Name) and func.id != 'declare_namespace':
            continue
        arg = node.args[0]
        if isinstance(arg, ast.Name) and arg.id == '__name__':
            return True
    return False
def rrc(self, r):
    """Does a ROTATION to the RIGHT: 8-bit rotate, bit 0 wraps to bit 7.

    When the register's value is not a known number, the register and
    the flag become undefined (None).
    """
    if not is_number(self.regs[r]):
        self.set(r, None)
        self.set_flag(None)
        return
    v_ = self.getv(self.regs[r]) & 0xFF
    # (v >> 1) with the old bit 0 moved into bit 7; stored as a string.
    self.regs[r] = str((v_ >> 1) | ((v_ & 1) << 7))
def add_component_definition(self, definition):
    """Add a ComponentDefinition to the document.

    Raises ValueError when a definition with the same identity has
    already been added.
    """
    if definition.identity in self._components:
        raise ValueError("{} has already been defined".format(definition.identity))
    self._components[definition.identity] = definition
def _get_opus_maximum(self):
    """Instantiate (or fetch) the opus maximum type resource.

    Returns the existing resource when already present in the store;
    otherwise labels, saves and returns a new one.
    """
    # FIX: this assignment had no right-hand side (a syntax error).
    # TODO(review): restore the original English label text if known.
    label = "opus maximum"
    opmax = self.session.get_resource(
        BASE_URI_TYPES % "opmax",
        self.session.get_class(surf.ns.ECRM['E55_Type'])
    )
    if opmax.is_present():
        return opmax
    opmax.rdfs_label.append(Literal(label, "en"))
    logger.debug("Created a new opus maximum type instance")
    opmax.save()
    return opmax
def start_ebrisk(rupgetter, srcfilter, param, monitor):
    """Launcher for ebrisk tasks.

    Runs the task directly when the rupture weights fit under
    ``maxweight``; otherwise yields (task, args) tuples for splitting.
    """
    with monitor('weighting ruptures'):
        rupgetter.set_weights(srcfilter, param['num_taxonomies'])
    if rupgetter.weights.sum() <= param['maxweight']:
        yield ebrisk(rupgetter, srcfilter, param, monitor)
    else:
        for rgetter in rupgetter.split(param['maxweight']):
            yield ebrisk, rgetter, srcfilter, param
def activate(self, path, isdirectory):
    """Set up a boto connection for an s3/s3n/gs URL.

    Parses the URL, opens the appropriate bucket connection and stores
    the scheme/connection/key/bucket on the instance.  Raises
    NotImplementedError for unsupported URL schemes.
    """
    from .utils import connection_with_anon, connection_with_gs
    parsed = BotoClient.parse_query(path)
    scheme = parsed[0]
    bucket_name = parsed[1]
    key = parsed[2]
    if scheme == 's3' or scheme == 's3n':
        conn = connection_with_anon(self.credentials)
        bucket = conn.get_bucket(bucket_name)
    elif scheme == 'gs':
        conn = connection_with_gs(bucket_name)
        bucket = conn.get_bucket()
    else:
        raise NotImplementedError("No file reader implementation for URL scheme " + scheme)
    # Directory keys must end with "/" so prefix listings behave.
    if isdirectory and (not key.endswith("/")):
        key += "/"
    self._scheme = scheme
    self._conn = conn
    self._key = key
    self._bucket = bucket
    self._active = True
def _cancel_job(self, job, force):
    """Cancel a job using streamtool when a local install is available.

    Returns False when no local Streams install exists to delegate to.
    """
    import streamsx.st as st
    if st._has_local_install:
        return st._cancel_job(job.id, force,
                              domain_id=job.get_instance().get_domain().id, instance_id=job.get_instance().id)
    return False
def _add_numeric_methods_unary(cls):
    """Add in numeric unary methods (__neg__, __pos__, __abs__, __inv__)."""
    def _make_evaluate_unary(op, opstr):
        # Each generated method validates, preserves attributes, and
        # applies `op` element-wise to the index values.
        def _evaluate_numeric_unary(self):
            self._validate_for_numeric_unaryop(op, opstr)
            attrs = self._get_attributes_dict()
            attrs = self._maybe_update_attributes(attrs)
            return Index(op(self.values), **attrs)
        _evaluate_numeric_unary.__name__ = opstr
        return _evaluate_numeric_unary
    cls.__neg__ = _make_evaluate_unary(operator.neg, '__neg__')
    cls.__pos__ = _make_evaluate_unary(operator.pos, '__pos__')
    cls.__abs__ = _make_evaluate_unary(np.abs, '__abs__')
    # NOTE(review): __inv__ is not a real Python special method and is
    # implemented here as negation, not bitwise inversion — confirm.
    cls.__inv__ = _make_evaluate_unary(lambda x: -x, '__inv__')
def send_edit_string(self, nexus_host, path_snip, body_snip,
                     check_to_close_session=True):
    """Sends a REST POST request to a Nexus switch and records timing.

    ``check_to_close_session`` is accepted for interface compatibility.
    """
    starttime = time.time()
    LOG.debug("NexusDriver edit config for host %s: path: %s body: %s",
              nexus_host, path_snip, body_snip)
    self.client.rest_post(path_snip, nexus_host, body_snip)
    self.capture_and_print_timeshot(
        starttime, "send_edit",
        switch=nexus_host)
def time_segments_aggregate(X, interval, time_column, method=['mean']):
    """Aggregate values over fixed length time segments.

    ``X`` is indexed by ``time_column`` and split into
    ``[start, start + interval)`` windows; every aggregation named in
    ``method`` is applied per window and the results concatenated.

    Returns ``(values, index)`` as numpy arrays, where ``index`` holds
    each window's start timestamp.
    """
    if isinstance(X, np.ndarray):
        X = pd.DataFrame(X)
    X = X.sort_values(time_column).set_index(time_column)
    method_names = [method] if isinstance(method, str) else method
    values, index = [], []
    window_start = X.index.values[0]
    last_ts = X.index.values[-1]
    while window_start <= last_ts:
        window_end = window_start + interval
        # .loc is label-inclusive, hence the -1 on the right edge.
        window = X.loc[window_start:window_end - 1]
        row = np.concatenate(
            [getattr(window, name)(skipna=True).values
             for name in method_names])
        values.append(row)
        index.append(window_start)
        window_start = window_end
    return np.asarray(values), np.asarray(index)
def bracket_level(text, open=frozenset('([{'), close=frozenset(')]}')):
    """Return the net bracket nesting level of ``text``.

    0 means balanced brackets (or no brackets); positive means unclosed
    openers remain; negative means excess closers.

    Note: the ``open`` parameter shadows the builtin of the same name
    but is kept for backward compatibility with keyword callers.  The
    defaults are now frozensets so they cannot be mutated across calls
    (the previous mutable-set defaults were a latent hazard).
    """
    level = 0
    for ch in text:
        if ch in open:
            level += 1
        elif ch in close:
            level -= 1
    return level
def _get_source(self, data):
    """Get source data object for citeproc-py.

    NOTE(review): implicitly returns None for any other record_format —
    confirm callers guard against unsupported formats.
    """
    if self.record_format == 'csl':
        return CiteProcJSON([json.loads(data)])
    elif self.record_format == 'bibtex':
        return BibTeX(data)
def _add_node(self, agent):
    """Add an Agent as a node to the graph.

    Agents with bound conditions get a combined "A/B/..." label and are
    recorded as complex nodes.  Duplicate node keys are skipped.
    """
    if agent is None:
        return
    node_label = _get_node_label(agent)
    if isinstance(agent, Agent) and agent.bound_conditions:
        bound_agents = [bc.agent for bc in agent.bound_conditions if
                        bc.is_bound]
        if bound_agents:
            bound_names = [_get_node_label(a) for a in bound_agents]
            node_label = _get_node_label(agent) + '/' + \
                '/'.join(bound_names)
            self._complex_nodes.append([agent] + bound_agents)
        else:
            node_label = _get_node_label(agent)
    node_key = _get_node_key(agent)
    if node_key in self.existing_nodes:
        return
    self.existing_nodes.append(node_key)
    self.graph.add_node(node_key,
                        label=node_label,
                        **self.node_properties)
def _kw(keywords):
r = {}
for k, v in keywords:
r[k] = v
return r | Turn list of keywords into dictionary. |
def write_response(self, response):
    """Writes response content synchronously to the transport.

    Cancels any pending response timeout, serializes and writes the
    response, then either schedules the keep-alive timeout or closes
    the transport.
    """
    if self._response_timeout_handler:
        self._response_timeout_handler.cancel()
        self._response_timeout_handler = None
    try:
        keep_alive = self.keep_alive
        self.transport.write(
            response.output(
                self.request.version, keep_alive, self.keep_alive_timeout
            )
        )
        self.log_response(response)
    except AttributeError:
        # response lacked .output(): not an HTTPResponse.
        logger.error(
            "Invalid response object for url %s, "
            "Expected Type: HTTPResponse, Actual Type: %s",
            self.url,
            type(response),
        )
        self.write_error(ServerError("Invalid response type"))
    except RuntimeError:
        if self._debug:
            logger.error(
                "Connection lost before response written @ %s",
                self.request.ip,
            )
        keep_alive = False
    except Exception as e:
        self.bail_out(
            "Writing response failed, connection closed {}".format(repr(e))
        )
    finally:
        # NOTE(review): if `self.keep_alive` itself raised AttributeError
        # above, `keep_alive` is unbound here and this raises NameError.
        if not keep_alive:
            self.transport.close()
            self.transport = None
        else:
            self._keep_alive_timeout_handler = self.loop.call_later(
                self.keep_alive_timeout, self.keep_alive_timeout_callback
            )
            self._last_response_time = time()
        self.cleanup()
def yield_rows(adapter):
    """Yield a dataset catalog as CSV, one encoded line at a time.

    The header row is emitted first, then each data row; every chunk is
    produced from a fresh buffer so yielded strings are independent.
    """
    def encode(row):
        buf = StringIO()
        get_writer(buf).writerow(row)
        return buf.getvalue()

    yield encode(adapter.header())
    for row in adapter.rows():
        yield encode(row)
def create_config_backend_options(module_opts, env_name, env_vars):
    """Return Terraform backend options defined in module options.

    Merges per-environment backend config and, when requested, resolves
    CloudFormation stack outputs ("StackName::OutputName") into backend
    config values via boto3.
    """
    backend_opts = {}
    if module_opts.get('terraform_backend_config'):
        backend_opts['config'] = merge_nested_environment_dicts(
            module_opts.get('terraform_backend_config'),
            env_name
        )
    if module_opts.get('terraform_backend_cfn_outputs'):
        if not backend_opts.get('config'):
            backend_opts['config'] = {}
        if not backend_opts['config'].get('region'):
            backend_opts['config']['region'] = env_vars['AWS_DEFAULT_REGION']
        boto_args = extract_boto_args_from_env(env_vars)
        cfn_client = boto3.client(
            'cloudformation',
            region_name=backend_opts['config']['region'],
            **boto_args
        )
        # Each value is "StackName::OutputName"; look the output up live.
        for (key, val) in merge_nested_environment_dicts(module_opts.get('terraform_backend_cfn_outputs'),
                                                         env_name).items():
            backend_opts['config'][key] = find_cfn_output(
                val.split('::')[1],
                cfn_client.describe_stacks(
                    StackName=val.split('::')[0]
                )['Stacks'][0]['Outputs']
            )
    return backend_opts
def monthly_wind_conditions(self):
    """A list of 12 monthly wind conditions that are used on the design days."""
    return [WindCondition(x, y) for x, y in zip(
        self._monthly_wind, self.monthly_wind_dirs)]
def _make_image(tensor):
    """Converts an NDArray type image to an Image protobuf Summary.

    Expects an HWC-layout tensor; encodes it as PNG via PIL.
    """
    assert isinstance(tensor, NDArray)
    if Image is None:
        raise ImportError('need to install PIL for visualizing images')
    height, width, channel = tensor.shape
    tensor = _make_numpy_array(tensor)
    image = Image.fromarray(tensor)
    output = io.BytesIO()
    image.save(output, format='PNG')
    image_string = output.getvalue()
    output.close()
    return Summary.Image(height=height, width=width, colorspace=channel,
                         encoded_image_string=image_string)
def template(cls, address=None, commandtuple=None,
             userdata=None, cmd2=-1, flags=None, acknak=None):
    """Create a message template used for callbacks.

    ``cmd2=-1`` is the sentinel for "not supplied"; any other value
    overrides the commandtuple's cmd2.
    """
    msgraw = bytearray([0x02, cls._code])
    msgraw.extend(bytes(cls._receivedSize))
    msg = ExtendedSend.from_raw_message(msgraw)
    if commandtuple:
        cmd1 = commandtuple.get('cmd1')
        cmd2out = commandtuple.get('cmd2')
    else:
        cmd1 = None
        cmd2out = None
    # BUG FIX: `cmd2 is not -1` compared object identity with an int
    # literal, which is unreliable (and a SyntaxWarning on modern
    # Python); use value inequality.
    if cmd2 != -1:
        cmd2out = cmd2
    msg._address = Address(address)
    msg._messageFlags = MessageFlags(flags)
    msg._messageFlags.extended = 1
    msg._cmd1 = cmd1
    msg._cmd2 = cmd2out
    msg._userdata = Userdata.template(userdata)
    msg._acknak = acknak
    return msg
def divide_prefixes(prefixes: List[str], seed: int = 0) -> Tuple[List[str], List[str], List[str]]:
    """Divide data prefixes into training, validation and test subsets.

    Splits roughly 90/5/5 after a deterministic shuffle, guaranteeing
    each subset is non-empty.

    Raises PersephoneException when fewer than 3 prefixes are supplied.

    BUG FIX: the input list is no longer mutated (a shuffled copy is
    used) and the global random state is left untouched (a private
    Random instance is seeded instead of the module-level RNG).
    """
    if len(prefixes) < 3:
        raise PersephoneException(
            "{} cannot be split into 3 groups as it only has {} items".format(prefixes, len(prefixes))
        )
    Ratios = namedtuple("Ratios", ["train", "valid", "test"])
    ratios = Ratios(.90, .05, .05)
    train_end = int(ratios.train * len(prefixes))
    valid_end = int(train_end + ratios.valid * len(prefixes))
    # Shrink boundaries so the valid and test slices are never empty.
    if valid_end == len(prefixes):
        valid_end -= 1
    if train_end == valid_end:
        train_end -= 1
    shuffled = list(prefixes)
    random.Random(seed).shuffle(shuffled)
    train_prefixes = shuffled[:train_end]
    valid_prefixes = shuffled[train_end:valid_end]
    test_prefixes = shuffled[valid_end:]
    assert train_prefixes, "Got empty set for training data"
    assert valid_prefixes, "Got empty set for validation data"
    assert test_prefixes, "Got empty set for testing data"
    return train_prefixes, valid_prefixes, test_prefixes
def _drop_duplicate_ij(self):
self.network['ij'] = list(map(lambda x: tuple(sorted(x)), list(
zip(*[self.network['i'].values, self.network['j'].values]))))
self.network.drop_duplicates(['ij', 't'], inplace=True)
self.network.reset_index(inplace=True, drop=True)
self.network.drop('ij', inplace=True, axis=1) | Drops duplicate entries from the network dataframe. |
def dmp(self, thing):
    """Dump the content of an object into a dict for wdb.js.

    Maps each attribute name to its repr and type name; attribute access
    errors are captured as strings rather than raised.
    """
    def safe_getattr(key):
        # Never let a broken property abort the whole dump.
        try:
            return getattr(thing, key)
        except Exception as e:
            return 'Error getting attr "%s" from "%s" (%s: %s)' % (
                key, thing, type(e).__name__, e
            )
    return dict((
        escape(key), {
            'val': self.safe_better_repr(safe_getattr(key)),
            'type': type(safe_getattr(key)).__name__
        }
    ) for key in dir(thing))
def to_nice_yaml(yaml_input, indentation=2):
    """Return the input serialized as human-readable block-style YAML."""
    return yaml.safe_dump(yaml_input, indent=indentation,
                          allow_unicode=True, default_flow_style=False)
def add_virtualip(self, lb, vip):
    """Adds the VirtualIP to the specified load balancer.

    Returns the raw (response, body) pair from the API call.
    """
    resp, body = self.api.method_post("/loadbalancers/%s/virtualips" % lb.id,
                                      body=vip.to_dict())
    return resp, body
def _get_type(self, obj):
typever = obj['Type']
typesplit = typever.split('.')
return typesplit[0] + '.' + typesplit[1] | Return the type of an object. |
def _active_mounts_darwin(ret):
    """List active mounts on Mac OS systems.

    Parses `mount` output lines of the form
    "device on mountpoint (fstype, opt1, opt2)" into ``ret``.
    """
    for line in __salt__['cmd.run_stdout']('mount').split('\n'):
        comps = re.sub(r"\s+", " ", line).split()
        # The parenthesized group holds fstype followed by mount options.
        parens = re.findall(r'\((.*?)\)', line, re.DOTALL)[0].split(", ")
        ret[comps[2]] = {'device': comps[0],
                         'fstype': parens[0],
                         'opts': _resolve_user_group_names(parens[1:])}
    return ret
def get(self, sched_rule_id):
    """Retrieve the information for a scheduleRule entity."""
    path = '/'.join(['schedulerule', sched_rule_id])
    return self.rachio.get(path)
def rest_name(cls):
    """Represents a singular REST name.

    Base classes report "Not Implemented"; concrete subclasses must set
    __rest_name__ or NotImplementedError is raised.
    """
    if cls.__name__ == "NURESTRootObject" or cls.__name__ == "NURESTObject":
        return "Not Implemented"
    if cls.__rest_name__ is None:
        raise NotImplementedError('%s has no defined name. Implement rest_name property first.' % cls)
    return cls.__rest_name__
def apply_compact(graph_path):
    """Run the pruned and frozen inference graph.

    Loads the serialized GraphDef at ``graph_path``, feeds 'lena.png'
    through it and writes the prediction to 'applied_compact.png'.
    """
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        with tf.gfile.GFile(graph_path, "rb") as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def)
        input_img = sess.graph.get_tensor_by_name('import/input_img:0')
        prediction_img = sess.graph.get_tensor_by_name('import/prediction_img:0')
        # [None, ...] adds the batch dimension expected by the graph.
        prediction = sess.run(prediction_img, {input_img: cv2.imread('lena.png')[None, ...]})
        cv2.imwrite('applied_compact.png', prediction[0])
def python_lib_rpm_dirs(self):
    """Both arch and non-arch site-packages dirs with 'rpm' appended.

    Returns a lazy map object, matching the original interface.
    """
    return map(lambda lib_dir: os.path.join(lib_dir, 'rpm'),
               [self.python_lib_arch_dir, self.python_lib_non_arch_dir])
def predict(dataset, fitmodel_url, save_results=True, show=False):
    """Predict the closest clusters for the datapoints in input.

    Builds and runs a Disco "kmeans_predict" pipeline job using the
    centers stored in the fit model; returns the job results.
    """
    from disco.job import Job
    from disco.worker.pipeline.worker import Worker, Stage
    from disco.core import result_iterator
    if "kmeans_fitmodel" not in fitmodel_url:
        raise Exception("Incorrect fit model.")
    job = Job(worker=Worker(save_results=save_results))
    # NOTE(review): `dict.items() + ...` only works on Python 2, where
    # items() returns lists — this module appears to be py2-only.
    job.params = dict(dataset.params.items() + mean_point_center.items())
    job.params["centers"] = [(i, c) for i, c in result_iterator(fitmodel_url["kmeans_fitmodel"])]
    job.pipeline = [("split", Stage("kmeans_predict", input_chain=dataset.params["input_chain"], init=simple_init,
                                    process=predict_map))]
    job.run(input=dataset.params["data_tag"], name="kmeans_predict")
    return job.wait(show=show)
def md5sum(filename, blocksize=8192):
    """Get the MD5 checksum of a file, read in ``blocksize`` chunks."""
    digest = hashlib.md5()
    with open(filename, 'rb') as fh:
        for chunk in iter(lambda: fh.read(blocksize), b''):
            digest.update(chunk)
    return digest.hexdigest()
def check_hash_key(query_on, key):
    """Only allows == against query_on.hash_key.

    Returns True when ``key`` is an equality condition on the model's
    hash key column.
    """
    return (
        isinstance(key, BaseCondition) and
        (key.operation == "==") and
        (key.column is query_on.hash_key)
    )
def _deserialize(self, data: dict, silent=True) -> dict:
deserialized_data = {}
for name, field in self._fields.items():
if field._readonly is False and name in data:
deserialized_data[name] = data.get(name)
return deserialized_data | Internal deserialize method for sifting out unacceptable data for the model |
def update(self, incoming_stream, seek=0, size=None, chunk_size=None,
           progress_callback=None):
    """Update a file in the file system from an incoming stream.

    Seeks to ``seek`` and writes the stream in chunks.

    Returns a (bytes_written, checksum) tuple.
    """
    fp = self.open(mode='r+b')
    try:
        fp.seek(seek)
        bytes_written, checksum = self._write_stream(
            incoming_stream, fp, chunk_size=chunk_size,
            size=size, progress_callback=progress_callback)
    finally:
        fp.close()
    return bytes_written, checksum
def _wrap_attr(self, attrs, context=None):
    """Wrap bound methods of attrs in BoundMethod proxies.

    Properties are inferred to their call result; lambdas taking `self`
    as first argument are bound like methods; everything else passes
    through unchanged.
    """
    for attr in attrs:
        if isinstance(attr, UnboundMethod):
            if _is_property(attr):
                yield from attr.infer_call_result(self, context)
            else:
                yield BoundMethod(attr, self)
        elif hasattr(attr, "name") and attr.name == "<lambda>":
            if attr.args.args and attr.args.args[0].name == "self":
                yield BoundMethod(attr, self)
                continue
            # Non-method lambda: pass through unwrapped.
            yield attr
        else:
            yield attr
def top_level_doc(self):
    """The top-level documentation string for the program."""
    commands = '\n '.join(sorted(self._commands))
    return self._doc_template.format(available_commands=commands,
                                     program=self.program)
def send_email(self, register_user):
    """Method for sending the registration Email to the user.

    Returns True on success, False when Flask-Mail is missing or the
    send fails (both cases are logged, never raised).
    """
    try:
        from flask_mail import Mail, Message
    except Exception:
        log.error("Install Flask-Mail to use User registration")
        return False
    mail = Mail(self.appbuilder.get_app)
    msg = Message()
    msg.subject = self.email_subject
    # Absolute activation URL embedding the user's registration hash.
    url = url_for(
        ".activation",
        _external=True,
        activation_hash=register_user.registration_hash,
    )
    msg.html = self.render_template(
        self.email_template,
        url=url,
        username=register_user.username,
        first_name=register_user.first_name,
        last_name=register_user.last_name,
    )
    msg.recipients = [register_user.email]
    try:
        mail.send(msg)
    except Exception as e:
        log.error("Send email exception: {0}".format(str(e)))
        return False
    return True
def check_api_key(request, key, hproPk):
    """Check if an API key is valid for the given hosted project.

    Always True in standalone mode; otherwise the project must exist
    and carry a non-empty key matching ``key``.
    """
    if settings.PIAPI_STANDALONE:
        return True
    (_, _, hproject) = getPlugItObject(hproPk)
    if not hproject:
        return False
    if hproject.plugItApiKey is None or hproject.plugItApiKey == '':
        return False
    return hproject.plugItApiKey == key
def _remove(self, shard_name):
    """Remove a member (shard) from the configuration.

    Issues removeShard via the router; when the removal is reported
    completed, also tears down the backing server or replica set.
    """
    result = self.router_command("removeShard", shard_name, is_eval=False)
    if result['ok'] == 1 and result['state'] == 'completed':
        shard = self._shards.pop(shard_name)
        if shard.get('isServer', False):
            Servers().remove(shard['_id'])
        if shard.get('isReplicaSet', False):
            ReplicaSets().remove(shard['_id'])
    return result
def _create_metadata_cache(cache_location):
    """Creates a new metadata cache instance appropriate for this platform.

    Prefers Fuseki when GUTENBERG_FUSEKI_URL is set, then BSD-DB, then
    falls back to the slower SQLite backend.
    """
    cache_url = os.getenv('GUTENBERG_FUSEKI_URL')
    if cache_url:
        return FusekiMetadataCache(cache_location, cache_url)
    try:
        return SleepycatMetadataCache(cache_location)
    except InvalidCacheException:
        logging.warning('Unable to create cache based on BSD-DB. '
                        'Falling back to SQLite backend. '
                        'Performance may be degraded significantly.')
        return SqliteMetadataCache(cache_location)
def _iter_content_generator(response, decode_unicode):
for chunk in response.iter_content(100 * 1024, decode_unicode=decode_unicode):
if decode_unicode:
chunk = chunk.replace('\r\n', '\n')
chunk = chunk.rstrip('\r')
yield chunk | Generator used to yield 100 KiB chunks for a given response. |
def add_tcp_flag(self, tcp_flag):
    """Add a single TCP flag — OR'd into the existing bitmask.

    Maintains ``self._size``, which tracks the serialized length of the
    field as the bitmask's decimal representation grows.

    Raises ValueError for anything that is not a single power-of-two
    flag value.
    """
    if tcp_flag not in [1, 2, 4, 8, 16, 32, 64, 128]:
        raise ValueError("Invalid TCP flag. Valid: [1, 2, 4, 8, 16,32, 64, 128]")
    prev_size = 0
    if self._json_dict.get('tcp_flags') is None:
        self._json_dict['tcp_flags'] = 0
    else:
        # +3 presumably accounts for quotes/colon around the key when
        # serialized — TODO confirm against the serializer.
        prev_size = len(str(self._json_dict['tcp_flags'])) + len('tcp_flags') + 3
    self._json_dict['tcp_flags'] |= tcp_flag
    new_size = len(str(self._json_dict['tcp_flags'])) + len('tcp_flags') + 3
    self._size += new_size - prev_size
    if prev_size == 0 and self._has_field:
        # First flag on an object that already has another field;
        # +2 presumably covers the ", " separator — verify.
        self._size += 2
    self._has_field = True
def disasters_sim(early_mean=early_mean,
                  late_mean=late_mean,
                  switchpoint=switchpoint):
    """Coal mining disasters sampled from the posterior predictive distribution.

    Defaults bind the module-level stochastic variables at definition
    time; draws early-rate Poisson counts up to the switchpoint and
    late-rate counts after it.
    """
    return concatenate((pm.rpoisson(early_mean, size=switchpoint), pm.rpoisson(
        late_mean, size=n - switchpoint)))
def close(self):
    """Disconnect from the controller (coroutine).

    Cancels the I/O loop task and waits for the cancellation to settle.
    """
    logger.info("Closing connection to %s:%s", self._host, self._port)
    self._ioloop_future.cancel()
    try:
        yield from self._ioloop_future
    except asyncio.CancelledError:
        pass
def expect(self, use_proportions=True):
    """The Expectation step of the CEM algorithm.

    Recomputes the likelihood table for entries changed since the
    previous partition and converts it to membership probabilities.
    """
    changed = self.get_changed(self.partition, self.prev_partition)
    lk_table = self.generate_lktable(self.partition, changed, use_proportions)
    self.table = self.likelihood_table_to_probs(lk_table)
def analyze_script(self, index=None):
    """Analyze the current (or given) script with todos.

    No-ops when the analysis has already run; defaults to the file at
    the current stack index.
    """
    if self.is_analysis_done:
        return
    if index is None:
        index = self.get_stack_index()
    if self.data:
        finfo = self.data[index]
        if self.todolist_enabled:
            finfo.run_todo_finder()
    self.is_analysis_done = True
def quit(self):
    """Send LMTP QUIT command, read the server response and disconnect.

    An unexpected (non-221) response is logged but does not prevent the
    disconnect and state reset.
    """
    self._send('QUIT\r\n')
    resp = self._read()
    if not resp.startswith('221'):
        logger.warning('Unexpected server response at QUIT: ' + resp)
    self._socket.close()
    self._socket = None
    self._recipients = []
    self.results = {}
def _use_html_if_available(format_fn):
def format_using_as_html(v, label=False):
if not label and hasattr(v, 'as_html'):
return v.as_html()
else:
return format_fn(v, label)
return format_using_as_html | Use the value's HTML rendering if available, overriding format_fn. |
def terminate(self, reboot=False):
    """Delete VIOM configuration from iRMC.

    Optionally requests a reboot so the change takes effect at init.
    """
    self.root.manage.manage = False
    self.root.mode = 'delete'
    self.root.init_boot = reboot
    self.client.set_profile(self.root.get_json())
def generate_git_version_info():
    """Query the git repository information to generate a version module."""
    info = GitInfo()
    git_path = call(('which', 'git'))

    # Build environment metadata.
    info.builder = get_build_name()
    info.build_date = get_build_date()

    # Commit / branch / tag metadata.
    info.hash, info.date, info.author, info.committer = get_last_commit(git_path)
    info.branch = get_git_branch(git_path)
    info.tag = get_git_tag(info.hash, git_path)

    if info.tag:
        info.version = info.tag.strip('v')
        # Any letter in the version (e.g. "rc", "dev") marks a pre-release.
        info.release = not re.search('[a-z]', info.version.lower())
    else:
        # No tag: fall back to an abbreviated commit hash.
        info.version = info.hash[:6]
        info.release = False

    info.last_release = determine_latest_release_version()

    # Refresh the index so the reported working-tree status is accurate.
    call((git_path, 'update-index', '-q', '--refresh'))
    info.status = get_git_status(git_path)
    return info
def create_client_from_path(self, path):
    """Create a client with its cwd pointing to path."""
    self.create_new_client()
    self.get_current_shellwidget().set_cwd(path)
def readQuotes(self, start, end):
    """Read quotes from Yahoo Financial.

    Returns an empty list when no symbol is configured.
    """
    symbol = self.symbol
    if symbol is not None:
        return self.__yf.getQuotes(symbol, start, end)
    LOG.debug('Symbol is None')
    return []
def _can_connect(ip, port):
cs = socket.socket()
try:
cs.connect((ip, port))
cs.close()
return True
except socket.error:
return False | Checks if a TCP port at IP address is possible to connect to |
def import_module(self, name):
    """Import a module into the bridge.

    Returns the cached module when it was imported before; otherwise imports
    it, caches it, and records a reverse reference from object id to name.
    """
    if name in self._objects:
        return self._objects[name]
    module = _import_module(name)
    self._objects[name] = module
    self._object_references[id(module)] = name
    return self._objects[name]
def update_count(self):
    """Updates likes and dislikes count."""
    counts = self.node.rating_count
    votes = self.node.vote_set
    # vote == 1 is a like, vote == -1 a dislike.
    counts.likes = votes.filter(vote=1).count()
    counts.dislikes = votes.filter(vote=-1).count()
    counts.save()
def items(self):
    """Iterate through all (coord, value) pairs of the cube."""
    for combo in self._all_combos():
        _unused, value = _getitem(self.cube, combo)
        yield combo, value
def _login(session):
    """Login to Fedex Delivery Manager.

    Posts the stored credentials, raises FedexError on HTTP or
    application-level failure, and persists the session cookies on success.
    """
    # Visit the referer page first (sets initial cookies).
    session.get(LOGIN_REFERER)
    credentials = {
        'user': session.auth.username,
        'pwd': session.auth.password
    }
    headers = {
        'Referer': LOGIN_REFERER,
        'X-Requested-With': 'XMLHttpRequest'
    }
    resp = session.post(LOGIN_URL, credentials, headers=headers)
    if resp.status_code != 200:
        raise FedexError('could not login')
    data = resp.json()
    if not data['successful']:
        raise FedexError(data['errorList'][0]['error']['message'])
    _save_cookies(session.cookies, session.auth.cookie_path)
def encode1(self):
    """Return the base64 encoding of the figure file and insert in html image tag.

    Fix: the original opened ``self.path`` without ever closing the handle;
    ``with`` guarantees closure.
    """
    with open(self.path, 'rb') as fh:
        payload = fh.read()
    data_uri = b64encode(payload).decode('utf-8').replace('\n', '')
    return '<img src="data:image/png;base64,{0}">'.format(data_uri)
def wait(self, timeout):
    """Wait for the provided time to elapse.

    Returns whatever the underlying event's ``wait()`` returns.
    """
    logger.debug('Waiting for %fs', timeout)
    event = self._event
    return event.wait(timeout)
def cache_data(datatable, data, **kwargs):
    """Stores the object list in the cache under the appropriate key."""
    cache_key = '{0}{1}'.format(CACHE_PREFIX, datatable.get_cache_key(**kwargs))
    log.debug("Setting data to cache at %r: %r", cache_key, data)
    cache.set(cache_key, data)
def sign(key, qs):
    """Signs the query string using the key.

    Appends the derived signature as a ``sig`` query parameter.
    """
    signature = urlencode([("sig", derive_signature(key, qs))])
    return "{0}&{1}".format(qs, signature)
def configure(self, config):
    """Configure the amount of characters to skip.

    Reads the ``skip<suffix>`` option; keeps the current prefix length as
    the fallback when the option is absent.
    """
    option = 'skip{suffix}'.format(suffix=self._option_suffix)
    self._prefix_length = config.getint(option, fallback=self._prefix_length)
def command_publish(self, command, **kwargs):
    """Publish a MQTT message."""
    client = mqtt.Client()
    client.connect(
        command['host'],
        port=int(command['port']))
    client.loop_start()
    try:
        client.publish(
            command['endpoint'],
            command['payload'])
    finally:
        # Stop the network loop even when publish raises.
        client.loop_stop(force=False)
def rawsql(query, ec=None):
    """Render a query as a single-line SQL string with literal bind values.

    ``ec`` could be an engine name or an engine instance; when omitted the
    module-level ``__default_engine__`` is used to choose the SQL dialect.
    NOTE(review): uses ``unicode``, so this is Python 2 code.
    """
    # A Result wrapper carries the underlying query; unwrap it first.
    if isinstance(query, Result):
        query = query.get_query()
    ec = ec or __default_engine__
    if isinstance(ec, (str, unicode)):
        # Engine given by name: resolve through the engine manager.
        engine = engine_manager[ec]
        dialect = engine.engine.dialect
    else:
        # Assumed to be an engine-like object exposing .dialect.
        dialect = ec.dialect
    if isinstance(query, (str, unicode)):
        # Already raw SQL text; nothing to compile.
        return query
    compiler = query._compiler(dialect)
    # Subclass the dialect's compiler so bind parameters are rendered as
    # inline literal values instead of placeholders.
    class LiteralCompiler(compiler.__class__):
        def visit_bindparam(
            self, bindparam, within_columns_clause=False,
            literal_binds=False, **kwargs
        ):
            return super(LiteralCompiler, self).render_literal_bindparam(
                bindparam, within_columns_clause=within_columns_clause,
                literal_binds=literal_binds, **kwargs
            )
        def render_literal_value(self, value, type_):
            # Delegate literal formatting to the project's repr helper.
            return repr_value(value)
    compiler = LiteralCompiler(dialect, query)
    # Flatten the compiled SQL onto one line.
    return str(compiler.process(query)).replace('\n', '')
def churn(self):
    """Return number of canceled Subscriptions divided by active Subscriptions."""
    canceled = decimal.Decimal(str(self.canceled().count()))
    active = decimal.Decimal(str(self.active().count()))
    return canceled / active
def clear_calendars(self):
    """Deregisters all currently registered calendars."""
    for registry in (self._calendars, self._calendar_factories, self._aliases):
        registry.clear()
def __alloc_raw_data(self, initial_values=None):
if self.__raw_data == None:
raw_data_type = c_ubyte * self.__raw_report_size
self.__raw_data = raw_data_type()
elif initial_values == self.__raw_data:
return
else:
ctypes.memset(self.__raw_data, 0, len(self.__raw_data))
if initial_values:
for index in range(len(initial_values)):
self.__raw_data[index] = initial_values[index] | Pre-allocate re-usagle memory |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.