code stringlengths 51 2.34k | docstring stringlengths 11 171 |
|---|---|
def __unregister_services(self):
with self.__registration_lock:
registered_services = self.__registered_services.copy()
for registration in registered_services:
try:
registration.unregister()
except BundleException:
pass
if self... | Unregisters all bundle services |
def _get_pillar_cfg(pillar_key,
                    pillarenv=None,
                    saltenv=None):
    """Retrieve the pillar data for *pillar_key* from the right environment."""
    return __salt__['pillar.get'](pillar_key,
                                  pillarenv=pillarenv,
                                  saltenv=saltenv)
def validate(self):
for header in self._requiredHeaders:
if not self.headers.get(header, False):
raise errors.ParseError('Missing Registration Header: ' + header)
for notice in self.notifications:
for header in self._requiredNotificationHeaders:
if not notice.get(header, False):
raise errors.Pars... | Validate required headers and validate notification headers |
def run(items, run_parallel):
to_process = []
extras = []
for batch, cur_items in _group_by_batches(items).items():
if _ready_for_het_analysis(cur_items):
to_process.append((batch, cur_items))
else:
for data in cur_items:
extras.append([data])
proc... | Top level entry point for calculating heterogeneity, handles organization and job distribution. |
def unpack_flags(value, flags):
    """Unpack *value* into a list of flag names.

    Multiple flags might be packed in the same field: when *value* is not
    itself a key of *flags*, decompose it bitwise against every known flag
    (in sorted key order).
    """
    if value in flags:
        return [flags[value]]
    return [flags[bit] for bit in sorted(flags) if bit & value > 0]
def rebin(a, newshape):
    """Rebin array *a* to *newshape* by floor-index sampling."""
    slices = tuple(slice(0, old, float(old) / new)
                   for old, new in zip(a.shape, newshape))
    # Float-step mgrid yields the fractional source coordinate of each
    # output cell; truncating to int picks the source element.
    coordinates = numpy.mgrid[slices]
    return a[tuple(coordinates.astype('i'))]
def _doc_parms(cls):
axis_descr = "{%s}" % ', '.join("{0} ({1})".format(a, i)
for i, a in enumerate(cls._AXIS_ORDERS))
name = (cls._constructor_sliced.__name__
if cls._AXIS_LEN > 1 else 'scalar')
name2 = cls.__name__
return axis_descr, name, name2 | Return a tuple of the doc parms. |
def writeBoolean(self, n):
    """Write a Boolean type marker for *n* to the stream.

    Only the identity ``n is False`` selects the false marker; any other
    value (including 0 or None) writes the true marker, as before.
    """
    marker = TYPE_BOOL_FALSE if n is False else TYPE_BOOL_TRUE
    self.stream.write(marker)
def strip_files(files, argv_max=(256 * 1024)):
tostrip = [(fn, flipwritable(fn)) for fn in files]
while tostrip:
cmd = list(STRIPCMD)
flips = []
pathlen = reduce(operator.add, [len(s) + 1 for s in cmd])
while pathlen < argv_max:
if not tostrip:
break
... | Strip a list of files |
def _create_http_client():
global _http_client
defaults = {'user_agent': USER_AGENT}
auth_username, auth_password = _credentials
if auth_username and auth_password:
defaults['auth_username'] = auth_username
defaults['auth_password'] = auth_password
_http_client = httpclient.AsyncHTTP... | Create the HTTP client with authentication credentials if required. |
async def generate_access_token(self, user):
    """Generate an access token for a given *user*."""
    payload = await self._get_payload(user)
    token = jwt.encode(payload,
                       self._get_secret(True),
                       algorithm=self._get_algorithm())
    # NOTE(review): .decode assumes jwt.encode returns bytes (PyJWT 1.x);
    # PyJWT 2.x returns str -- confirm the pinned dependency version.
    return token.decode("utf-8")
def visit_Import(self, node):
for alias in node.names:
current_module = MODULES
for path in alias.name.split('.'):
if path not in current_module:
raise PythranSyntaxError(
"Module '{0}' unknown.".format(alias.name),
... | Check if imported module exists in MODULES. |
def _setup_states(state_definitions, prev=()):
states = list(prev)
for state_def in state_definitions:
if len(state_def) != 2:
raise TypeError(
"The 'state' attribute of a workflow should be "
"a two-tuple of strings; got %r instead." % (state_def,)
... | Create a StateList object from a 'states' Workflow attribute. |
def _fail(self, message, text, i):
raise ValueError("{}:\n{}".format(message, text[i : i + 79])) | Raise an exception with given message and text at i. |
def _collapse_by_bam_variantcaller(samples):
by_bam = collections.OrderedDict()
for data in (x[0] for x in samples):
work_bam = utils.get_in(data, ("combine", "work_bam", "out"), data.get("align_bam"))
variantcaller = get_variantcaller(data)
if isinstance(work_bam, list):
wor... | Collapse regions to a single representative by BAM input, variant caller and batch. |
def gen_template_files(path):
    """Generate template paths relative to *path*, skipping service files."""
    path = path.rstrip(op.sep)
    for root, _, files in walk(path):
        for name in files:
            # TPLNAME / CFGNAME are internal bookkeeping files, not templates.
            if name in (TPLNAME, CFGNAME):
                continue
            yield op.relpath(op.join(root, name), path)
def delete_typeattr(typeattr,**kwargs):
tmpltype = get_templatetype(typeattr.type_id, user_id=kwargs.get('user_id'))
ta = db.DBSession.query(TypeAttr).filter(TypeAttr.type_id == typeattr.type_id,
TypeAttr.attr_id == typeattr.attr_id).one()
tmpltype.typeattrs.remove(... | Remove an typeattr from an existing type |
def checksum(self, path, hashtype='sha1'):
    """Return the *hashtype* checksum of the file at *path*."""
    normalized = posix_path(path)
    return self._handler.checksum(hashtype, normalized)
def _terminate_procs(procs):
logging.warn("Stopping all remaining processes")
for proc, g in procs.values():
logging.debug("[%s] SIGTERM", proc.pid)
try:
proc.terminate()
except OSError as e:
if e.errno != errno.ESRCH:
raise
sys.exit(1) | Terminate all processes in the process dictionary |
def check_exists(path, type='file'):
    """Check that *path* exists on disk.

    type='file' requires a regular file; any other value requires a
    directory.  Raises RuntimeError when missing, returns True otherwise.
    """
    if type == 'file':
        if not os.path.isfile(path):
            raise RuntimeError('The file `%s` does not exist.' % path)
    elif not os.path.isdir(path):
        raise RuntimeError('The folder `%s` does not exist.' % path)
    return True
def run(self):
t0 = time.time()
haveQ = self._isReactiveMarket()
self._withholdOffbids()
self._offbidToCase()
success = self._runOPF()
if success:
gteeOfferPrice, gteeBidPrice = self._nodalPrices(haveQ)
self._runAuction(gteeOfferPrice, gteeBidPrice... | Computes cleared offers and bids. |
def to_unit_memory(number):
    """Create a human readable string for a memory size given in bytes."""
    step = 1024
    number /= step
    if number < 100:
        return '{} Kb'.format(round(number, 2))
    number /= step
    if number < 300:
        return '{} Mb'.format(round(number, 2))
    return '{} Gb'.format(round(number / step, 2))
def get(self, request, pzone_pk):
try:
pzone = PZone.objects.get(pk=pzone_pk)
except PZone.DoesNotExist:
raise Http404("Cannot find given pzone.")
filters = {"pzone": pzone}
if "from" in request.GET:
parsed = dateparse.parse_datetime(request.GET["from"... | Get all the operations for a given pzone. |
def check_purge_status(self, purge_id):
    """Get the status and times of a recently completed purge.

    Returns a list of FastlyPurgeStatus objects, one per entry in the
    API response.
    """
    content = self._fetch("/purge?id=%s" % purge_id)
    # List comprehension instead of map(): on Python 3, map() is a lazy
    # one-shot iterator, so callers could iterate the result only once.
    return [FastlyPurgeStatus(self, status) for status in content]
def render_robots_meta_tag(context):
request = context['request']
robots_indexing = None
robots_following = None
if context.request.get_host() in settings.META_TAGGER_ROBOTS_DOMAIN_WHITELIST:
if context.get('object'):
try:
robots_indexing = context['object'].get_robot... | Returns the robots meta tag. |
def tally_role_columns(self):
totals = self.report["totals"]
roles = self.report["roles"]
totals["dependencies"] = sum(roles[item]
["total_dependencies"] for item in roles)
totals["defaults"] = sum(roles[item]
["total_... | Sum up all of the stat columns. |
def create_genome_size_dict(genome):
    """Create a genome size dict from the genome's chromosome-size file.

    Each line of the size file is "<name> <length>"; returns a dict
    mapping name -> int(length).
    """
    size_file = get_genome_size_file(genome)
    size_dict = {}
    # Context manager so the file handle is closed (the original leaked
    # it), and a distinct loop name so the `genome` parameter is not
    # shadowed by the per-line chromosome name.
    with open(size_file) as handle:
        for line in handle:
            name, length = line.split()
            size_dict[name] = int(length)
    return size_dict
def send_prefix(pymux, variables):
    """Send the configured prefix key(s) to the active pane."""
    process = pymux.arrangement.get_active_pane().process
    for key in pymux.key_bindings_manager.prefix:
        process.write_input(prompt_toolkit_key_to_vt100_key(key))
def getfile(object):
if ismodule(object):
if hasattr(object, '__file__'):
return object.__file__
raise TypeError, 'arg is a built-in module'
if isclass(object):
object = sys.modules.get(object.__module__)
if hasattr(object, '__file__'):
return object.__fil... | Work out which source or compiled file an object was defined in. |
def _create_merge_filelist(bam_files, base_file, config):
bam_file_list = "%s.list" % os.path.splitext(base_file)[0]
samtools = config_utils.get_program("samtools", config)
with open(bam_file_list, "w") as out_handle:
for f in sorted(bam_files):
do.run('{} quickcheck -v {}'.format(samtoo... | Create list of input files for merge, ensuring all files are valid. |
def on_if(self, node):
    """Regular if-then-else statement: execute the branch chosen by the test."""
    branch = node.body if self.run(node.test) else node.orelse
    for child in branch:
        self.run(child)
def to_representation(self, instance):
ret = OrderedDict()
readable_fields = [
field for field in self.fields.values()
if not field.write_only
]
for field in readable_fields:
try:
field_representation = self._get_field_representation(fi... | Object instance -> Dict of primitive datatypes. |
def deserialize_uri(value):
if isinstance(value, BNode):
return value
if isinstance(value, URIRef):
return value
if not value:
return None
if not isinstance(value, basestring):
raise ValueError("Cannot create URI from {0} of type {1}".format(value, value.__class__))
i... | Deserialize a representation of a BNode or URIRef. |
def update_token_tempfile(token):
    """Write *token* as pretty-printed JSON to the module-level `tmp` file."""
    # `tmp` is a module-level path defined elsewhere in the file.
    with open(tmp, 'w') as handle:
        handle.write(json.dumps(token, indent=4))
def create_manifest(self):
config_path = os.path.join(self.expt.control_path,
DEFAULT_CONFIG_FNAME)
self.manifest = []
if os.path.isfile(config_path):
self.manifest.append(config_path)
for model in self.expt.models:
config_files ... | Construct the list of files to be tracked by the runlog. |
def rooms_favorite(self, room_id=None, room_name=None, favorite=True):
if room_id is not None:
return self.__call_api_post('rooms.favorite', roomId=room_id, favorite=favorite)
elif room_name is not None:
return self.__call_api_post('rooms.favorite', roomName=room_name, favorite=f... | Favorite or unfavorite room. |
def special_login_handler(self, delay_factor=1):
    """Add a delay after login: send a return, then sleep."""
    factor = self.select_delay_factor(delay_factor)
    self.write_channel(self.RETURN)
    time.sleep(factor)
def Exception(obj, eng, callbacks, exc_info):
exception_repr = ''.join(traceback.format_exception(*exc_info))
msg = "Error:\n%s" % (exception_repr)
eng.log.error(msg)
if obj:
obj.extra_data['_error_msg'] = exception_repr
obj.save(
status=obj.known_... | Handle general exceptions in workflow, saving states. |
def __prepare_gprest_call(self, requestURL, params=None, headers=None, restType='GET', body=None):
if self.__serviceAccount.is_iam_enabled():
auth = None
iam_api_key_header = {
self.__AUTHORIZATION_HEADER_KEY: str('API-KEY '+self.__serviceAccount.get_api_key())
... | Returns Authorization type and GP headers |
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
self.fileExtension = extension
with open(path, 'r') as hmetFile:
for line in hmetFile:
sline = line.strip().split()
try:
da... | Read HMET WES from File Method |
def frameify(self, state, data):
data = state.recv_buf + data
while data:
line, sep, rest = data.partition('\n')
if sep != '\n':
break
data = rest
if self.carriage_return and line[-1] == '\r':
line = line[:-1]
tr... | Split data into a sequence of lines. |
def reset(self):
    """Clear the rubber band and selection points for the analysis extents."""
    self.start_point = None
    self.end_point = None
    self.is_emitting_point = False
    self.rubber_band.reset(QgsWkbTypes.PolygonGeometry)
def _get_arg_tokens(cli):
    """Return the display tokens for the arg-prompt."""
    arg_text = str(cli.input_processor.arg)
    return [
        (Token.Prompt.Arg, '(arg: '),
        (Token.Prompt.Arg.Text, arg_text),
        (Token.Prompt.Arg, ') '),
    ]
def tt_comp(self, sampled_topics):
samples = sampled_topics.shape[0]
tt = np.zeros((self.V, self.K, samples))
for s in range(samples):
tt[:, :, s] = \
samplers_lda.tt_comp(self.tokens, sampled_topics[s, :],
self.N, self... | Compute term-topic matrix from sampled_topics. |
def __CheckValid(self, value):
"check for validity of value"
val = self.__val
self.is_valid = True
try:
val = set_float(value)
if self.__min is not None and (val < self.__min):
self.is_valid = False
val = self.__min
if s... | check for validity of value |
def list_elasticache(region, filter_by_kwargs):
conn = boto.elasticache.connect_to_region(region)
req = conn.describe_cache_clusters()
data = req["DescribeCacheClustersResponse"]["DescribeCacheClustersResult"]["CacheClusters"]
if filter_by_kwargs:
clusters = [x['CacheClusterId'] for x in data if... | List all ElastiCache Clusters. |
def _merge_fastqc(samples):
fastqc_list = collections.defaultdict(list)
seen = set()
for data in samples:
name = dd.get_sample_name(data)
if name in seen:
continue
seen.add(name)
fns = glob.glob(os.path.join(dd.get_work_dir(data), "qc", dd.get_sample_name(data), "... | merge all fastqc samples into one by module |
def timestamp(num_params, p_levels, k_choices, N):
string = "_v%s_l%s_gs%s_k%s_N%s_%s.txt" % (num_params,
p_levels,
k_choices,
N,
... | Returns a uniform timestamp with parameter values for file identification |
def configure_namespacebrowser(self):
    """Wire kernel signals to the associated namespace browser widget."""
    # Lambdas look up self.namespacebrowser at call time on purpose, so a
    # browser assigned after connection is still the one that is used.
    self.sig_namespace_view.connect(
        lambda data: self.namespacebrowser.process_remote_view(data))
    self.sig_var_properties.connect(
        lambda data: self.namespacebrowser.set_var_properties(data))
def resolve_remote(self):
for idx, target in enumerate(self._targets):
if isinstance(target, remote):
resolved = target.resolve()
if isinstance(resolved, str):
resolved = interpolate(resolved, env.sos_dict.dict())
self._targets[idx]... | If target is of remote type, resolve it |
def hydrate_sources(sources_field, glob_match_error_behavior):
path_globs = sources_field.path_globs.copy(glob_match_error_behavior=glob_match_error_behavior)
snapshot = yield Get(Snapshot, PathGlobs, path_globs)
fileset_with_spec = _eager_fileset_with_spec(
sources_field.address.spec_path,
sources_field.... | Given a SourcesField, request a Snapshot for its path_globs and create an EagerFilesetWithSpec. |
def create_peptidequant_lookup(fns, pqdb, poolnames, pepseq_colnr,
ms1_qcolpattern=None, isobqcolpattern=None,
psmnrpattern=None, fdrcolpattern=None,
pepcolpattern=None):
patterns = [ms1_qcolpattern, fdrcolpattern, pepcolpa... | Calls lower level function to create a peptide quant lookup |
def sum(self, vector):
    """Return a Vector instance as the element-wise sum of two vectors.

    NOTE(review): this iterates ``self.to_list()`` directly and unpacks
    each item as ``(i, x)``.  That only works if ``to_list()`` yields
    (index, value) pairs; if it returns plain component values this
    needs ``enumerate(self.to_list())`` -- confirm against ``to_list()``.
    """
    return self.from_list(
        [x + vector.vector[i] for i, x in self.to_list()]
    )
def _hline(self):
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string | Print an horizontal line |
def _normalize_abmn(abmn):
abmn_2d = np.atleast_2d(abmn)
abmn_normalized = np.hstack((
np.sort(abmn_2d[:, 0:2], axis=1),
np.sort(abmn_2d[:, 2:4], axis=1),
))
return abmn_normalized | return a normalized version of abmn |
def seed(self):
    """Seed new initial values for the stochastics (best effort).

    Every stochastic with an ``rseed`` draws a fresh value conditional on
    its parents' current values; failures for individual stochastics are
    ignored so one bad node does not abort seeding the rest.
    """
    for generation in self.generations:
        for stochastic in generation:
            try:
                if stochastic.rseed is not None:
                    stochastic.random(**stochastic.parents.value)
            except Exception:
                # Best-effort, as before -- but a bare `except:` also
                # swallowed KeyboardInterrupt/SystemExit; Exception does not.
                pass
def read_texture(filename, attrs=None):
filename = os.path.abspath(os.path.expanduser(filename))
try:
reader = get_reader(filename)
image = standard_reader_routine(reader, filename, attrs=attrs)
return vtki.image_to_texture(image)
except KeyError:
pass
return vtki.numpy_t... | Loads a ``vtkTexture`` from an image file. |
def geom_transform(geom, t_srs):
    """Transform *geom* in place into the target spatial reference *t_srs*."""
    s_srs = geom.GetSpatialReference()
    # No-op when the geometry is already in the target SRS.
    if not s_srs.IsSame(t_srs):
        transform = osr.CoordinateTransformation(s_srs, t_srs)
        geom.Transform(transform)
        geom.AssignSpatialReference(t_srs)
def pesach_dow(self):
    """Return the day of week for Pesach (15 Nisan) of this hdate's year."""
    pesach = HebrewDate(self.hdate.year, Months.Nisan, 15)
    jdn = conv.hdate_to_jdn(pesach)
    return (jdn + 1) % 7 + 1
def rollout(self, **kwargs):
if kwargs.has_key('tau'):
timesteps = int(self.timesteps / kwargs['tau'])
else:
timesteps = self.timesteps
self.x_track = np.zeros(timesteps)
self.reset_state()
for t in range(timesteps):
self.x_track[t] = self.x
... | Generate x for open loop movements. |
def _remove_media(self,directory,files=None):
if not self._connectToFB():
logger.error("%s - Couldn't connect to fb")
return False
db=self._loadDB(directory)
if not files:
files=db.keys()
if isinstance(files,basestring):
files=[files]
... | Removes specified files from fb |
def retrieve_data(self):
    """Retrieve data from the configured HTTP URL into ``self.data``."""
    url = self.config.get('url')
    timeout = float(self.config.get('timeout', 10))
    response = requests.get(url, verify=self.verify_ssl, timeout=timeout)
    self.data = response.content
def resume(jid, state_id=None):
    """Remove a pause from *jid*, allowing it to continue."""
    minion = salt.minion.MasterMinion(__opts__)
    resume_state = minion.functions['state.resume']
    resume_state(jid, state_id)
def _from_dict(cls, _dict):
args = {}
if 'element_pair' in _dict:
args['element_pair'] = [
ElementPair._from_dict(x) for x in (_dict.get('element_pair'))
]
if 'identical_text' in _dict:
args['identical_text'] = _dict.get('identical_text')
... | Initialize a AlignedElement object from a json dictionary. |
def upload(ctx, release, rebuild):
dist_path = Path(DIST_PATH)
if rebuild is False:
if not dist_path.exists() or not list(dist_path.glob('*')):
print("No distribution files found. Please run 'build' command first")
return
else:
ctx.invoke(build, force=True)
if rel... | Uploads distribuition files to pypi or pypitest. |
def use_file(self, enabled=True,
file_name=None,
level=logging.WARNING,
when='d',
interval=1,
backup_count=30,
delay=False,
utc=False,
at_time=None,
log_format=None,
... | Handler for logging to a file, rotating the log file at certain timed intervals. |
def fetch_user(app_id, token, ticket, url_detail='https://pswdless.appspot.com/rest/detail'):
    """Fetch the user detail from Passwordless."""
    validator = FetchUserWithValidation(app_id, token, ticket, url_detail)
    return validator
def schnorr_partial_combine(self, schnorr_sigs):
if not HAS_SCHNORR:
raise Exception("secp256k1_schnorr not enabled")
assert len(schnorr_sigs) > 0
sig64 = ffi.new('char [64]')
sig64sin = []
for sig in schnorr_sigs:
if not isinstance(sig, bytes):
... | Combine multiple Schnorr partial signatures. |
def push_tx(self, crypto, tx_hex):
    """Push a raw transaction hex to the service.  (This method is untested.)"""
    endpoint = "%s/pushtx" % self.base_url
    response = self.post_url(endpoint, {'hex': tx_hex})
    return response.content
async def check_response(response, valid_response_codes):
    """Check the response for correctness.

    204 returns True; a status in *valid_response_codes* returns the JSON
    body; anything else raises PvApiResponseStatusError.
    """
    status = response.status
    if status == 204:
        return True
    if status not in valid_response_codes:
        raise PvApiResponseStatusError(status)
    return await response.json()
async def listTriggers(self):
trigs = []
for (iden, trig) in self.cell.triggers.list():
useriden = trig['useriden']
if not (self.user.admin or useriden == self.user.iden):
continue
user = self.cell.auth.user(useriden)
trig['username'] = '<u... | Lists all the triggers that the current user is authorized to access |
def CreateCounterMetadata(metric_name, fields=None, docstring=None, units=None):
return rdf_stats.MetricMetadata(
varname=metric_name,
metric_type=rdf_stats.MetricMetadata.MetricType.COUNTER,
value_type=rdf_stats.MetricMetadata.ValueType.INT,
fields_defs=FieldDefinitionProtosFromTuples(fields ... | Helper function for creating MetricMetadata for counter metrics. |
def _format_numeric_sequence(self, _sequence, separator="."):
if not _sequence:
return colorize(_sequence, "purple")
_sequence = _sequence if _sequence is not None else self.obj
minus = (2 if self._depth > 0 else 0)
just_size = len(str(len(_sequence)))
out = []
... | Length of the highest index in chars = justification size |
def _load_extensions(self):
log.debug(u"loading all extensions : %s", self.extensions)
self.loaded_extensions = []
for f in self.extensions:
if not os.path.isabs(f):
f = os.path.abspath(f)
if not os.path.exists(f):
raise CoreError(u"Extensi... | Load all extension files into the namespace pykwalify.ext |
def update_default_channels(sender, instance, created, **kwargs):
    """Post-save hook ensuring only one channel is marked as default."""
    if not instance.default:
        return
    # Demote every other default channel.
    Channel.objects.filter(default=True).exclude(
        channel_id=instance.channel_id
    ).update(default=False)
def _get_func(cls, source_ver, target_ver):
matches = (
func for func in cls._upgrade_funcs
if func.source == source_ver and func.target == target_ver
)
try:
match, = matches
except ValueError:
raise ValueError(
f"No migration from {source_ver} to {target_ver}")
return match | Return exactly one function to convert from source to target |
def gen_str(src, dst):
    """Return a STR (store) instruction from *src* to *dst*."""
    empty = ReilEmptyOperand()
    return ReilBuilder.build(ReilMnemonic.STR, src, empty, dst)
def disconnect(self):
try:
os.kill(-self.pid, signal.SIGKILL)
except OSError:
pass
self.read_buffer = b''
self.write_buffer = b''
self.set_enabled(False)
if self.read_in_state_not_started:
self.print_lines(self.read_in_state_not_started... | We are no more interested in this remote process |
def summary(self):
    """Summarize the failure of a model on a test."""
    return "== Model %s did not complete test %s due to error '%s'. ==" % (
        str(self.model), str(self.test), str(self.score))
def peer_retrieve(key, relation_name='cluster'):
cluster_rels = relation_ids(relation_name)
if len(cluster_rels) > 0:
cluster_rid = cluster_rels[0]
return relation_get(attribute=key, rid=cluster_rid,
unit=local_unit())
else:
raise ValueError('Unable to det... | Retrieve a named key from peer relation `relation_name`. |
def _iter_valid_subtotal_dicts(self):
for insertion_dict in self._insertion_dicts:
if not isinstance(insertion_dict, dict):
continue
if insertion_dict.get("function") != "subtotal":
continue
if not {"anchor", "args", "name"}.issubset(insertion_... | Generate each insertion dict that represents a valid subtotal. |
def _import_sub_module(module, name):
module = __import__(module.__name__ + "." + name)
for level in name.split("."):
module = getattr(module, level)
return module | import_sub_module will mimic the function of importlib.import_module |
def _set_pip_ssl(anaconda_dir):
if anaconda_dir:
cert_file = os.path.join(anaconda_dir, "ssl", "cert.pem")
if os.path.exists(cert_file):
os.environ["PIP_CERT"] = cert_file | Set PIP SSL certificate to installed conda certificate to avoid SSL errors |
def AppendIndexDictionaryToFile(uniqueWords, ndxFile, ipFile, useShortFileName='Y'):
if useShortFileName == 'Y':
f = os.path.basename(ipFile)
else:
f = ipFile
with open(ndxFile, "a", encoding='utf-8', errors='replace') as ndx:
word_keys = uniqueWords.keys()
for word in sorted... | Save the list of unique words to the master list |
def label_for(self, name):
    """Get a human readable label for a method given its *name*.

    Uses the first line of the method's docstring when present, otherwise
    humanizes the name with the instance prefix stripped.
    """
    doc = getattr(self, name).__doc__
    if doc and doc.strip():
        return doc.strip().splitlines()[0]
    return humanize(name.replace(self._prefix, ''))
def wrap_onspace(self, text):
def _truncate(line, word):
return '{line}{part}{word}'.format(
line=line,
part=' \n'[(len(line[line.rfind('\n')+1:]) + len(word.split('\n', 1)[0]) >= self.width)],
word=word
)
... | When the text inside the column is longer then the width, will split by space and continue on the next line. |
def check_existance(f):
    """Check that the input file *f* exists; log and exit otherwise."""
    if opath.isfile(f):
        return
    logging.error("Nanoget: File provided doesn't exist or the path is incorrect: {}".format(f))
    sys.exit("File provided doesn't exist or the path is incorrect: {}".format(f))
def _rgbtomask(self, obj):
dat = obj.get_image().get_data()
return dat.sum(axis=2).astype(np.bool) | Convert RGB arrays from mask canvas object back to boolean mask. |
def Dlmk(l, m, k, phi1, phi2, theta1, theta2):
    """Return the value of D^l_mk as defined in Allen & Ottewill '97."""
    phase_m = exp(complex(0., -m * phi1))
    phase_k = exp(complex(0., -k * gamma(phi1, phi2, theta1, theta2)))
    return phase_m * dlmk(l, m, k, theta1) * phase_k
def run(self):
config = self.state.document.settings.env.config
processes = get_processes(config.autoprocess_process_dir, config.autoprocess_source_base_url)
process_nodes = []
for process in sorted(processes, key=itemgetter('name')):
process_nodes.extend(self.make_process_no... | Create a list of process definitions. |
def random_string(length):
    """Generate a random string of *length* unambiguous alphanumerics."""
    # Alphabet omits easily-confused characters (0/O, 1/I, etc.).
    alphabet = "23456789ABCDEFGHJKLMNPQRSTUVWXYZ"
    return "".join(random.choice(alphabet) for _ in range(length))
def main(argv=None):
    """Main entry point when the user runs the `trytravis` command."""
    try:
        colorama.init()
        _main(sys.argv[1:] if argv is None else argv)
    except RuntimeError as e:
        print(colorama.Fore.RED + 'ERROR: ' +
              str(e) + colorama.Style.RESET_ALL)
        sys.exit(1)
    else:
        sys.exit(0)
def curl_couchdb(url, method='GET', base_url=BASE_URL, data=None):
(username, password) = get_admin()
if username is None:
auth = None
else:
auth = (username, password)
if method == 'PUT':
req = requests.put('{}{}'.format(base_url, url), auth=auth, data=data)
elif method == '... | Launch a curl on CouchDB instance |
def doFindAny(self, WHAT=None, SORT=None, SKIP=None, MAX=None, LOP='AND', **params):
    """Perform the FileMaker -findany command.

    WHAT and SORT previously used mutable defaults ({} and []), which are
    created once and shared between calls; None sentinels with per-call
    fresh containers preserve the old behavior without that pitfall.
    """
    WHAT = {} if WHAT is None else WHAT
    SORT = [] if SORT is None else SORT
    self._preFind(WHAT, SORT, SKIP, MAX, LOP)
    for key in params:
        self._addDBParam(key, params[key])
    return self._doAction('-findany')
def checkout_and_create_branch(repo, name):
local_branch = repo.branches[name] if name in repo.branches else None
if not local_branch:
if name in repo.remotes.origin.refs:
msg = repo.git.checkout(name)
_LOGGER.debug(msg)
return
local_branch = repo.create_head(... | Checkout branch. Create it if necessary |
def _configure_app(app_):
app_.url_map.strict_slashes = False
app_.config.from_object(default_settings)
app_.config.from_envvar('JOB_CONFIG', silent=True)
db_url = app_.config.get('SQLALCHEMY_DATABASE_URI')
if not db_url:
raise Exception('No db_url in config')
app_.wsgi_app = ProxyFix(ap... | Configure the Flask WSGI app. |
def _read_preference_for(self, session):
if session:
return session._txn_read_preference() or self.__read_preference
return self.__read_preference | Read only access to the read preference of this instance or session. |
def page_exists_on_disk(self, slug):
    """Return True if both the page directory and its markdown file exist."""
    page_dir = os.path.join(self.dirs['source'], slug)
    page_file = os.path.join(page_dir, slug + '.md')
    return os.path.isdir(page_dir) and os.path.isfile(page_file)
def validate_target(self, target):
    """Return True when every '/'-separated arch in *target* is known."""
    return all(arch in self.archs for arch in target.split('/'))
def check_sentence_spacing(text):
    """Check for more than two spaces after a period (use 1 or 2)."""
    err = "typography.symbols.sentence_spacing"
    msg = u"More than two spaces after the period; use 1 or 2."
    # Raw string: "\." inside a normal literal is an invalid escape
    # sequence (DeprecationWarning, SyntaxWarning on Python 3.12+).
    regex = r"\. {3}"
    return existence_check(
        text, [regex], err, msg, max_errors=3, require_padding=False)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.