_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def logger_init():
    '''Configure the root logger according to the logging configuration.

    Depending on the configuration, handlers for syslog, stderr and/or a
    watched log file are attached to the root logger.
    '''
    logconf = config('logging')
    active_handlers = []
    if logconf['syslog']:
        active_handlers.append(logging.handlers.SysLogHandler(address='/dev/log'))
    if logconf['stderr']:
        active_handlers.append(logging.StreamHandler(sys.stderr))
    if logconf['file']:
        active_handlers.append(logging.handlers.WatchedFileHandler(logconf['file']))
    for active_handler in active_handlers:
        active_handler.setFormatter(logging.Formatter(logconf['format']))
        logging.root.addHandler(active_handler)
    logging.root.setLevel(logconf['level'].upper())
    logger.info('Log level set to %s' % logconf['level'])
"resource": ""
} |
def home():
    '''Serve the status page of the capture agent.
    '''
    # Get IDs of existing preview images
    preview = config()['capture']['preview']
    previewdir = config()['capture']['preview_dir']
    preview = [p.replace('{{previewdir}}', previewdir) for p in preview]
    # Keep only the indices of preview files that actually exist on disk.
    # (enumerate replaces the former zip/range round-trip.)
    preview = [i for i, path in enumerate(preview) if os.path.isfile(path)]
    # Get limits for recording table, falling back to defaults on bad input
    try:
        limit_upcoming = int(request.args.get('limit_upcoming', 5))
        limit_processed = int(request.args.get('limit_processed', 15))
    except ValueError:
        limit_upcoming = 5
        limit_processed = 15
    db = get_session()
    upcoming_events = db.query(UpcomingEvent)\
        .order_by(UpcomingEvent.start)\
        .limit(limit_upcoming)
    recorded_events = db.query(RecordedEvent)\
        .order_by(RecordedEvent.start.desc())\
        .limit(limit_processed)
    # Service status flags shown on the page
    recording = get_service_status(Service.CAPTURE) == ServiceStatus.BUSY
    uploading = get_service_status(Service.INGEST) == ServiceStatus.BUSY
    processed = db.query(RecordedEvent).count()
    upcoming = db.query(UpcomingEvent).count()
    return render_template('home.html', preview=preview, config=config(),
                           recorded_events=recorded_events,
                           upcoming_events=upcoming_events,
                           recording=recording, uploading=uploading,
                           processed=processed, upcoming=upcoming,
                           limit_upcoming=limit_upcoming,
                           limit_processed=limit_processed,
                           dtfmt=dtfmt)
"resource": ""
} |
def serve_image(image_id):
    '''Serve the preview image with the given id.

    Returns an empty 404 response if the id is unknown or the image file
    does not exist.
    '''
    try:
        preview_dir = config()['capture']['preview_dir']
        filepath = config()['capture']['preview'][image_id]
        filepath = filepath.replace('{{previewdir}}', preview_dir)
        filepath = os.path.abspath(filepath)
        if os.path.isfile(filepath):
            # os.path.split is portable, unlike splitting on '/'
            directory, filename = os.path.split(filepath)
            return send_from_directory(directory, filename)
    except (IndexError, KeyError):
        # Unknown image_id: fall through to the 404 below
        pass
    return '', 404
"resource": ""
} |
def run_all(*modules):
    '''Run the `run` callable of every given module in its own process and
    wait for all processes to finish.
    '''
    procs = []
    for module in modules:
        proc = multiprocessing.Process(target=module.run)
        proc.start()
        procs.append(proc)
    for proc in procs:
        proc.join()
"resource": ""
} |
def parse_ical(vcal):
    '''Parse an Opencast schedule iCalendar string and return the contained
    events as a list of dicts.

    Property keys are lowercased; `dt*` properties are converted to unix
    timestamps and attachments are base64-decoded into an `attach` list.
    '''
    # Unfold wrapped lines and collapse empty lines
    vcal = vcal.replace('\r\n ', '').replace('\r\n\r\n', '\r\n')
    vevents = vcal.split('\r\nBEGIN:VEVENT\r\n')[1:]
    events = []
    for vevent in vevents:
        event = {}
        for raw in vevent.split('\r\n'):
            parts = raw.split(':', 1)
            key = parts[0].lower()
            if len(parts) <= 1 or key == 'end':
                continue
            if key.startswith('dt'):
                event[key] = unix_ts(dateutil.parser.parse(parts[1]))
            elif not key.startswith('attach'):
                event[key] = parts[1]
            else:
                # Attachment: parse the parameters from the property name
                attachment = {}
                for param in parts[0].split(';'):
                    kv = param.split('=')
                    if kv[0].lower() in ('fmttype', 'x-apple-filename'):
                        attachment[kv[0].lower()] = kv[1]
                attachment['data'] = b64decode(parts[1]).decode('utf-8')
                event.setdefault('attach', []).append(attachment)
        events.append(event)
    return events
"resource": ""
} |
def get_schedule():
    '''Try to load schedule from the Matterhorn core. Returns a valid schedule
    or None on failure.

    On success the cache of upcoming events in the database is replaced
    with the freshly retrieved schedule.
    '''
    # Identify this capture agent towards the scheduler endpoint
    params = {'agentid': config()['agent']['name'].encode('utf8')}
    # cal_lookahead is multiplied by 24*60*60 — presumably configured in
    # days and converted to seconds here; verify against the config docs
    lookahead = config()['agent']['cal_lookahead'] * 24 * 60 * 60
    if lookahead:
        # The scheduler expects the cutoff time in milliseconds
        params['cutoff'] = str((timestamp() + lookahead) * 1000)
    uri = '%s/calendars?%s' % (config()['service-scheduler'][0],
                               urlencode(params))
    try:
        vcal = http_request(uri)
    except pycurl.error as e:
        logger.error('Could not get schedule: %s' % e)
        return
    try:
        cal = parse_ical(vcal.decode('utf-8'))
    except Exception:
        logger.error('Could not parse ical')
        logger.error(traceback.format_exc())
        return
    db = get_session()
    # Replace the whole cache of upcoming events with the fresh schedule
    db.query(UpcomingEvent).delete()
    for event in cal:
        # Ignore events that have already ended
        if event['dtend'] <= timestamp():
            continue
        e = UpcomingEvent()
        e.start = event['dtstart']
        e.end = event['dtend']
        e.uid = event.get('uid')
        e.title = event.get('summary')
        e.set_data(event)
        db.add(e)
    db.commit()
"resource": ""
} |
def control_loop():
    '''Main loop, retrieving the schedule.

    Repeatedly fetches an updated schedule, reports the next scheduled
    recording and sleeps until the next update is due or termination is
    requested.
    '''
    set_service_status(Service.SCHEDULE, ServiceStatus.BUSY)
    # READY/WATCHDOG/STATUS look like sd_notify-style service
    # notifications — confirm against the notify module
    notify.notify('READY=1')
    while not terminate():
        notify.notify('WATCHDOG=1')
        # Try getting an updated schedule
        get_schedule()
        session = get_session()
        next_event = session.query(UpcomingEvent)\
            .filter(UpcomingEvent.end > timestamp())\
            .order_by(UpcomingEvent.start)\
            .first()
        if next_event:
            logger.info('Next scheduled recording: %s',
                        datetime.fromtimestamp(next_event.start))
            notify.notify('STATUS=Next scheduled recording: %s' %
                          datetime.fromtimestamp(next_event.start))
        else:
            logger.info('No scheduled recording')
            notify.notify('STATUS=No scheduled recording')
        session.close()
        next_update = timestamp() + config()['agent']['update_frequency']
        # Sleep in small steps so a termination request is noticed promptly
        while not terminate() and timestamp() < next_update:
            time.sleep(0.1)
    logger.info('Shutting down schedule service')
    set_service_status(Service.SCHEDULE, ServiceStatus.STOPPED)
"resource": ""
} |
def control_loop():
    '''Main loop, updating the capture agent state.

    Periodically pushes the agent state until termination is requested.
    '''
    set_service_status(Service.AGENTSTATE, ServiceStatus.BUSY)
    notify.notify('READY=1')
    notify.notify('STATUS=Running')
    while not terminate():
        notify.notify('WATCHDOG=1')
        update_agent_state()
        next_update = timestamp() + config()['agent']['update_frequency']
        # Sleep in small steps so a termination request is noticed promptly
        while not terminate() and timestamp() < next_update:
            time.sleep(0.1)
    logger.info('Shutting down agentstate service')
    set_service_status(Service.AGENTSTATE, ServiceStatus.STOPPED)
"resource": ""
} |
def make_error_response(error, status=500):
    ''' Return a response with a jsonapi error object.

    `error` is the human-readable error title, `status` the HTTP status
    code used both in the error object and for the response itself.
    '''
    body = {
        'errors': [{
            'status': status,
            'title': error
        }]
    }
    return make_response(jsonify(body), status)
"resource": ""
} |
def make_data_response(data, status=200):
    ''' Return a response with a list of jsonapi data objects.

    `data` may be a single object or a list; it is always wrapped in a list.
    '''
    body = {'data': ensurelist(data)}
    return make_response(jsonify(body), status)
"resource": ""
} |
def internal_state():
    '''Serve a json representation of internal agentstate as meta data.
    '''
    services = {}
    for name, service in (('capture', Service.CAPTURE),
                          ('ingest', Service.INGEST),
                          ('schedule', Service.SCHEDULE),
                          ('agentstate', Service.AGENTSTATE)):
        services[name] = ServiceStatus.str(get_service_status(service))
    return make_response(jsonify({'meta': {'services': services}}))
"resource": ""
} |
def events():
    '''Serve a JSON representation of all upcoming and recorded events.
    '''
    db = get_session()
    upcoming = db.query(UpcomingEvent)\
        .order_by(UpcomingEvent.start)
    recorded = db.query(RecordedEvent)\
        .order_by(RecordedEvent.start.desc())
    serialized = [e.serialize() for e in upcoming]
    serialized.extend(e.serialize() for e in recorded)
    return make_data_response(serialized)
"resource": ""
} |
def event(uid):
    '''Return a specific events JSON.

    Recorded events take precedence over upcoming ones; 404 if neither
    contains the uid.
    '''
    db = get_session()
    match = db.query(RecordedEvent).filter(RecordedEvent.uid == uid).first()
    if match is None:
        match = db.query(UpcomingEvent).filter(UpcomingEvent.uid == uid).first()
    if match is None:
        return make_error_response('No event with specified uid', 404)
    return make_data_response(match.serialize())
"resource": ""
} |
def delete_event(uid):
    '''Delete a specific event identified by its uid. Note that only recorded
    events can be deleted. Events in the buffer for upcoming events are
    regularly replaced anyway and a manual removal could have unpredictable
    effects.
    Use ?hard=true parameter to delete the recorded files on disk as well.
    Returns 204 if the action was successful.
    Returns 404 if event does not exist
    '''
    logger.info('deleting event %s via api', uid)
    db = get_session()
    events = db.query(RecordedEvent).filter(RecordedEvent.uid == uid)
    if not events.count():
        return make_error_response('No event with specified uid', 404)
    # Only the exact string 'true' triggers a hard delete
    hard_delete = request.args.get('hard', 'false')
    if hard_delete == 'true':
        logger.info('deleting recorded files at %s', events[0].directory())
        # NOTE(review): rmtree raises if the directory is already gone —
        # confirm whether that is intended
        shutil.rmtree(events[0].directory())
    events.delete()
    db.commit()
    return make_response('', 204)
"resource": ""
} |
def modify_event(uid):
    '''Modify an event specified by its uid. The modifications for the event
    are expected as JSON with the content type correctly set in the request.
    Note that this method works for recorded events only. Upcoming events part
    of the scheduler cache cannot be modified.

    Expects a JSON:API payload of the form
    {"data": [{"type": "event", "id": <uid>, "attributes": {...}}]}
    where only 'status', 'start' and 'end' attributes may be set.
    '''
    try:
        data = request.get_json()['data'][0]
        if data['type'] != 'event' or data['id'] != uid:
            return make_error_response('Invalid data', 400)
        # Check attributes
        for key in data['attributes'].keys():
            if key not in ('status', 'start', 'end'):
                return make_error_response('Invalid data', 400)
        # Check new status: map e.g. 'finished uploading' to the numeric
        # value of Status.FINISHED_UPLOADING
        new_status = data['attributes'].get('status')
        if new_status:
            new_status = new_status.upper().replace(' ', '_')
            data['attributes']['status'] = int(getattr(Status, new_status))
    except Exception:
        # Any malformed payload (missing keys, unknown status, no JSON)
        # yields a generic 400
        return make_error_response('Invalid data', 400)
    db = get_session()
    event = db.query(RecordedEvent).filter(RecordedEvent.uid == uid).first()
    if not event:
        return make_error_response('No event with specified uid', 404)
    event.start = data['attributes'].get('start', event.start)
    event.end = data['attributes'].get('end', event.end)
    event.status = data['attributes'].get('status', event.status)
    logger.debug('Updating event %s via api', uid)
    db.commit()
    return make_data_response(event.serialize())
"resource": ""
} |
def get_config_params(properties):
    '''Extract the workflow definition and configuration parameters from the
    properties attached to the schedule.

    Returns a tuple ``(workflow_definition, [(key, value), ...])``.
    '''
    workflow_definition = ''
    params = []
    for prop in properties.split('\n'):
        if prop.startswith('org.opencastproject.workflow.config'):
            key, value = prop.split('=', 1)
            # Only the last dotted component of the key is relevant
            params.append((key.split('.')[-1], value))
        elif prop.startswith('org.opencastproject.workflow.definition'):
            workflow_definition = prop.split('=', 1)[-1]
    return workflow_definition, params
"resource": ""
} |
def ingest(event):
    '''Ingest a finished recording to the Opencast server.

    Creates a media package, attaches Dublin Core catalogs and recorded
    tracks and finally triggers the ingest, updating the service and
    recording state along the way.
    '''
    # Update status
    set_service_status(Service.INGEST, ServiceStatus.BUSY)
    notify.notify('STATUS=Uploading')
    recording_state(event.uid, 'uploading')
    update_event_status(event, Status.UPLOADING)
    # Select ingest service
    # The ingest service to use is selected at random from the available
    # ingest services to ensure that not every capture agent uses the same
    # service at the same time
    service = config('service-ingest')
    service = service[randrange(0, len(service))]
    logger.info('Selecting ingest service to use: ' + service)
    # create mediapackage
    logger.info('Creating new mediapackage')
    mediapackage = http_request(service + '/createMediaPackage')
    # extract workflow_def, workflow_config and add DC catalogs
    prop = 'org.opencastproject.capture.agent.properties'
    dcns = 'http://www.opencastproject.org/xsd/1.0/dublincore/'
    # Default to empty values so the ingest below cannot fail with a
    # NameError if no agent properties attachment is present
    workflow_def, workflow_config = '', []
    # 'or []' guards against events without any attachments
    for attachment in event.get_data().get('attach') or []:
        data = attachment.get('data')
        if attachment.get('x-apple-filename') == prop:
            workflow_def, workflow_config = get_config_params(data)
        # Check for dublincore catalogs
        elif attachment.get('fmttype') == 'application/xml' and dcns in data:
            name = attachment.get('x-apple-filename', '').rsplit('.', 1)[0]
            logger.info('Adding %s DC catalog' % name)
            fields = [('mediaPackage', mediapackage),
                      ('flavor', 'dublincore/%s' % name),
                      ('dublinCore', data.encode('utf-8'))]
            mediapackage = http_request(service + '/addDCCatalog', fields)
    # add track
    for (flavor, track) in event.get_tracks():
        logger.info('Adding track ({0} -> {1})'.format(flavor, track))
        track = track.encode('ascii', 'ignore')
        fields = [('mediaPackage', mediapackage), ('flavor', flavor),
                  ('BODY1', (pycurl.FORM_FILE, track))]
        mediapackage = http_request(service + '/addTrack', fields)
    # ingest
    logger.info('Ingest recording')
    fields = [('mediaPackage', mediapackage)]
    if workflow_def:
        fields.append(('workflowDefinitionId', workflow_def))
    if event.uid:
        fields.append(('workflowInstanceId',
                       event.uid.encode('ascii', 'ignore')))
    fields += workflow_config
    mediapackage = http_request(service + '/ingest', fields)
    # Update status
    recording_state(event.uid, 'upload_finished')
    update_event_status(event, Status.FINISHED_UPLOADING)
    notify.notify('STATUS=Running')
    set_service_status_immediate(Service.INGEST, ServiceStatus.IDLE)
    logger.info('Finished ingest')
"resource": ""
} |
def start_capture(upcoming_event):
    '''Start the capture process, creating all necessary files and directories
    as well as ingesting the captured files if no backup mode is configured.
    '''
    logger.info('Start recording')
    # First move event to recording_event table (re-use an existing row if
    # the same event was already moved, e.g. after a restart)
    db = get_session()
    event = db.query(RecordedEvent)\
        .filter(RecordedEvent.uid == upcoming_event.uid)\
        .filter(RecordedEvent.start == upcoming_event.start)\
        .first()
    if not event:
        event = RecordedEvent(upcoming_event)
        db.add(event)
        db.commit()
    # Ensure the capture directory and the per-event directory exist
    try_mkdir(config()['capture']['directory'])
    os.mkdir(event.directory())
    # Set state
    update_event_status(event, Status.RECORDING)
    recording_state(event.uid, 'capturing')
    set_service_status_immediate(Service.CAPTURE, ServiceStatus.BUSY)
    # Recording
    tracks = recording_command(event)
    event.set_tracks(tracks)
    db.commit()
    # Set status
    update_event_status(event, Status.FINISHED_RECORDING)
    recording_state(event.uid, 'capture_finished')
    set_service_status_immediate(Service.CAPTURE, ServiceStatus.IDLE)
    logger.info('Finished recording')
"resource": ""
} |
def render_to_fragment(self, request, **kwargs):
    """
    Return a simple example fragment built from the test HTML, JS and CSS.
    """
    frag = Fragment(TEST_HTML)
    frag.add_javascript(TEST_JS)
    frag.add_css(TEST_CSS)
    return frag
"resource": ""
} |
def resources(self):
    """
    Returns list of unique `FragmentResource`s by order of first appearance.
    """
    unique = []
    seen = set()
    for resource in self._resources:
        if resource not in seen:
            seen.add(resource)
            unique.append(resource)
    return unique
"resource": ""
} |
def to_dict(self):
    """
    Returns the fragment in a dictionary representation.
    """
    resource_dicts = [res._asdict() for res in self.resources]  # pylint: disable=W0212
    return {
        'content': self.content,
        'resources': resource_dicts,
        'js_init_fn': self.js_init_fn,
        'js_init_version': self.js_init_version,
        'json_init_args': self.json_init_args,
    }
"resource": ""
} |
def from_dict(cls, pods):
    """
    Returns a new Fragment built from a dictionary representation.
    """
    fragment = cls()
    fragment.content = pods['content']
    fragment._resources = [  # pylint: disable=protected-access
        FragmentResource(**spec) for spec in pods['resources']
    ]
    fragment.js_init_fn = pods['js_init_fn']
    fragment.js_init_version = pods['js_init_version']
    fragment.json_init_args = pods['json_init_args']
    return fragment
"resource": ""
} |
def add_content(self, content):
    """
    Append HTML content to the body of this fragment.

    `content` is a Unicode string with HTML to append. It must not contain
    a ``<body>`` tag, or otherwise assume that it is the only content on
    the page.
    """
    assert isinstance(content, six.text_type)
    self.content = self.content + content
"resource": ""
} |
def add_resource(self, text, mimetype, placement=None):
    """
    Add a resource needed by this Fragment.

    Other helpers, such as :func:`add_css` or :func:`add_javascript` are
    more convenient for those common types of resource.

    `text`: the actual text of this resource, as a unicode string.
    `mimetype`: the MIME type of the resource.
    `placement`: where on the page the resource should be placed:
        None: let the Fragment choose based on the MIME type.
        "head": put this resource in the ``<head>`` of the page.
        "foot": put this resource at the end of the ``<body>`` of the
            page.
    """
    placement = placement or self._default_placement(mimetype)
    self._resources.append(FragmentResource('text', text, mimetype, placement))
"resource": ""
} |
def add_resource_url(self, url, mimetype, placement=None):
    """
    Add a resource by URL needed by this Fragment.

    Other helpers, such as :func:`add_css_url` or
    :func:`add_javascript_url` are more convenient for those common types
    of resource.

    `url`: the URL to the resource.
    Other parameters are as defined for :func:`add_resource`.
    """
    placement = placement or self._default_placement(mimetype)
    res = FragmentResource('url', url, mimetype, placement)
    self._resources.append(res)
"resource": ""
} |
def initialize_js(self, js_func, json_args=None):
    """
    Register a Javascript function to initialize the Javascript resources.

    `js_func` is the name of a Javascript function defined by one of the
    Javascript resources. As part of setting up the browser's runtime
    environment, the function will be invoked, passing a runtime object
    and a DOM element.

    `json_args`: optional JSON-serializable data passed to the function.
    """
    self.js_init_fn = js_func
    # Record which version of the JS API this fragment was built against
    self.js_init_version = JS_API_VERSION
    # NOTE(review): falsy json_args (e.g. {} or []) are silently dropped —
    # confirm this is intended
    if json_args:
        self.json_init_args = json_args
"resource": ""
} |
def resources_to_html(self, placement):
    """
    Get some resource HTML for this Fragment.

    `placement` is "head" or "foot".

    Returns a unicode string, the HTML for the head or foot of the page.
    """
    # - non url js could be wrapped in an anonymous function
    # - non url css could be rewritten to match the wrapper tag
    parts = [self.resource_to_html(res)
             for res in self.resources
             if res.placement == placement]
    return '\n'.join(parts)
"resource": ""
} |
def resource_to_html(resource):
    """
    Returns `resource` wrapped in the HTML tag appropriate for its mimetype.
    """
    kind = resource.kind
    data = resource.data
    if resource.mimetype == "text/css":
        if kind == "text":
            return u"<style type='text/css'>\n%s\n</style>" % data
        if kind == "url":
            return u"<link rel='stylesheet' href='%s' type='text/css'>" % data
        raise Exception("Unrecognized resource kind %r" % kind)
    if resource.mimetype == "application/javascript":
        if kind == "text":
            return u"<script>\n%s\n</script>" % data
        if kind == "url":
            return u"<script src='%s' type='application/javascript'></script>" % data
        raise Exception("Unrecognized resource kind %r" % kind)
    if resource.mimetype == "text/html":
        # Raw HTML snippets are only supported inline
        assert kind == "text"
        return data
    raise Exception("Unrecognized mimetype %r" % resource.mimetype)
"resource": ""
} |
def get(self, request, *args, **kwargs):
    """
    Render a fragment to HTML or return JSON describing it, based on the request.
    """
    fragment = self.render_to_fragment(request, **kwargs)
    requested = request.GET.get('format') or request.POST.get('format') or 'html'
    accepts_fragment = WEB_FRAGMENT_RESPONSE_TYPE in request.META.get('HTTP_ACCEPT', 'text/html')
    if requested == 'json' or accepts_fragment:
        return JsonResponse(fragment.to_dict())
    return self.render_standalone_response(request, fragment, **kwargs)
"resource": ""
} |
def render_standalone_response(self, request, fragment, **kwargs):  # pylint: disable=unused-argument
    """
    Renders a standalone page as a response for the specified fragment.

    Returns a 204 (no content) response when there is no fragment.
    """
    if fragment is None:
        return HttpResponse(status=204)
    return HttpResponse(self.render_to_standalone_html(request, fragment, **kwargs))
"resource": ""
} |
def render_to_standalone_html(self, request, fragment, **kwargs):  # pylint: disable=unused-argument
    """
    Render the specified fragment to HTML for a standalone page.
    """
    context = {
        'head_html': fragment.head_html(),
        'body_html': fragment.body_html(),
        'foot_html': fragment.foot_html(),
    }
    return get_template(STANDALONE_TEMPLATE_NAME).render(context)
"resource": ""
} |
def calc(pvalues, lamb):
    """Compute pFDR, q-value, sensitivity and s-value statistics.

    `pvalues` must be presorted in descending order.  `lamb` is the lambda
    tuning parameter for the pi0 estimate.

    Returns a tuple ``(df, num_null, m)`` with the per-p-value statistics
    table, the estimated number of true null hypotheses and the total
    number of p-values.  (Debug print statements were removed.)
    """
    m = len(pvalues)
    # Storey-style pi0 estimate: fraction of p-values above lambda
    pi0 = (pvalues > lamb).sum() / ((1 - lamb)*m)
    pFDR = np.ones(m)
    for i in range(m):
        y = pvalues[i]
        Pr = max(1, m - i) / float(m)
        pFDR[i] = (pi0 * y) / (Pr * (1 - math.pow(1-y, m)))
    num_null = pi0*m
    num_alt = m - num_null
    num_negs = np.array(range(m))
    num_pos = m - num_negs
    pp = num_pos / float(m)
    # q-value: running minimum of pFDR from the top
    qvalues = np.ones(m)
    qvalues[0] = pFDR[0]
    for i in range(m-1):
        qvalues[i+1] = min(qvalues[i], pFDR[i+1])
    sens = ((1.0 - qvalues) * num_pos) / num_alt
    sens[sens > 1.0] = 1.0
    df = pd.DataFrame(dict(
        pvalue=pvalues,
        qvalue=qvalues,
        FDR=pFDR,
        percentile_positive=pp,
        sens=sens
    ))
    # s-value: monotonized sensitivity (cumulative max from the bottom)
    df["svalue"] = df.sens[::-1].cummax()[::-1]
    return df, num_null, m
"resource": ""
} |
def to_one_dim_array(values, as_type=None):
    """ Converts list or flattens n-dim array to 1-dim array if possible """
    if isinstance(values, (list, tuple)):
        values = np.array(values, dtype=np.float32)
    elif isinstance(values, pd.Series):
        values = values.values
    flat = values.flatten()
    assert flat.ndim == 1, "values has wrong dimension"
    return flat if as_type is None else flat.astype(as_type)
"resource": ""
} |
def lookup_values_from_error_table(scores, err_df):
    """ Find matching q-value for each score in 'scores'.

    Returns arrays of (pvalue, svalue, pep, qvalue) taken from the error
    table rows whose cutoff is nearest to each score.
    """
    ix = find_nearest_matches(np.float32(err_df.cutoff.values), np.float32(scores))
    return (err_df.pvalue.iloc[ix].values,
            err_df.svalue.iloc[ix].values,
            err_df.pep.iloc[ix].values,
            err_df.qvalue.iloc[ix].values)
"resource": ""
} |
def posterior_chromatogram_hypotheses_fast(experiment, prior_chrom_null):
    """ Compute posterior probabilities for each chromatogram

    For each chromatogram (each group_id / peptide precursor), all hypotheses
    of one peak being correct (and all others false) as well as the h0 (all
    peaks are false) are computed.
    The prior probabilities are given as arguments to the function.
    This assumes that the input data is sorted by tg_num_id

    Args:
        experiment(:class:`data_handling.Multipeptide`): the data of one experiment
        prior_chrom_null(float): the prior probability that any precursor
            is absent (all peaks are false)
    Returns:
        tuple(hypothesis, h0): two vectors that contain for each entry in
        the input dataframe the probabilities for the hypothesis that the
        peak is correct and the probability for the h0
    """
    tg_ids = experiment.df.tg_num_id.values
    # Posterior probability of being correct (1 - PEP) per row
    pp_values = 1-experiment.df["pep"].values
    current_tg_id = tg_ids[0]
    scores = []
    final_result = []
    final_result_h0 = []
    for i in range(tg_ids.shape[0]):
        id_ = tg_ids[i]
        if id_ != current_tg_id:
            # Actual computation for a single transition group (chromatogram)
            prior_pg_true = (1.0 - prior_chrom_null) / len(scores)
            rr = single_chromatogram_hypothesis_fast(
                np.array(scores), prior_chrom_null, prior_pg_true)
            # rr[0] is the h0 probability; rr[1:] are the per-peak hypotheses
            final_result.extend(rr[1:])
            final_result_h0.extend(rr[0] for i in range(len(scores)))
            # Reset for next cycle
            scores = []
            current_tg_id = id_
        # scores collects the PEPs of the current transition group
        scores.append(1.0 - pp_values[i])
    # Last cycle
    prior_pg_true = (1.0 - prior_chrom_null) / len(scores)
    rr = single_chromatogram_hypothesis_fast(np.array(scores), prior_chrom_null, prior_pg_true)
    final_result.extend(rr[1:])
    final_result_h0.extend([rr[0]] * len(scores))
    return final_result, final_result_h0
"resource": ""
} |
def final_err_table(df, num_cut_offs=51):
    """ Create artificial cutoff sample points from given range of cutoff
    values in df, number of sample points is 'num_cut_offs' """
    cutoffs = df.cutoff.values
    lo = min(cutoffs)
    hi = max(cutoffs)
    # extend the range by 5 % on both sides
    margin = (hi - lo) * 0.05
    sampled_cutoffs = np.linspace(lo - margin, hi + margin, num_cut_offs, dtype=np.float32)
    # find best matching row index for each sampled cut off:
    ix = find_nearest_matches(np.float32(cutoffs), sampled_cutoffs)
    # create sub dataframe:
    sampled_df = df.iloc[ix].copy()
    sampled_df.cutoff = sampled_cutoffs
    # remove 'old' index from input df:
    sampled_df.reset_index(inplace=True, drop=True)
    return sampled_df
"resource": ""
} |
def summary_err_table(df, qvalues=(0, 0.01, 0.02, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5)):
    """ Summary error table for some typical q-values.

    `qvalues` may be any sequence of q-values; a tuple default is used to
    avoid the mutable-default-argument pitfall.
    """
    qvalues = to_one_dim_array(qvalues)
    # find best matching rows in df for given qvalues:
    ix = find_nearest_matches(np.float32(df.qvalue.values), qvalues)
    # extract sub table
    df_sub = df.iloc[ix].copy()
    # remove duplicate hits, mark them with None / NAN:
    for i_sub, (i0, i1) in enumerate(zip(ix, ix[1:])):
        if i1 == i0:
            df_sub.iloc[i_sub + 1, :] = None
    # attach q values column
    df_sub.qvalue = qvalues
    # remove old index from original df:
    df_sub.reset_index(inplace=True, drop=True)
    return df_sub[['qvalue', 'pvalue', 'svalue', 'pep', 'fdr', 'fnr', 'fpr',
                   'tp', 'tn', 'fp', 'fn', 'cutoff']]
"resource": ""
} |
def error_statistics(target_scores, decoy_scores, parametric, pfdr, pi0_lambda, pi0_method = "smoother", pi0_smooth_df = 3, pi0_smooth_log_pi0 = False, compute_lfdr = False, lfdr_trunc = True, lfdr_monotone = True, lfdr_transf = "probit", lfdr_adj = 1.5, lfdr_eps = np.power(10.0,-8)):
    """ Takes list of decoy and target scores and creates error statistics for target values.

    Returns a tuple (error_stat, pi0) where error_stat is a DataFrame with
    one row per (sorted) target score containing p-value, q-value, s-value
    and confusion-matrix derived metrics, and pi0 is the pi0 estimate dict.
    """
    # Sort scores and drop NaNs before computing statistics
    target_scores = to_one_dim_array(target_scores)
    target_scores = np.sort(target_scores[~np.isnan(target_scores)])
    decoy_scores = to_one_dim_array(decoy_scores)
    decoy_scores = np.sort(decoy_scores[~np.isnan(decoy_scores)])
    # compute p-values using decoy scores
    if parametric:
        # parametric (normal distribution fitted to decoys)
        target_pvalues = pnorm(target_scores, decoy_scores)
    else:
        # non-parametric (empirical p-values)
        target_pvalues = pemp(target_scores, decoy_scores)
    # estimate pi0
    pi0 = pi0est(target_pvalues, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0)
    # compute q-value
    target_qvalues = qvalue(target_pvalues, pi0['pi0'], pfdr)
    # compute other metrics
    metrics = stat_metrics(target_pvalues, pi0['pi0'], pfdr)
    # generate main statistics table
    error_stat = pd.DataFrame({'cutoff': target_scores, 'pvalue': target_pvalues, 'qvalue': target_qvalues, 'svalue': metrics['svalue'], 'tp': metrics['tp'], 'fp': metrics['fp'], 'tn': metrics['tn'], 'fn': metrics['fn'], 'fpr': metrics['fpr'], 'fdr': metrics['fdr'], 'fnr': metrics['fnr']})
    # compute lfdr / PEP
    if compute_lfdr:
        error_stat['pep'] = lfdr(target_pvalues, pi0['pi0'], lfdr_trunc, lfdr_monotone, lfdr_transf, lfdr_adj, lfdr_eps)
    return error_stat, pi0
"resource": ""
} |
def find_cutoff(tt_scores, td_scores, cutoff_fdr, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0):
    """ Finds cut off target score for specified false discovery rate fdr.

    Raises a click.ClickException when there is not enough data to compute
    the error statistics.
    """
    error_stat, pi0 = error_statistics(tt_scores, td_scores, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, False)
    if not len(error_stat):
        raise click.ClickException("Too little data for calculating error statistcs.")
    # Pick the cutoff whose q-value is closest to the requested FDR
    i0 = (error_stat.qvalue - cutoff_fdr).abs().idxmin()
    cutoff = error_stat.iloc[i0]["cutoff"]
    return cutoff
"resource": ""
} |
def score(infile, outfile, classifier, xgb_autotune, apply_weights, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test):
    """
    Conduct semi-supervised learning and error-rate estimation for MS1, MS2 and transition-level data.
    """
    # Operate in-place when no explicit output file is given
    # (the redundant 'else: outfile = outfile' branch was removed)
    if outfile is None:
        outfile = infile
    # Prepare XGBoost-specific parameters
    xgb_hyperparams = {'autotune': xgb_autotune, 'autotune_num_rounds': 10, 'num_boost_round': 100, 'early_stopping_rounds': 10, 'test_size': 0.33}
    xgb_params = {'eta': 0.3, 'gamma': 0, 'max_depth': 6, 'min_child_weight': 1, 'subsample': 1, 'colsample_bytree': 1, 'colsample_bylevel': 1, 'colsample_bynode': 1, 'lambda': 1, 'alpha': 0, 'scale_pos_weight': 1, 'silent': 1, 'objective': 'binary:logitraw', 'nthread': 1, 'eval_metric': 'auc'}
    xgb_params_space = {'eta': hp.uniform('eta', 0.0, 0.3), 'gamma': hp.uniform('gamma', 0.0, 0.5), 'max_depth': hp.quniform('max_depth', 2, 8, 1), 'min_child_weight': hp.quniform('min_child_weight', 1, 5, 1), 'subsample': 1, 'colsample_bytree': 1, 'colsample_bylevel': 1, 'colsample_bynode': 1, 'lambda': hp.uniform('lambda', 0.0, 1.0), 'alpha': hp.uniform('alpha', 0.0, 1.0), 'scale_pos_weight': 1.0, 'silent': 1, 'objective': 'binary:logitraw', 'nthread': 1, 'eval_metric': 'auc'}
    # Either learn a classifier from scratch or apply pre-trained weights
    if not apply_weights:
        PyProphetLearner(infile, outfile, classifier, xgb_hyperparams, xgb_params, xgb_params_space, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test).run()
    else:
        PyProphetWeightApplier(infile, outfile, classifier, xgb_hyperparams, xgb_params, xgb_params_space, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test, apply_weights).run()
"resource": ""
} |
def ipf(infile, outfile, ipf_ms1_scoring, ipf_ms2_scoring, ipf_h0, ipf_grouped_fdr, ipf_max_precursor_pep, ipf_max_peakgroup_pep, ipf_max_precursor_peakgroup_pep, ipf_max_transition_pep):
    """
    Infer peptidoforms after scoring of MS1, MS2 and transition-level data.
    """
    # Operate in-place when no explicit output file is given
    # (the redundant 'else: outfile = outfile' branch was removed)
    if outfile is None:
        outfile = infile
    infer_peptidoforms(infile, outfile, ipf_ms1_scoring, ipf_ms2_scoring, ipf_h0, ipf_grouped_fdr, ipf_max_precursor_pep, ipf_max_peakgroup_pep, ipf_max_precursor_peakgroup_pep, ipf_max_transition_pep)
"resource": ""
} |
def peptide(infile, outfile, context, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps):
    """
    Infer peptides and conduct error-rate estimation in different contexts.
    """
    # Operate in-place when no explicit output file is given
    # (the redundant 'else: outfile = outfile' branch was removed)
    if outfile is None:
        outfile = infile
    infer_peptides(infile, outfile, context, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps)
"resource": ""
} |
def protein(infile, outfile, context, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps):
    """
    Infer proteins and conduct error-rate estimation in different contexts.

    Results are written to ``outfile``; when ``outfile`` is None the input
    file is updated in place.
    """
    # Default to in-place operation. The former ``else: outfile = outfile``
    # branch was a no-op and has been removed.
    if outfile is None:
        outfile = infile
    infer_proteins(infile, outfile, context, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps)
"resource": ""
} |
def subsample(infile, outfile, subsample_ratio, test):
    """
    Subsample OpenSWATH file to minimum for integrated scoring.

    Writes to ``outfile``; when ``outfile`` is None the input file is
    updated in place.
    """
    # Default to in-place operation. The former ``else: outfile = outfile``
    # branch was a no-op and has been removed.
    if outfile is None:
        outfile = infile
    subsample_osw(infile, outfile, subsample_ratio, test)
"resource": ""
} |
def reduce(infile, outfile):
    """
    Reduce scored PyProphet file to minimum for global scoring.

    Writes to ``outfile``; when ``outfile`` is None the input file is
    updated in place.
    """
    # NOTE(review): this CLI command shadows the builtin ``reduce``; renaming
    # would change the command name exposed to users, so it is left as-is.
    # Default to in-place operation. The former ``else: outfile = outfile``
    # branch was a no-op and has been removed.
    if outfile is None:
        outfile = infile
    reduce_osw(infile, outfile)
"resource": ""
} |
def backpropagate(infile, outfile, apply_scores):
    """
    Backpropagate multi-run peptide and protein scores to single files.

    Writes to ``outfile``; when ``outfile`` is None the input file is
    updated in place.
    """
    # Default to in-place operation. The former ``else: outfile = outfile``
    # branch was a no-op and has been removed.
    if outfile is None:
        outfile = infile
    backpropagate_oswr(infile, outfile, apply_scores)
"resource": ""
} |
def filter(sqmassfiles, infile, max_precursor_pep, max_peakgroup_pep, max_transition_pep):
    """
    Filter sqMass files
    """
    # NOTE(review): this CLI command shadows the builtin ``filter``; renaming
    # would change the command name exposed to users, so it is left as-is.
    # Delegates entirely to filter_sqmass; the three thresholds are PEP
    # cutoffs applied at precursor, peak-group and transition level.
    filter_sqmass(sqmassfiles, infile, max_precursor_pep, max_peakgroup_pep, max_transition_pep)
"resource": ""
} |
def get_group_by_id(self, group_id):
    """
    Fetch the group identified by ``group_id``.

    Returns a restclients.Group object built from the service response.
    """
    self._valid_group_id(group_id)
    resource = "{}/group/{}".format(self.API, group_id)
    payload = self._get_resource(resource)
    return self._group_from_json(payload.get("data"))
"resource": ""
} |
def create_group(self, group):
    """
    Creates a group from the passed restclients.Group object.
    """
    # Validate the group id, then PUT the group's JSON representation.
    self._valid_group_id(group.id)
    body = {"data": group.json_data()}
    # NOTE(review): the URL is built from group.name while group.id is what
    # gets validated above -- confirm whether name and id are interchangeable
    # here, or whether one of the two is a bug.
    url = "{}/group/{}".format(self.API, group.name)
    data = self._put_resource(url, headers={}, body=body)
    return self._group_from_json(data.get("data"))
"resource": ""
} |
def delete_group(self, group_id):
    """
    Delete the group identified by ``group_id``.

    Returns True when the service accepted the deletion.
    """
    self._valid_group_id(group_id)
    self._delete_resource("{}/group/{}".format(self.API, group_id))
    return True
"resource": ""
} |
def get_members(self, group_id):
    """
    Return the membership of the group identified by ``group_id`` as a
    list of restclients.GroupMember objects.
    """
    self._valid_group_id(group_id)
    url = "{}/group/{}/member".format(self.API, group_id)
    response = self._get_resource(url)
    # Build one GroupMember per entry in the response payload.
    return [self._group_member_from_json(entry)
            for entry in response.get("data")]
"resource": ""
} |
def update_members(self, group_id, members):
    """
    Replace the membership of the group identified by ``group_id``.

    Returns the list of member ids the service reported as not found
    (empty on full success).
    """
    self._valid_group_id(group_id)
    url = "{}/group/{}/member".format(self.API, group_id)
    payload = {"data": [member.json_data() for member in members]}
    response = self._put_resource(url, {"If-Match": "*"}, payload)
    errors = response.get("errors", [])
    if not errors:
        return []
    return errors[0].get("notFound", [])
"resource": ""
} |
def get_effective_member_count(self, group_id):
    """
    Return the number of effective members of the group identified by
    ``group_id`` as an int.
    """
    self._valid_group_id(group_id)
    url = "{}/group/{}/effective_member?view=count".format(self.API,
                                                          group_id)
    response = self._get_resource(url)
    return int(response.get("data").get("count"))
"resource": ""
} |
def is_effective_member(self, group_id, netid):
    """
    Returns True if the netid is in the group, False otherwise.
    """
    self._valid_group_id(group_id)
    # GWS doesn't accept EPPNs on effective member checks, for UW users.
    # The dots are escaped so only a literal "@washington.edu" suffix is
    # stripped; the previous unescaped pattern also matched strings such
    # as "@washingtonXedu" because "." matches any character.
    netid = re.sub(r'@washington\.edu', '', netid)
    url = "{}/group/{}/effective_member/{}".format(self.API,
                                                   group_id,
                                                   netid)
    try:
        self._get_resource(url)
        return True  # 200
    except DataFailureException as ex:
        # A 404 simply means "not a member"; anything else is a real error.
        if ex.status == 404:
            return False
        else:
            raise
"resource": ""
} |
def modify_conf():
    """
    Rewrite docs/conf.py: register extra Sphinx extensions and switch the
    HTML theme to sphinx_rtd_theme.

    Requires the third-party ``redbaron`` and ``ubelt`` packages
    (``pip install redbaron``).
    """
    import redbaron
    import ubelt as ub
    conf_path = 'docs/conf.py'
    source = ub.readfrom(conf_path)
    # Parse into a lossless full-syntax tree so edits preserve formatting.
    red = redbaron.RedBaron(source)
    # Insert custom extensions
    extra_extensions = [
        '"sphinxcontrib.napoleon"'
    ]
    # Locate the ``extensions = [...]`` assignment and append to its list.
    ext_node = red.find('name', value='extensions').parent
    ext_node.value.value.extend(extra_extensions)
    # Overwrite theme to read-the-docs
    theme_node = red.find('name', value='html_theme').parent
    theme_node.value.value = '"sphinx_rtd_theme"'
    ub.writeto(conf_path, red.dumps())
"resource": ""
} |
def create_dataset(self, name, shape=None, dtype=None, data=None,
                   sparse_format=None, indptr_dtype=np.int64, indices_dtype=np.int32,
                   **kwargs):
    """Create 3 datasets in a group to represent the sparse array.

    Parameters
    ----------
    name : str
        Name of the group that will hold the component datasets.
    shape, dtype, data : optional
        For sparse input these describe the matrix to store; otherwise
        they are forwarded unchanged to h5py.
    sparse_format : str, optional
        Target sparse format; when given, ``data`` is converted to it
        (must be None when ``data`` is already a Dataset or is dense).
    indptr_dtype, indices_dtype : numpy dtype
        Storage dtypes for the ``indptr`` and ``indices`` arrays.
    """
    def _store(fmt, mat_shape, data_arr, indices_arr, indptr_arr):
        # Single implementation of the repeated "attrs + three datasets"
        # layout used by every sparse branch below.
        group = self.create_group(name)
        group.attrs['h5sparse_format'] = fmt
        group.attrs['h5sparse_shape'] = mat_shape
        group.create_dataset('data', data=data_arr, dtype=dtype, **kwargs)
        group.create_dataset('indices', data=indices_arr,
                             dtype=indices_dtype, **kwargs)
        group.create_dataset('indptr', data=indptr_arr,
                             dtype=indptr_dtype, **kwargs)
        return group

    if isinstance(data, Dataset):
        # Copy an existing on-disk sparse dataset as-is.
        assert sparse_format is None
        group = _store(data.attrs['h5sparse_format'],
                       data.attrs['h5sparse_shape'],
                       data.h5py_group['data'],
                       data.h5py_group['indices'],
                       data.h5py_group['indptr'])
    elif ss.issparse(data):
        # Store an in-memory scipy sparse matrix, converting first when a
        # target format was requested.
        if sparse_format is not None:
            format_class = get_format_class(sparse_format)
            data = format_class(data)
        group = _store(get_format_str(data), data.shape,
                       data.data, data.indices, data.indptr)
    elif data is None and sparse_format is not None:
        # Create an empty sparse dataset of the requested format.
        format_class = get_format_class(sparse_format)
        if dtype is None:
            dtype = np.float64
        if shape is None:
            shape = (0, 0)
        data = format_class(shape, dtype=dtype)
        group = _store(get_format_str(data), data.shape,
                       data.data, data.indices, data.indptr)
    else:
        # forward the arguments to h5py
        assert sparse_format is None
        return super(Group, self).create_dataset(
            name, data=data, shape=shape, dtype=dtype, **kwargs)
    return Dataset(group)
"resource": ""
} |
def cli_decrypt(context, key):
    """
    Decrypt context.io_manager's stdin onto context.io_manager's stdout.

    See :py:mod:`swiftly.cli.decrypt` for context usage information.

    See :py:class:`CLIDecrypt` for more information.
    """
    with context.io_manager.with_stdout() as stdout:
        with context.io_manager.with_stdin() as stdin:
            # The first byte of the stream identifies the cipher in use.
            crypt_type = stdin.read(1)
            if crypt_type != AES256CBC:
                raise ReturnCode(
                    'contents encrypted with unsupported type %r' % crypt_type)
            for chunk in aes_decrypt(key, stdin):
                stdout.write(chunk)
            stdout.flush()
"resource": ""
} |
def get_stdin(self, os_path=None, skip_sub_command=False):
    """
    Return a stdin-suitable file-like object based on the optional
    os_path, optionally skipping any configured sub-command.
    """
    filt = None if skip_sub_command else self.stdin_sub_command
    source, path = self._get_in_and_path(
        self.stdin, self.stdin_root, filt, os_path)
    # A sub-command source is a subprocess; read from its stdout pipe.
    return source.stdout if hasattr(source, 'stdout') else source
"resource": ""
} |
def get_stdout(self, os_path=None, skip_sub_command=False):
    """
    Return a stdout-suitable file-like object based on the optional
    os_path, optionally skipping any configured sub-command.
    """
    filt = None if skip_sub_command else self.stdout_sub_command
    sink, path = self._get_out_and_path(
        self.stdout, self.stdout_root, filt, os_path)
    # A sub-command sink is a subprocess; write to its stdin pipe.
    return sink.stdin if hasattr(sink, 'stdin') else sink
"resource": ""
} |
def get_stderr(self, os_path=None, skip_sub_command=False):
    """
    Return a stderr-suitable file-like object based on the optional
    os_path, optionally skipping any configured sub-command.
    """
    filt = None if skip_sub_command else self.stderr_sub_command
    sink, path = self._get_out_and_path(
        self.stderr, self.stderr_root, filt, os_path)
    # A sub-command sink is a subprocess; write to its stdin pipe.
    return sink.stdin if hasattr(sink, 'stdin') else sink
"resource": ""
} |
def get_debug(self, os_path=None, skip_sub_command=False):
    """
    Return a debug-output-suitable file-like object based on the
    optional os_path, optionally skipping any configured sub-command.
    """
    filt = None if skip_sub_command else self.debug_sub_command
    sink, path = self._get_out_and_path(
        self.debug, self.debug_root, filt, os_path)
    # A sub-command sink is a subprocess; write to its stdin pipe.
    return sink.stdin if hasattr(sink, 'stdin') else sink
"resource": ""
} |
def with_stdin(self, os_path=None, skip_sub_command=False,
               disk_closed_callback=None):
    """
    A context manager yielding a stdin-suitable file-like object
    based on the optional os_path and optionally skipping any
    configured sub-command.

    :param os_path: Optional path to base the file-like object
        on.
    :param skip_sub_command: Set True to skip any configured
        sub-command filter.
    :param disk_closed_callback: If the backing of the file-like
        object is an actual file that will be closed,
        disk_closed_callback (if set) will be called with the
        on-disk path just after closing it.
    """
    # NOTE(review): presumably decorated with contextlib.contextmanager at
    # class level (decorator not visible in this excerpt) -- confirm.
    sub_command = None if skip_sub_command else self.stdin_sub_command
    inn, path = self._get_in_and_path(
        self.stdin, self.stdin_root, sub_command, os_path)
    try:
        # A sub-command means inn is a subprocess; hand out its stdout
        # pipe rather than the process object itself.
        if hasattr(inn, 'stdout'):
            yield inn.stdout
        else:
            yield inn
    finally:
        # Teardown: close the subprocess pipe and reap the process, then
        # close the underlying object and report any finalized disk path.
        # NOTE(review): original indentation of this cleanup block was
        # ambiguous in this copy; _wait is grouped with the subprocess
        # branch here -- confirm against upstream.
        if hasattr(inn, 'stdout'):
            self._close(inn.stdout)
            self._wait(inn, path)
        self._close(inn)
        if disk_closed_callback and path:
            disk_closed_callback(path)
"resource": ""
} |
def with_stdout(self, os_path=None, skip_sub_command=False,
                disk_closed_callback=None):
    """
    A context manager yielding a stdout-suitable file-like object
    based on the optional os_path and optionally skipping any
    configured sub-command.

    :param os_path: Optional path to base the file-like object
        on.
    :param skip_sub_command: Set True to skip any configured
        sub-command filter.
    :param disk_closed_callback: If the backing of the file-like
        object is an actual file that will be closed,
        disk_closed_callback (if set) will be called with the
        on-disk path just after closing it.
    """
    # NOTE(review): presumably decorated with contextlib.contextmanager at
    # class level (decorator not visible in this excerpt) -- confirm.
    sub_command = None if skip_sub_command else self.stdout_sub_command
    out, path = self._get_out_and_path(
        self.stdout, self.stdout_root, sub_command, os_path)
    try:
        # A sub-command means out is a subprocess; hand out its stdin
        # pipe rather than the process object itself.
        if hasattr(out, 'stdin'):
            yield out.stdin
        else:
            yield out
    finally:
        # Teardown: close the subprocess pipe and reap the process, then
        # close the underlying object and report any finalized disk path.
        # NOTE(review): original indentation of this cleanup block was
        # ambiguous in this copy; _wait is grouped with the subprocess
        # branch here -- confirm against upstream.
        if hasattr(out, 'stdin'):
            self._close(out.stdin)
            self._wait(out, path)
        self._close(out)
        if disk_closed_callback and path:
            disk_closed_callback(path)
"resource": ""
} |
def with_stderr(self, os_path=None, skip_sub_command=False,
                disk_closed_callback=None):
    """
    A context manager yielding a stderr-suitable file-like object
    based on the optional os_path and optionally skipping any
    configured sub-command.

    :param os_path: Optional path to base the file-like object
        on.
    :param skip_sub_command: Set True to skip any configured
        sub-command filter.
    :param disk_closed_callback: If the backing of the file-like
        object is an actual file that will be closed,
        disk_closed_callback (if set) will be called with the
        on-disk path just after closing it.
    """
    # NOTE(review): presumably decorated with contextlib.contextmanager at
    # class level (decorator not visible in this excerpt) -- confirm.
    sub_command = None if skip_sub_command else self.stderr_sub_command
    out, path = self._get_out_and_path(
        self.stderr, self.stderr_root, sub_command, os_path)
    try:
        # A sub-command means out is a subprocess; hand out its stdin
        # pipe rather than the process object itself.
        if hasattr(out, 'stdin'):
            yield out.stdin
        else:
            yield out
    finally:
        # Teardown: close the subprocess pipe and reap the process, then
        # close the underlying object and report any finalized disk path.
        # NOTE(review): original indentation of this cleanup block was
        # ambiguous in this copy; _wait is grouped with the subprocess
        # branch here -- confirm against upstream.
        if hasattr(out, 'stdin'):
            self._close(out.stdin)
            self._wait(out, path)
        self._close(out)
        if disk_closed_callback and path:
            disk_closed_callback(path)
"resource": ""
} |
def with_debug(self, os_path=None, skip_sub_command=False,
               disk_closed_callback=None):
    """
    A context manager yielding a debug-output-suitable file-like
    object based on the optional os_path and optionally skipping
    any configured sub-command.

    :param os_path: Optional path to base the file-like object
        on.
    :param skip_sub_command: Set True to skip any configured
        sub-command filter.
    :param disk_closed_callback: If the backing of the file-like
        object is an actual file that will be closed,
        disk_closed_callback (if set) will be called with the
        on-disk path just after closing it.
    """
    # NOTE(review): presumably decorated with contextlib.contextmanager at
    # class level (decorator not visible in this excerpt) -- confirm.
    sub_command = None if skip_sub_command else self.debug_sub_command
    out, path = self._get_out_and_path(
        self.debug, self.debug_root, sub_command, os_path)
    try:
        # A sub-command means out is a subprocess; hand out its stdin
        # pipe rather than the process object itself.
        if hasattr(out, 'stdin'):
            yield out.stdin
        else:
            yield out
    finally:
        # Teardown: close the subprocess pipe and reap the process, then
        # close the underlying object and report any finalized disk path.
        # NOTE(review): original indentation of this cleanup block was
        # ambiguous in this copy; _wait is grouped with the subprocess
        # branch here -- confirm against upstream.
        if hasattr(out, 'stdin'):
            self._close(out.stdin)
            self._wait(out, path)
        self._close(out)
        if disk_closed_callback and path:
            disk_closed_callback(path)
"resource": ""
} |
def cli_empty_account(context, yes_empty_account=False, until_empty=False):
    """
    Deletes all objects and containers in the account.

    You must set yes_empty_account to True to verify you really want to
    do this.

    By default, this will perform one pass at deleting all objects and
    containers; so if objects revert to previous versions or if new
    objects or containers otherwise arise during the process, the
    account may not be empty once done.

    Set `until_empty` to True if you want multiple passes to keep trying
    to fully empty and delete the containers. Note until_empty=True
    could run forever if something else is making new items faster than
    they're being deleted.

    See :py:mod:`swiftly.cli.delete` for context usage information.

    See :py:class:`CLIDelete` for more information.
    """
    if not yes_empty_account:
        raise ReturnCode(
            'called cli_empty_account without setting yes_empty_account=True')
    marker = None
    while True:
        # Page through the account listing using the marker.
        with context.client_manager.with_client() as client:
            status, reason, headers, contents = client.get_account(
                marker=marker, headers=context.headers, query=context.query,
                cdn=context.cdn)
            if status // 100 != 2:
                if status == 404 and context.ignore_404:
                    return
                raise ReturnCode('listing account: %s %s' % (status, reason))
        if not contents:
            # End of one pass; with until_empty, restart from the top to
            # catch anything created (or resurrected) during the pass.
            if until_empty and marker:
                marker = None
                continue
            break
        for item in contents:
            # recursive=True deletes each container's objects first.
            cli_delete(
                context, item['name'], context.headers, recursive=True)
            marker = item['name']
"resource": ""
} |
def cli_empty_container(context, path, until_empty=False):
    """
    Deletes all objects in the container.

    By default, this will perform one pass at deleting all objects in
    the container; so if objects revert to previous versions or if new
    objects otherwise arise during the process, the container may not be
    empty once done.

    Set `until_empty` to True if you want multiple passes to keep trying
    to fully empty the container. Note until_empty=True could run
    forever if something else is making new objects faster than they're
    being deleted.

    See :py:mod:`swiftly.cli.delete` for context usage information.

    See :py:class:`CLIDelete` for more information.
    """
    # NOTE(review): str.decode is a Python 2 idiom; on Python 3 a str path
    # would raise AttributeError here -- confirm the intended interpreter.
    path = path.rstrip('/').decode('utf8')
    conc = Concurrency(context.concurrency)

    def check_conc():
        # Surface any exceptions raised by completed delete workers.
        for (exc_type, exc_value, exc_tb, result) in \
                six.itervalues(conc.get_results()):
            if exc_value:
                with context.io_manager.with_stderr() as fp:
                    fp.write(str(exc_value))
                    fp.write('\n')
                    fp.flush()

    marker = None
    while True:
        # Page through the container listing using the marker.
        with context.client_manager.with_client() as client:
            status, reason, headers, contents = client.get_container(
                path, marker=marker, headers=context.headers,
                query=context.query, cdn=context.cdn)
            if status // 100 != 2:
                if status == 404 and context.ignore_404:
                    return
                raise ReturnCode(
                    'listing container %r: %s %s' % (path, status, reason))
        if not contents:
            # End of one pass; with until_empty, restart from the top to
            # catch anything created (or resurrected) during the pass.
            if until_empty and marker:
                marker = None
                continue
            break
        for item in contents:
            newpath = '%s/%s' % (path, item['name'])
            # Deletes run concurrently; a 404 during the race is fine.
            new_context = context.copy()
            new_context.ignore_404 = True
            # Reap finished workers before spawning another delete.
            check_conc()
            conc.spawn(newpath, cli_delete, new_context, newpath)
            marker = item['name']
    conc.join()
    check_conc()
"resource": ""
} |
def _stdout_filed(func):
    """
    Instance method decorator to convert an optional file keyword
    argument into an actual value, whether it be a passed value, a
    value obtained from an io_manager, or sys.stdout.
    """
    # functools.wraps preserves __name__, __module__ and __doc__; the
    # previous manual ``wrapper.__doc__ = func.__doc__`` copy lost the
    # rest of the metadata. Imported locally to avoid touching the
    # file-level import block from this excerpt.
    from functools import wraps

    @wraps(func)
    def wrapper(self, file=None):
        if file:
            return func(self, file=file)
        elif self.io_manager:
            with self.io_manager.with_stdout() as stdout:
                return func(self, file=stdout)
        else:
            return func(self, file=sys.stdout)
    return wrapper
"resource": ""
} |
def _stderr_filed(func):
    """
    Instance method decorator to convert an optional file keyword
    argument into an actual value, whether it be a passed value, a
    value obtained from an io_manager, or sys.stderr.
    """
    # functools.wraps preserves __name__, __module__ and __doc__; the
    # previous manual ``wrapper.__doc__ = func.__doc__`` copy lost the
    # rest of the metadata. Imported locally to avoid touching the
    # file-level import block from this excerpt.
    from functools import wraps

    @wraps(func)
    def wrapper(self, msg, file=None):
        if file:
            return func(self, msg, file=file)
        elif self.io_manager:
            with self.io_manager.with_stderr() as stderr:
                return func(self, msg, file=stderr)
        else:
            return func(self, msg, file=sys.stderr)
    return wrapper
"resource": ""
} |
def error(self, msg, file=None):
    """
    Output the error msg to the given file, recording that an error was
    encountered on the parser.
    """
    self.error_encountered = True
    # Emit prefix, message and newline as separate writes, mirroring the
    # original call sequence, then flush so the text is visible at once.
    for piece in (self.error_prefix, msg, '\n'):
        file.write(piece)
    file.flush()
"resource": ""
} |
def print_help(self, file=None):
    """
    Output help information to the given file, appending any raw epilog
    text verbatim after the standard optparse help.
    """
    optparse.OptionParser.print_help(self, file)
    epilog = self.raw_epilog
    if epilog:
        file.write(epilog)
    file.flush()
"resource": ""
} |
def print_usage(self, file=None):
    """
    Outputs usage information to the file if specified, or to the
    io_manager's stdout if available, or to sys.stdout.
    """
    # NOTE(review): file is presumably injected by a _stdout_filed-style
    # decorator when None -- the flush below requires a real file object.
    optparse.OptionParser.print_usage(self, file)
    file.flush()
"resource": ""
} |
def print_version(self, file=None):
    """
    Outputs version information to the file if specified, or to
    the io_manager's stdout if available, or to sys.stdout.
    """
    # optparse only writes output when a version string was configured on
    # the parser; the flush ensures anything written reaches the target.
    optparse.OptionParser.print_version(self, file)
    file.flush()
"resource": ""
} |
def request(self, method, path, contents, headers, decode_json=False,
            stream=False, query=None, cdn=False):
    """
    Performs a direct HTTP request to the Swift service.

    :param method: The request method ('GET', 'HEAD', etc.)
    :param path: The request path.
    :param contents: The body of the request. May be a string or
        a file-like object.
    :param headers: A dict of request headers and values.
    :param decode_json: If set True, the response body will be
        treated as JSON and decoded result returned instead of
        the raw contents.
    :param stream: If set True, the response body will return as
        a file-like object; otherwise, the response body will be
        read in its entirety and returned as a string. Overrides
        decode_json.
    :param query: A dict of query parameters and values to append
        to the path.
    :param cdn: If set True, the request will be sent to the CDN
        management endpoint instead of the default storage
        endpoint.
    :returns: A tuple of (status, reason, headers, contents).

        :status: An int for the HTTP status code.
        :reason: The str for the HTTP status (ex: "Ok").
        :headers: A dict with all lowercase keys of the HTTP
            headers; if a header has multiple values, it will be
            a list.
        :contents: Depending on the decode_json and stream
            settings, this will either be the raw response
            string, the JSON decoded object, or a file-like
            object.
    """
    # Subclasses must override this. NotImplementedError marks the method
    # as abstract; as a subclass of Exception, existing handlers that
    # caught the previous bare Exception still work.
    raise NotImplementedError('request method not implemented')
"resource": ""
} |
def post_account(self, headers=None, query=None, cdn=False, body=None):
    """
    POST the account and return the results, usually done to set
    X-Account-Meta-xxx headers. Existing X-Account-Meta-xxx headers
    remain untouched; send a header with an empty string value to
    remove it.

    :param headers: Additional headers to send with the request.
    :param query: Dict of query values for the request's query string.
    :param cdn: If set True, the CDN management interface is used.
    :param body: No known Swift POSTs take a body; reserved for the
        future.
    :returns: A tuple of (status, reason, headers, contents) as
        produced by :py:meth:`request`.
    """
    payload = body or ''
    return self.request('POST', '', payload, headers, query=query, cdn=cdn)
"resource": ""
} |
def delete_account(self, headers=None,
                   yes_i_mean_delete_the_account=False, query=None,
                   cdn=False, body=None):
    """
    Send a DELETE request to the account and return the results.

    With ``query['bulk-delete'] = ''`` this can be a bulk delete whose
    body is a new-line separated, url-encoded list of names to delete.
    Be careful: on clusters that support it, a plain account DELETE
    marks the account as deleted and begins removing its objects in the
    background. THERE IS NO GOING BACK!

    :param headers: Additional headers to send with the request.
    :param yes_i_mean_delete_the_account: Must be True to really delete
        the entire account, unless this is a bulk delete request
        (``body and 'bulk-delete' in query``).
    :param query: Dict of query values for the request's query string.
    :param cdn: If set True, the CDN management interface is used.
    :param body: Some account DELETE requests, like bulk delete, take a
        body.
    :returns: A tuple of (status, reason, headers, contents) as
        produced by :py:meth:`request`; a refusal returns status 0 with
        an explanatory reason.
    """
    # A bulk delete (body plus a bulk-delete query flag) does not require
    # the explicit confirmation flag.
    bulk_delete = bool(body) and bool(query) and 'bulk-delete' in query
    if not (yes_i_mean_delete_the_account or bulk_delete):
        return (0, 'yes_i_mean_delete_the_account was not set to True', {},
                '')
    return self.request(
        'DELETE', '', body or '', headers, query=query, cdn=cdn)
"resource": ""
} |
def put_container(self, container, headers=None, query=None, cdn=False,
                  body=None):
    """
    PUT the container and return the results, usually done to create
    new containers or set X-Container-Meta-xxx headers. If the
    container already exists, existing X-Container-Meta-xxx headers
    remain untouched; send a header with an empty string value to
    remove it.

    :param container: The name of the container.
    :param headers: Additional headers to send with the request.
    :param query: Dict of query values for the request's query string.
    :param cdn: If set True, the CDN management interface is used.
    :param body: Some container PUT requests, like extract-archive bulk
        uploads, take a body.
    :returns: A tuple of (status, reason, headers, contents) as
        produced by :py:meth:`request`.
    """
    return self.request(
        'PUT', self._container_path(container), body or '', headers,
        query=query, cdn=cdn)
"resource": ""
} |
def head_object(self, container, obj, headers=None, query=None, cdn=False):
    """
    HEAD the object and return the results.

    :param container: The name of the container.
    :param obj: The name of the object.
    :param headers: Additional headers to send with the request.
    :param query: Dict of query values for the request's query string.
    :param cdn: If set True, the CDN management interface is used.
    :returns: A tuple of (status, reason, headers, contents) as
        produced by :py:meth:`request`.
    """
    return self.request(
        'HEAD', self._object_path(container, obj), '', headers,
        query=query, cdn=cdn)
"resource": ""
} |
def get_object(self, container, obj, headers=None, stream=True, query=None,
               cdn=False):
    """
    GET the object and return the results.

    :param container: The name of the container.
    :param obj: The name of the object.
    :param headers: Additional headers to send with the request.
    :param stream: When True (the default) the returned contents is a
        file-like object supporting read(size); fully read it before
        issuing another request. When False the contents is a fully
        preread str.
    :param query: Dict of query values for the request's query string.
    :param cdn: If set True, the CDN management interface is used.
    :returns: A tuple of (status, reason, headers, contents) as
        produced by :py:meth:`request`.
    """
    return self.request(
        'GET', self._object_path(container, obj), '', headers,
        query=query, stream=stream, cdn=cdn)
"resource": ""
} |
def put_object(self, container, obj, contents, headers=None, query=None,
               cdn=False):
    """
    PUT the object and return the results; used to create or overwrite
    objects. X-Object-Meta-xxx and, depending on the cluster, standard
    HTTP headers such as Content-Type may be sent. Setting the ETag
    header to the MD5 of the contents adds extra storage verification.

    :param container: The name of the container.
    :param obj: The name of the object.
    :param contents: The contents to store: a str, or a file-like
        object with at least read (and, for retries on server error,
        tell and seek).
    :param headers: Additional headers to send with the request.
    :param query: Dict of query values for the request's query string.
    :param cdn: If set True, the CDN management interface is used.
    :returns: A tuple of (status, reason, headers, contents) as
        produced by :py:meth:`request`.
    """
    return self.request(
        'PUT', self._object_path(container, obj), contents, headers,
        query=query, cdn=cdn)
"resource": ""
} |
def post_object(self, container, obj, headers=None, query=None, cdn=False,
                body=None):
    """
    POST the object and return the results; used to update the object's
    header values. Unlike account and container POSTs, ALL headers must
    be sent -- any existing headers not included are removed.

    :param container: The name of the container.
    :param obj: The name of the object.
    :param headers: Additional headers to send with the request.
    :param query: Dict of query values for the request's query string.
    :param cdn: If set True, the CDN management interface is used.
    :param body: No known Swift POSTs take a body; reserved for the
        future.
    :returns: A tuple of (status, reason, headers, contents) as
        produced by :py:meth:`request`.
    """
    return self.request(
        'POST', self._object_path(container, obj), body or '', headers,
        query=query, cdn=cdn)
"resource": ""
} |
def _resolve_option(self, options, option_name, section_name):
    """Resolve an option value into options.

    Sets options.<option_name> to a resolved value. A value already in
    options overrides a value in os.environ, which overrides
    self.context.conf.

    :param options: The options instance as returned by optparse.
    :param option_name: The name of the option, such as ``auth_url``.
    :param section_name: The name of the section, such as ``swiftly``.
    """
    # An explicitly provided option always wins.
    if getattr(options, option_name, None) is not None:
        return
    prefix = section_name + '_'
    if option_name.startswith(prefix):
        environ_name = option_name.upper()
        conf_name = option_name[len(prefix):]
    else:
        environ_name = (prefix + option_name).upper()
        conf_name = option_name
    conf_value = self.context.conf.get(section_name, {}).get(conf_name)
    setattr(options, option_name, os.environ.get(environ_name, conf_value))
"resource": ""
} |
def copy(self):
    """
    Return a new CLIContext instance that is a shallow copy of the
    original, much like dict's copy method.
    """
    duplicate = CLIContext()
    excluded = ('copy', 'write_headers')
    for attr in dir(self):
        # Carry over public data attributes, skipping the methods.
        if not attr.startswith('_') and attr not in excluded:
            setattr(duplicate, attr, getattr(self, attr))
    return duplicate
"resource": ""
} |
def write_headers(self, fp, headers, mute=None):
    """
    Write the headers, title-cased, sorted, and column-aligned, to the
    file-like ``fp``, skipping any header names listed in ``mute``.
    Does nothing when ``headers`` is empty.
    """
    if not headers:
        return
    muted = mute or []
    # Pad every name to the width of the longest one plus the colon.
    width = max(len(name) for name in headers) + 1
    line_format = '%%-%ds %%s\n' % width
    for name in sorted(headers):
        if name in muted:
            continue
        fp.write(line_format % (name.title() + ':', headers[name]))
    fp.flush()
"resource": ""
} |
def cli_auth(context):
    """
    Authenticates and then outputs the resulting information.

    See :py:mod:`swiftly.cli.auth` for context usage information.
    See :py:class:`CLIAuth` for more information.
    """
    # (attribute on client, label to display) in output order.
    attr_labels = [
        ('auth_cache_path', 'Auth Cache'),
        ('auth_url', 'Auth URL'),
        ('auth_user', 'Auth User'),
        ('auth_key', 'Auth Key'),
        ('auth_tenant', 'Auth Tenant'),
        ('auth_methods', 'Auth Methods'),
        ('storage_path', 'Direct Storage Path'),
        ('cdn_path', 'Direct CDN Path'),
        ('local_path', 'Local Path'),
        ('regions', 'Regions'),
        ('default_region', 'Default Region'),
        ('region', 'Selected Region'),
        ('snet', 'SNet'),
        ('storage_url', 'Storage URL'),
        ('cdn_url', 'CDN URL'),
        ('auth_token', 'Auth Token'),
    ]
    with context.io_manager.with_stdout() as fp:
        with context.client_manager.with_client() as client:
            client.auth()
            info = []
            for attr, label in attr_labels:
                value = getattr(client, attr, None)
                if value:
                    # Regions is a sequence; display space-separated.
                    if attr == 'regions':
                        value = ' '.join(value)
                    info.append((label, value))
            if not info:
                info.append((
                    'No auth information available',
                    'Maybe no credentials were provided?'))
            fmt = '%%-%ds %%s\n' % (max(len(t) for t, v in info) + 1)
            for t, v in info:
                fp.write(fmt % (t + ':', v))
            fp.flush()
"resource": ""
} |
def generate_temp_url(method, url, seconds, key):
    """
    Returns a TempURL good for the given request method, url, and
    number of seconds from now, signed by the given key.

    :param method: The HTTP method the TempURL is valid for (GET, PUT,
        etc.); case-insensitive.
    :param url: The full storage URL of the object; must contain '/v1/'.
    :param seconds: How many seconds from now until the URL expires.
    :param key: The Temp-URL signing key, as str or bytes.
    :returns: The URL with temp_url_sig and temp_url_expires query
        parameters appended.
    """
    method = method.upper()
    base_url, object_path = url.split('/v1/')
    object_path = '/v1/' + object_path
    expires = int(time.time() + seconds)
    hmac_body = '%s\n%s\n%s' % (method, expires, object_path)
    # hmac requires bytes on Python 3; encode only when needed so
    # Python 2 byte strings pass through unchanged.
    if not isinstance(key, bytes):
        key = key.encode('utf8')
    if not isinstance(hmac_body, bytes):
        hmac_body = hmac_body.encode('utf8')
    sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
    return '%s%s?temp_url_sig=%s&temp_url_expires=%s' % (
        base_url, object_path, sig, expires)
"resource": ""
} |
def quote(value, safe='/:'):
    """
    URL-encodes the given value much like parse.quote, protecting the
    characters in ``safe``, but first ensuring the value is UTF-8
    encoded (non-strings are converted with str()).
    """
    if isinstance(value, six.text_type):
        encoded = value.encode('utf8')
    elif isinstance(value, six.string_types):
        encoded = value
    else:
        encoded = str(value)
    return parse.quote(encoded, safe)
"resource": ""
} |
def cli_fordo(context, path=None):
    """
    Issues commands for each item in an account or container listing.

    See :py:mod:`swiftly.cli.fordo` for context usage information.
    See :py:class:`CLIForDo` for more information.

    :param context: The CLI context; context.remaining_args must
        contain an ``<item>`` placeholder to substitute each name into.
    :param path: None/empty for an account listing, or a container
        name for a container listing.
    :raises ReturnCode: on an object path, a failed listing, or a
        missing ``<item>`` placeholder.
    """
    path = path.lstrip('/') if path else None
    if path and '/' in path:
        raise ReturnCode(
            'path must be an empty string or a container name; was %r' % path)
    # Listing/paging parameters all come from the parsed query options.
    limit = context.query.get('limit')
    delimiter = context.query.get('delimiter')
    prefix = context.query.get('prefix')
    marker = context.query.get('marker')
    end_marker = context.query.get('end_marker')
    conc = Concurrency(context.concurrency)
    while True:
        with context.client_manager.with_client() as client:
            if not path:
                status, reason, headers, contents = client.get_account(
                    headers=context.headers, prefix=prefix,
                    delimiter=delimiter, marker=marker, end_marker=end_marker,
                    limit=limit, query=context.query, cdn=context.cdn)
            else:
                status, reason, headers, contents = client.get_container(
                    path, headers=context.headers, prefix=prefix,
                    delimiter=delimiter, marker=marker, end_marker=end_marker,
                    limit=limit, query=context.query, cdn=context.cdn)
            if status // 100 != 2:
                if status == 404 and context.ignore_404:
                    return
                if hasattr(contents, 'read'):
                    contents.read()
                if not path:
                    raise ReturnCode(
                        'listing account: %s %s' % (status, reason))
                else:
                    raise ReturnCode(
                        'listing container %r: %s %s' % (path, status, reason))
        if not contents:
            break
        for item in contents:
            # Delimiter listings may return {'subdir': ...} entries
            # that have no 'name' key; fall back to 'subdir'.
            name = (path + '/' if path else '') + item.get(
                'name', item.get('subdir'))
            args = list(context.remaining_args)
            try:
                index = args.index('<item>')
            except ValueError:
                raise ReturnCode(
                    'No "<item>" designation found in the "do" clause.')
            args[index] = name
            # Surface any failure from already-finished workers before
            # spawning more work.
            for (exc_type, exc_value, exc_tb, result) in \
                    six.itervalues(conc.get_results()):
                if exc_value:
                    conc.join()
                    raise exc_value
            conc.spawn(name, _cli_call, context, name, args)
        # BUG FIX: the final entry of a delimiter listing may be a
        # 'subdir' entry with no 'name' key; previously this indexed
        # ['name'] directly and raised KeyError when paging.
        marker = contents[-1].get('name', contents[-1].get('subdir'))
        if limit:
            break
    conc.join()
    for (exc_type, exc_value, exc_tb, result) in \
            six.itervalues(conc.get_results()):
        if exc_value:
            conc.join()
            raise exc_value
"resource": ""
} |
def get_client(self):
    """
    Obtain a client for use: an idle one from the pool when available,
    otherwise a freshly constructed one.
    """
    try:
        pooled = self.clients.get(block=False)
    except queue.Empty:
        pooled = None
    if pooled:
        return pooled
    # Build a new client, tagging it with a unique verbose id suffix.
    self.client_id += 1
    client_kwargs = dict(self.kwargs)
    client_kwargs['verbose_id'] = client_kwargs.get(
        'verbose_id', '') + str(self.client_id)
    return self.client_class(*self.args, **client_kwargs)
"resource": ""
} |
def aes_encrypt(key, stdin, preamble=None, chunk_size=65536,
                content_length=None):
    """
    Generator that encrypts a content stream using AES 256 in CBC
    mode.

    Yields, in order: the optional preamble, the 16-byte random IV,
    and then the ciphertext in blocks. The plaintext is padded so the
    final byte of the final decrypted block holds the count (0-15) of
    usable plaintext bytes in that block; aes_decrypt reads that
    trailer to strip the padding.

    :param key: Any string to use as the encryption key.
    :param stdin: Where to read the contents from.
    :param preamble: str to yield initially useful for providing a
        hint for future readers as to the algorithm in use.
    :param chunk_size: Largest amount to read at once.
    :param content_length: The number of bytes to read from stdin.
        None or < 0 indicates reading until EOF.
    """
    if not AES256CBC_Support:
        raise Exception(
            'AES256CBC not supported; likely pycrypto is not installed')
    if preamble:
        yield preamble
    # Always use 256-bit key
    key = hashlib.sha256(key).digest()
    # At least 16 and a multiple of 16
    chunk_size = max(16, chunk_size >> 4 << 4)
    iv = Crypto.Random.new().read(16)
    yield iv
    encryptor = Crypto.Cipher.AES.new(key, Crypto.Cipher.AES.MODE_CBC, iv)
    reading = True
    # left tracks remaining bytes when a content_length was given;
    # None means "read until EOF".
    left = None
    if content_length is not None and content_length >= 0:
        left = content_length
    while reading:
        size = chunk_size
        if left is not None and size > left:
            size = left
        chunk = stdin.read(size)
        if not chunk:
            if left is not None and left > 0:
                raise IOError('Early EOF from input')
            # Input ended exactly on a block boundary: emit a trailer
            # block whose final byte (0) tells the decryptor that none
            # of its bytes are plaintext.
            # Indicates how many usable bytes in last block
            yield encryptor.encrypt('\x00' * 16)
            break
        if left is not None:
            left -= len(chunk)
            if left <= 0:
                reading = False
        block = chunk
        trailing = len(block) % 16
        # Top the block up to a 16-byte boundary, reading more input
        # if available; at EOF, pad with chr(trailing) so the last
        # byte records how many bytes of the block are real data.
        while trailing:
            size = 16 - trailing
            if left is not None and size > left:
                size = left
            chunk = stdin.read(size)
            if not chunk:
                if left is not None and left > 0:
                    raise IOError('Early EOF from input')
                reading = False
                # Indicates how many usable bytes in last block
                chunk = chr(trailing) * (16 - trailing)
            elif left is not None:
                left -= len(chunk)
                if left <= 0:
                    reading = False
            block += chunk
            trailing = len(block) % 16
        yield encryptor.encrypt(block)
"resource": ""
} |
def aes_decrypt(key, stdin, chunk_size=65536):
    """
    Generator that decrypts a content stream using AES 256 in CBC
    mode.

    Expects the stream layout produced by aes_encrypt: a 16-byte IV
    followed by ciphertext blocks, where the final byte of the final
    decrypted block holds the count (0-15) of usable plaintext bytes
    in that block.

    :param key: Any string to use as the decryption key.
    :param stdin: Where to read the encrypted data from.
    :param chunk_size: Largest amount to read at once.
    """
    if not AES256CBC_Support:
        raise Exception(
            'AES256CBC not supported; likely pycrypto is not installed')
    # Always use 256-bit key
    key = hashlib.sha256(key).digest()
    # At least 16 and a multiple of 16
    chunk_size = max(16, chunk_size >> 4 << 4)
    # Accumulate the full 16-byte IV, tolerating short reads.
    iv = stdin.read(16)
    while len(iv) < 16:
        chunk = stdin.read(16 - len(iv))
        if not chunk:
            raise IOError('EOF reading IV')
        # BUG FIX: previously the chunk was read but never appended to
        # iv, so a short first read looped forever (or raised a bogus
        # EOF) without ever completing the IV.
        iv += chunk
    decryptor = Crypto.Cipher.AES.new(key, Crypto.Cipher.AES.MODE_CBC, iv)
    data = ''
    while True:
        chunk = stdin.read(chunk_size)
        if not chunk:
            # End of stream: exactly one 16-byte block must remain; its
            # final byte says how many of its bytes are real plaintext
            # (the trailer written by aes_encrypt).
            if len(data) != 16:
                raise IOError('EOF reading encrypted stream')
            data = decryptor.decrypt(data)
            trailing = ord(data[-1])
            if trailing > 15:
                raise IOError(
                    'EOF reading encrypted stream or trailing value corrupted '
                    '%s' % trailing)
            yield data[:trailing]
            break
        data += chunk
        if len(data) > 16:
            # Always leave at least one byte pending
            trailing = (len(data) % 16) or 16
            yield decryptor.decrypt(data[:-trailing])
            data = data[-trailing:]
"resource": ""
} |
def cli_put_directory_structure(context, path):
    """
    Performs PUTs rooted at the path using a directory structure
    pointed to by context.input\_.

    See :py:mod:`swiftly.cli.put` for context usage information.
    See :py:class:`CLIPut` for more information.

    :param context: The CLI context; context.input_ must name a local
        directory whose contents will be uploaded.
    :param path: The remote destination; at minimum a container name,
        optionally followed by a pseudo-directory prefix.
    :raises ReturnCode: if context.input_ is unset or not a directory,
        or if path is empty.
    """
    if not context.input_:
        raise ReturnCode(
            'called cli_put_directory_structure without context.input_ set')
    if not os.path.isdir(context.input_):
        raise ReturnCode(
            '%r is not a directory' % context.input_)
    if not path:
        raise ReturnCode(
            'uploading a directory structure requires at least a container '
            'name')
    # Ensure the destination container exists before any uploads.
    new_context = context.copy()
    new_context.input_ = None
    container = path.split('/', 1)[0]
    cli_put_container(new_context, container)
    # Number of leading characters (the local root plus its trailing
    # separator) to strip from each dirpath when building remote names.
    ilen = len(context.input_)
    if not context.input_.endswith(os.sep):
        ilen += 1
    conc = Concurrency(context.concurrency)
    for (dirpath, dirnames, filenames) in os.walk(context.input_):
        if not dirnames and not filenames:
            # Empty directory: upload a zero-byte text/directory marker
            # object so the empty directory is represented remotely.
            new_context = context.copy()
            new_context.headers = dict(context.headers)
            new_context.headers['content-type'] = 'text/directory'
            # NOTE(review): the mtime recorded here is that of the root
            # input directory, not of dirpath -- confirm this is
            # intentional.
            new_context.headers['x-object-meta-mtime'] = \
                '%f' % os.path.getmtime(context.input_)
            new_context.input_ = None
            new_context.empty = True
            new_path = path
            if path[-1] != '/':
                new_path += '/'
            new_path += dirpath[ilen:]
            # Surface any failure from already-finished uploads before
            # spawning another.
            for (exc_type, exc_value, exc_tb, result) in \
                    six.itervalues(conc.get_results()):
                if exc_value:
                    conc.join()
                    raise exc_value
            conc.spawn(new_path, cli_put_object, new_context, new_path)
        else:
            for fname in filenames:
                # Regular file: upload it under the mirrored remote path.
                new_context = context.copy()
                new_context.input_ = os.path.join(dirpath, fname)
                new_path = path
                if path[-1] != '/':
                    new_path += '/'
                if dirpath[ilen:]:
                    new_path += dirpath[ilen:] + '/'
                new_path += fname
                # Surface any failure from already-finished uploads
                # before spawning another.
                for (exc_type, exc_value, exc_tb, result) in \
                        six.itervalues(conc.get_results()):
                    if exc_value:
                        conc.join()
                        raise exc_value
                conc.spawn(new_path, cli_put_object, new_context, new_path)
    conc.join()
    for (exc_type, exc_value, exc_tb, result) in \
            six.itervalues(conc.get_results()):
        if exc_value:
            raise exc_value
"resource": ""
} |
def cli_put_account(context):
    """
    Performs a PUT on the account.

    See :py:mod:`swiftly.cli.put` for context usage information.
    See :py:class:`CLIPut` for more information.
    """
    # '-' means "read the request body from stdin"; any other value is
    # treated as a local file path; no input means no body.
    body = None
    if context.input_:
        body = (context.io_manager.get_stdin() if context.input_ == '-'
                else open(context.input_, 'rb'))
    with context.client_manager.with_client() as client:
        status, reason, headers, contents = client.put_account(
            headers=context.headers, query=context.query, cdn=context.cdn,
            body=body)
        # Read and discard any response body before returning.
        if hasattr(contents, 'read'):
            contents.read()
        if status // 100 != 2:
            raise ReturnCode('putting account: %s %s' % (status, reason))
"resource": ""
} |
def cli_put_container(context, path):
    """
    Performs a PUT on the container.

    See :py:mod:`swiftly.cli.put` for context usage information.
    See :py:class:`CLIPut` for more information.
    """
    path = path.rstrip('/')
    # A remaining '/' would mean an object path, which is not valid here.
    if '/' in path:
        raise ReturnCode('called cli_put_container with object %r' % path)
    # '-' means "read the request body from stdin"; any other value is
    # treated as a local file path; no input means no body.
    body = None
    if context.input_:
        body = (context.io_manager.get_stdin() if context.input_ == '-'
                else open(context.input_, 'rb'))
    with context.client_manager.with_client() as client:
        status, reason, headers, contents = client.put_container(
            path, headers=context.headers, query=context.query,
            cdn=context.cdn, body=body)
        # Read and discard any response body before returning.
        if hasattr(contents, 'read'):
            contents.read()
        if status // 100 != 2:
            raise ReturnCode(
                'putting container %r: %s %s' % (path, status, reason))
"resource": ""
} |
def _get_manifest_body(context, prefix, path2info, put_headers):
    """
    Returns the body for the manifest object and modifies put_headers
    accordingly.

    path2info is a dict like {"path": (size, etag)}. With static
    segments a JSON segment list is produced; otherwise a dynamic
    manifest (empty body plus x-object-manifest header) is used.
    """
    if not context.static_segments:
        # Dynamic large object: empty body, prefix in the header.
        put_headers['content-length'] = '0'
        put_headers['x-object-manifest'] = prefix
        return ''
    segments = [{'path': '/' + p, 'size_bytes': s, 'etag': e}
                for p, (s, e) in sorted(path2info.items())]
    body = json.dumps(segments)
    put_headers['content-length'] = str(len(body))
    context.query['multipart-manifest'] = 'put'
    return body
"resource": ""
} |
def _create_container(context, path, l_mtime, size):
    """
    Creates the segments container for the file at `path` and returns
    the segment prefix "container/object/mtime/size/".
    """
    # Strip any options that only apply to the main object PUT.
    segment_context = context.copy()
    segment_context.input_ = None
    segment_context.headers = None
    segment_context.query = None
    parts = path.split('/', 1)
    container = parts[0] + '_segments'
    cli_put_container(segment_context, container)
    return '%s/%s/%s/%s/' % (container, parts[1], l_mtime, size)
"resource": ""
} |
def cli_tempurl(context, method, path, seconds=None, use_container=False):
    """
    Generates a TempURL and sends that to the context.io_manager's
    stdout.

    See :py:mod:`swiftly.cli.tempurl` for context usage information.
    See :py:class:`CLITempURL` for more information.

    :param context: The :py:class:`swiftly.cli.context.CLIContext` to
        use.
    :param method: The method for the TempURL (GET, PUT, etc.)
    :param path: The path the TempURL should direct to.
    :param seconds: The number of seconds the TempURL should be good
        for. Default: 3600
    :param use_container: If True, will create a container level TempURL
        using X-Container-Meta-Temp-Url-Key instead of
        X-Account-Meta-Temp-Url-Key.
    """
    # contextlib.nested was deprecated in 2.7 and removed in Python 3;
    # plain nested with statements are the safe equivalent.
    with context.io_manager.with_stdout() as fp:
        with context.client_manager.with_client() as client:
            method = method.upper()
            path = path.lstrip('/')
            seconds = seconds if seconds is not None else 3600
            if '/' not in path:
                raise ReturnCode(
                    'invalid tempurl path %r; should have a / within it' %
                    path)
            # The signing key comes from container metadata when
            # use_container is set, otherwise from account metadata.
            if use_container:
                key_type = 'container'
                container = path.split('/', 1)[0]
                status, reason, headers, contents = \
                    client.head_container(container)
            else:
                key_type = 'account'
                status, reason, headers, contents = \
                    client.head_account()
            if status // 100 != 2:
                raise ReturnCode(
                    'obtaining X-%s-Meta-Temp-Url-Key: %s %s' %
                    (key_type.title(), status, reason))
            key = headers.get('x-%s-meta-temp-url-key' % key_type)
            if not key:
                raise ReturnCode(
                    'there is no X-%s-Meta-Temp-Url-Key set for this %s' %
                    (key_type.title(), key_type))
            url = client.storage_url + '/' + path
            fp.write(generate_temp_url(method, url, seconds, key))
            fp.write('\n')
            fp.flush()
"resource": ""
} |
def cli_trans(context, x_trans_id):
    """
    Translates any information that can be determined from the
    x_trans_id and sends that to the context.io_manager's stdout.

    See :py:mod:`swiftly.cli.trans` for context usage information.
    See :py:class:`CLITrans` for more information.
    """
    with context.io_manager.with_stdout() as fp:
        trans_time = get_trans_id_time(x_trans_id)
        lines = ['X-Trans-Id: ' + x_trans_id + '\n']
        if trans_time:
            lines.append('Time Stamp: %s\n' % trans_time)
            lines.append('UTC Time: %s\n' % time.strftime(
                '%a %Y-%m-%d %H:%M:%S UTC', time.gmtime(trans_time)))
        else:
            # Older transaction ids carry no embedded timestamp.
            lines.append(
                'Time Stamp: None, old style id with no time embedded\n')
            lines.append(
                'UTC Time: None, old style id with no time embedded\n')
        # Everything past the fixed-width portion is extra info.
        lines.append('Additional Info: ' + x_trans_id[34:] + '\n')
        fp.write(''.join(lines))
        fp.flush()
"resource": ""
} |
def cli_help(context, command_name, general_parser, command_parsers):
    """
    Outputs help information.

    See :py:mod:`swiftly.cli.help` for context usage information.
    See :py:class:`CLIHelp` for more information.

    :param context: The :py:class:`swiftly.cli.context.CLIContext` to
        use.
    :param command_name: The command_name to output help information
        for, or set to None or an empty string to output the general
        help information.
    :param general_parser: The
        :py:class:`swiftly.cli.optionparser.OptionParser` for general
        usage.
    :param command_parsers: A dict of (name, :py:class:`CLICommand`)
        for specific command usage.
    """
    # 'for' is the user-facing alias of the fordo command.
    if command_name == 'for':
        command_name = 'fordo'
    with context.io_manager.with_stdout() as stdout:
        if not command_name:
            general_parser.print_help(stdout)
            return
        command = command_parsers.get(command_name)
        if command is None:
            raise ReturnCode('unknown command %r' % command_name)
        command.option_parser.print_help(stdout)
"resource": ""
} |
def is_empty(self):
    """
    Check whether the "file" is empty by attempting to read a single
    byte; any byte actually read is pushed back onto the buffer so no
    data is lost.
    """
    peeked = self.read(1)
    if not peeked:
        return True
    self.buf = (peeked + self.buf) if self.buf else peeked
    return False
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.