Dataset schema (one row per source file; column name: dtype):

- hexsha: string | size: int64 | ext: string | lang: string
- max_stars_repo_path: string | max_stars_repo_name: string | max_stars_repo_head_hexsha: string | max_stars_repo_licenses: list | max_stars_count: int64 | max_stars_repo_stars_event_min_datetime: string | max_stars_repo_stars_event_max_datetime: string
- max_issues_repo_path, max_issues_repo_name, max_issues_repo_head_hexsha, max_issues_repo_licenses, max_issues_count, max_issues_repo_issues_event_min_datetime, max_issues_repo_issues_event_max_datetime: same dtypes as the max_stars_* group
- max_forks_repo_path, max_forks_repo_name, max_forks_repo_head_hexsha, max_forks_repo_licenses, max_forks_count, max_forks_repo_forks_event_min_datetime, max_forks_repo_forks_event_max_datetime: same dtypes as the max_stars_* group
- content: string | avg_line_length: float64 | max_line_length: int64 | alphanum_fraction: float64
- Quality-signal columns (all float64 unless noted): qsc_code_num_words_quality_signal (int64), qsc_code_num_chars_quality_signal, qsc_code_mean_word_length_quality_signal, qsc_code_frac_words_unique_quality_signal, qsc_code_frac_chars_top_2grams_quality_signal, qsc_code_frac_chars_top_3grams_quality_signal, qsc_code_frac_chars_top_4grams_quality_signal, qsc_code_frac_chars_dupe_5grams_quality_signal through qsc_code_frac_chars_dupe_10grams_quality_signal, qsc_code_frac_chars_replacement_symbols_quality_signal, qsc_code_frac_chars_digital_quality_signal, qsc_code_frac_chars_whitespace_quality_signal, qsc_code_size_file_byte_quality_signal, qsc_code_num_lines_quality_signal, qsc_code_num_chars_line_max_quality_signal, qsc_code_num_chars_line_mean_quality_signal, qsc_code_frac_chars_alphabet_quality_signal, qsc_code_frac_chars_comments_quality_signal, qsc_code_cate_xml_start_quality_signal, qsc_code_frac_lines_dupe_lines_quality_signal, qsc_code_cate_autogen_quality_signal, qsc_code_frac_lines_long_string_quality_signal, qsc_code_frac_chars_string_length_quality_signal, qsc_code_frac_chars_long_word_length_quality_signal, qsc_code_frac_lines_string_concat_quality_signal, qsc_code_cate_encoded_data_quality_signal, qsc_code_frac_chars_hex_words_quality_signal, qsc_code_frac_lines_prompt_comments_quality_signal, qsc_code_frac_lines_assert_quality_signal, qsc_codepython_cate_ast_quality_signal, qsc_codepython_frac_lines_func_ratio_quality_signal, qsc_codepython_cate_var_zero_quality_signal (bool), qsc_codepython_frac_lines_pass_quality_signal, qsc_codepython_frac_lines_import_quality_signal, qsc_codepython_frac_lines_simplefunc_quality_signal, qsc_codepython_score_lines_no_logic_quality_signal, qsc_codepython_frac_lines_print_quality_signal
- Unsuffixed duplicates of the same 41 quality-signal columns (qsc_code_num_words ... qsc_codepython_frac_lines_print): int64, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
- effective: string | hits: int64

Each record below lists the metadata fields, then the file content, then the numeric statistics in the schema order above.
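For orientation, a minimal sketch of how rows with this schema might be inspected, assuming the table has been exported to a Parquet file; the file name and the pandas-based workflow are assumptions for illustration, not part of the dataset:

import pandas as pd

# Hypothetical export of this table; the path is an assumption.
df = pd.read_parquet("code_rows.parquet")

# Keep Python files that do not look auto-generated and are not dominated by
# duplicated lines, using quality-signal columns from the schema above.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)
    & (df["qsc_code_frac_lines_dupe_lines_quality_signal"] < 0.2)
)
print(df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size"]].head())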
hexsha: b9ad055e162f0001e288ab22dec6a5a4746fd51d | size: 2,786 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): AGhaderi/spatial_attenNCM @ 1f7edf17f55d804d2ae3360d23623c9ab5035518 | path: Neuro-Cognitive Models/Runs/Nonhier_run/res_nonhier.py | licenses: ["MIT"]
max_stars_count, max_issues_count, max_forks_count and all event datetimes: null
content:
#!/home/a.ghaderi/.conda/envs/envjm/bin/python
# Model 2
import pystan
import pandas as pd
import numpy as np
import sys
sys.path.append('../../')
import utils
parts = 1
data = utils.get_data() # load dataset
data = data[data['participant']==parts]
mis = np.where((data['n200lat']<.101)|(data['n200lat']>.248))[0] # indices of missing n200lat data
obs = np.where((data['n200lat']>.101)&(data['n200lat']<.248))[0] # indices of observed n200lat data
N_mis = mis.shape[0] # number of missing data points
N_obs = obs.shape[0] # number of observed data points
modelfile = '../../stans/res_nonhier.stan' # path to the Stan model file
f = open(modelfile, 'r')
model_wiener = f.read()
sm = pystan.StanModel(model_code=model_wiener) # compile the Stan model
ncohers = 2 #Number of coherence conditions
nspats = 2 #Number of spatial conditions
nconds = 4 #Number of conditions
y = data['y'].to_numpy()
cond_coher = data['cond_coher'].to_numpy()
cond_spat = data['cond_spat'].to_numpy()
conds = data['conds'].to_numpy()
n200lat = data['n200lat'].to_numpy()
# set input data for the Stan model
data_winner = {'N_obs':N_obs, # number of trial-level observations
    'N_mis':N_mis, # number of trial-level missing data points
    'ncohers':ncohers, # number of coherence conditions
    'nspats':nspats, # number of spatial conditions
    'nconds':nconds, # number of conditions
    'y':np.concatenate([y[obs],y[mis]]), # acc*rt in seconds for observed and missing trials
    'cond_coher':np.concatenate([cond_coher[obs],cond_coher[mis]]), # coherence index for each trial
    'cond_spat':np.concatenate([cond_spat[obs],cond_spat[mis]]), # spatial index for each trial
    'conds':np.concatenate([conds[obs],conds[mis]]), # condition index for each trial
    'n200lat_obs':n200lat[obs]} # n200 latency for each observed trial
# setting MCMC arguments
niter = 10000
nwarmup = 4000
nchains = 1
thin = 1
initials = [] # initial sampling
for c in range(0, nchains):
chaininit = {
'delta': np.random.uniform(1, 3, size=ncohers),
'alpha': np.random.uniform(.5, 1.),
'eta': np.random.uniform(.01, .2),
'res': np.random.uniform(.01, .02, size=nspats),
'n200sub': np.random.uniform(.11, .2, size=nconds),
'lambda': np.random.uniform(.01, .02),
'n200lat_mis': np.random.uniform(.11, .2, size = N_mis)
}
initials.append(chaininit)
# Train the model and generate samples
fit = sm.sampling(data=data_winner, iter=niter, chains=nchains, warmup=nwarmup, thin=thin, init=initials)
utils.to_pickle(stan_model=sm, stan_fit=fit, save_path='../../save/nonhier/'+str(parts)+'_res_nonhier.pkl')
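A minimal sketch (not part of the stored script) of how the fitted model above could be summarized before pickling, assuming the PyStan 2.x API that the script uses; the parameter names are taken from the initial-value dictionary:

# Hedged sketch: inspect posterior draws with PyStan 2's fit.extract().
trace = fit.extract(permuted=True)           # dict of parameter name -> draws
print(trace['delta'].mean(axis=0))           # mean drift per coherence condition
print(trace['alpha'].mean(), trace['eta'].mean())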
avg_line_length: 39.8 | max_line_length: 116 | alphanum_fraction: 0.648959
qsc_*_quality_signal values (in schema order): 390, 2786, 4.541026, 0.341026, 0.045172, 0.059289, 0.028797, 0.188594, 0.094862, 0.040655, 0.040655, 0.040655, 0, 0, 0.041892, 0.203159, 2786, 69, 117, 40.376812, 0.755856, 0.262742, 0, 0, 0, 0, 0.121362, 0.013814, 0, 0, 0, 0, 0, 1, 0, false, 0, 0.09434, 0, 0.09434, 0
unsuffixed qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: b9adc3a3c0f82e03cf53dd13486c80b1bb9dbf85 | size: 6,691 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): refgenomics/rq-dashboard @ cdfadd2b9aa9a66b0594fd5573e3c45fa8643f05 | path: rq_dashboard/dashboard.py | licenses: ["BSD-2-Clause-FreeBSD"]
max_stars_count, max_issues_count, max_forks_count and all event datetimes: null
content:
from redis import Redis
from redis import from_url
from rq import push_connection, pop_connection
from rq.job import Job
from functools import wraps
import times
from flask import Blueprint
from flask import current_app, url_for, abort
from flask import render_template
from rq import Queue, Worker
from rq import cancel_job, requeue_job
from rq import get_failed_queue
from math import ceil
dashboard = Blueprint('rq_dashboard', __name__,
template_folder='templates',
static_folder='static',
)
@dashboard.before_request
def authentication_hook():
""" Allow the parent app to authenticate user's access to the dashboard
with it's own auth_handler method that must return True or False
"""
auth_handler = current_app.extensions['rq-dashboard'].auth_handler
if auth_handler and not auth_handler():
abort(401)
@dashboard.before_app_first_request
def setup_rq_connection():
if current_app.config.get('REDIS_URL'):
current_app.redis_conn = from_url(current_app.config.get('REDIS_URL'))
else:
current_app.redis_conn = Redis(host=current_app.config.get('REDIS_HOST', 'localhost'),
port=current_app.config.get('REDIS_PORT', 6379),
password=current_app.config.get('REDIS_PASSWORD', None),
db=current_app.config.get('REDIS_DB', 0))
@dashboard.before_request
def push_rq_connection():
push_connection(current_app.redis_conn)
@dashboard.teardown_request
def pop_rq_connection(exception=None):
pop_connection()
def jsonify(f):
@wraps(f)
def _wrapped(*args, **kwargs):
from flask import jsonify as flask_jsonify
try:
result_dict = f(*args, **kwargs)
except Exception as e:
result_dict = dict(status='error')
if current_app.config['DEBUG']:
result_dict['reason'] = str(e)
from traceback import format_exc
result_dict['exc_info'] = format_exc()
return flask_jsonify(**result_dict)
return _wrapped
def serialize_queues(queues):
return [dict(name=q.name, count=q.count, url=url_for('.overview',
queue_name=q.name)) for q in queues]
def serialize_date(dt):
if dt is None:
return None
return times.format(dt, 'UTC')
def serialize_job(job):
return dict(
id=job.id,
created_at=serialize_date(job.created_at),
enqueued_at=serialize_date(job.enqueued_at),
ended_at=serialize_date(job.ended_at),
origin=job.origin,
result=job._result,
exc_info=job.exc_info,
description=job.description)
def remove_none_values(input_dict):
return dict([ (k,v) for k,v in input_dict.items() if v is not None ])
def pagination_window(total_items, cur_page, per_page=5, window_size=10):
all_pages = range(1, int(ceil(total_items / float(per_page))) + 1)
result = all_pages  # fall back to the full page list when no windowing is applied
if (window_size >= 1):
pages_window_start = int(max(0, min(len(all_pages) - window_size, (cur_page-1) - ceil(window_size / 2.0))))
pages_window_end = int(pages_window_start + window_size)
result = all_pages[pages_window_start:pages_window_end]
return result
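# Illustrative note (not in the original file): with total_items=53, per_page=5 and
# window_size=10, all_pages is 1..11 and pagination_window(53, 7) returns pages 2..11,
# i.e. a ten-page window centred near the current page and clamped to the last page.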
@dashboard.route('/', defaults={'queue_name': None, 'page': '1'})
@dashboard.route('/<queue_name>', defaults={'page': '1'})
@dashboard.route('/<queue_name>/<page>')
def overview(queue_name, page):
if queue_name is None:
# Show the failed queue by default if it contains any jobs
failed = Queue('failed')
if not failed.is_empty():
queue = failed
else:
queue = Queue()
else:
queue = Queue(queue_name)
return render_template('rq_dashboard/dashboard.html',
workers=Worker.all(),
queue=queue,
page=page,
queues=Queue.all(),
rq_url_prefix=url_for('.overview'))
@dashboard.route('/job/<job_id>/cancel', methods=['POST'])
@jsonify
def cancel_job_view(job_id):
rq_job = Job.fetch(job_id)
if rq_job.status == "failed":
rq_job.delete()
else:
rq_job.cancel()
return dict(status='OK')
@dashboard.route('/job/<job_id>/requeue', methods=['POST'])
@jsonify
def requeue_job_view(job_id):
requeue_job(job_id)
return dict(status='OK')
@dashboard.route('/requeue-all', methods=['GET', 'POST'])
@jsonify
def requeue_all():
fq = get_failed_queue()
job_ids = fq.job_ids
count = len(job_ids)
for job_id in job_ids:
requeue_job(job_id)
return dict(status='OK', count=count)
@dashboard.route('/queue/<queue_name>/empty', methods=['POST'])
@jsonify
def empty_queue(queue_name):
q = Queue(queue_name)
q.empty()
return dict(status='OK')
@dashboard.route('/queue/<queue_name>/compact', methods=['POST'])
@jsonify
def compact_queue(queue_name):
q = Queue(queue_name)
q.compact()
return dict(status='OK')
@dashboard.route('/queues.json')
@jsonify
def list_queues():
queues = serialize_queues(sorted(Queue.all()))
return dict(queues=queues)
@dashboard.route('/jobs/<queue_name>/<page>.json')
@jsonify
def list_jobs(queue_name, page):
current_page = int(page)
queue = Queue(queue_name)
per_page = 5
total_items = queue.count
pages_numbers_in_window = pagination_window(total_items, current_page, per_page)
pages_in_window = [ dict(number=p, url=url_for('.overview',
queue_name=queue_name, page=p)) for p in pages_numbers_in_window ]
last_page = int(ceil(total_items / float(per_page)))
prev_page = None
if current_page > 1:
prev_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page-1)))
next_page = None
if current_page < last_page:
next_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page+1)))
pagination = remove_none_values(
dict(pages_in_window=pages_in_window,
next_page=next_page,
prev_page=prev_page))
offset = (current_page - 1) * per_page
jobs = [serialize_job(job) for job in queue.get_jobs(offset, per_page)]
return dict(name=queue.name, jobs=jobs, pagination=pagination)
@dashboard.route('/workers.json')
@jsonify
def list_workers():
def serialize_queue_names(worker):
return [q.name for q in worker.queues]
workers = [dict(name=worker.name, queues=serialize_queue_names(worker),
state=worker.get_state()) for worker in Worker.all()]
return dict(workers=workers)
@dashboard.context_processor
def inject_interval():
interval = current_app.config.get('RQ_POLL_INTERVAL', 2500)
return dict(poll_interval=interval)
avg_line_length: 29.606195 | max_line_length: 115 | alphanum_fraction: 0.67658
qsc_*_quality_signal values (in schema order): 929, 6691, 4.624327, 0.19591, 0.048184, 0.029795, 0.030959, 0.196229, 0.136639, 0.079376, 0.065875, 0.036546, 0.027467, 0, 0.005448, 0.204454, 6691, 225, 116, 29.737778, 0.801616, 0.028396, 0, 0.139535, 0, 0, 0.074371, 0.020059, 0, 0, 0, 0, 0, 1, 0.127907, false, 0.005814, 0.087209, 0.023256, 0.325581, 0.011628
unsuffixed qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: b9b691941c62b002880bb1f21ca60b0e932e41c1 | size: 3,574 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): Gattocrucco/sipmfilter @ 74215d6c53b998808fc6c677b46030234d996bdf | path: peaksampl.py | licenses: ["CC-BY-4.0", "MIT"]
max_stars_count, max_issues_count, max_forks_count and all event datetimes: null
content:
import numpy as np
def _adddims(a, b):
n = max(a.ndim, b.ndim)
a = np.expand_dims(a, tuple(range(n - a.ndim)))
b = np.expand_dims(b, tuple(range(n - b.ndim)))
return a, b
def _yz(y, z, t, yout):
"""
Shared implementation of peaksampl and sumpeaks.
"""
y = np.asarray(y)
z = np.asarray(z)
t = np.asarray(t)
y = np.pad(y, [(0, 0)] * (y.ndim - 1) + [(1, 1)], constant_values=yout)
offset = np.argmax(np.abs(y), axis=-1)
ampl = np.take_along_axis(y, np.expand_dims(offset, -1), -1)
ampl = np.squeeze(ampl, -1)
indices = t[..., :, None] - t[..., None, :] + offset[..., None, None]
indices = np.minimum(indices, y.shape[-1] - 1)
indices = np.maximum(indices, 0)
N = t.shape[-1]
indices = indices.reshape(indices.shape[:-2] + (N * N,))
n = max(y.ndim, indices.ndim)
y, indices = _adddims(y, indices)
y = np.take_along_axis(y, indices, -1)
eps = np.finfo(float).eps * N * N * ampl
y[..., ::N + 1] += np.expand_dims(eps, -1)
y = y.reshape(y.shape[:-1] + (N, N))
z = z[..., None]
y, z = _adddims(y, z)
return y, z
def peaksampl(y, z, t, yout=0):
"""
Get peak amplitudes given their sum.
This assumes that the position of the signals is given by peaks positions
even when they are summed.
Parameters
----------
y : array (..., M,)
The single signal shape.
z : array (..., N,)
The peak height in the sum of the signals for each peak.
t : int array (..., N,)
The indices of the peaks in the sum.
yout : scalar
The value of the signal outside the provided values, default 0.
Return
------
a : array (..., N),
The amplitudes such that z_i = sum_j a_j * y[t_i - t_j].
Broadcasted along non-last axis.
"""
y, z = _yz(y, z, t, yout)
a = np.linalg.solve(y, z)
return np.squeeze(a, -1)
def sumpeaks(y, a, t, yout=0):
"""
Compute the peak heights of a sum of signals.
This assumes that the position of the peaks is given by the signal
positions even when they are summed.
Parameters
----------
y : array (..., M,)
The single signal shape.
a : array (..., N,)
The amplitudes of the signals (`y` is multiplied by `a`).
t : int array (..., N,)
The indices of the position of the signals.
yout : scalar
The value of the signal outside the provided values, default 0.
Return
------
z : array (..., N,)
The peak height in the sum of the signals for each signal. Broadcasted
along non-last axis.
"""
y, a = _yz(y, a, t, yout)
z = np.matmul(y, a)
return np.squeeze(z, axis=-1)
if __name__ == '__main__':
from matplotlib import pyplot as plt
from scipy import signal
y = np.exp(-np.linspace(0, 10, 1000) / 10)
i = np.arange(1, 1000)
t0 = np.array([10, 340, 523])
a0 = np.array([3, 2, 1])
indices = i - t0[:, None]
z = np.take(y, indices, mode='clip') * a0[:, None]
z = np.where((indices < 0) | (indices >= len(y)), 0, z)
z = np.sum(z, axis=0)
t, = signal.argrelmax(z)
assert len(t) == len(t0)
a = peaksampl(y, z[t], t)
h = sumpeaks(y, a, t)
fig, ax = plt.subplots(num='peaksampl', clear=True)
ax.plot(z, color='#f55')
ax.vlines(t0, 0, a0, color='gray', zorder=3)
ax.vlines(t, 0, a, linestyle='--', zorder=3)
ax.plot(t, h, 'ok')
ax.grid('major', linestyle='--')
fig.tight_layout()
fig.show()
avg_line_length: 28.822581 | max_line_length: 78 | alphanum_fraction: 0.546726
qsc_*_quality_signal values (in schema order): 560, 3574, 3.4375, 0.255357, 0.01039, 0.028052, 0.010909, 0.336104, 0.275325, 0.246234, 0.214026, 0.188052, 0.188052, 0, 0.02549, 0.286514, 3574, 123, 79, 29.056911, 0.729412, 0.339675, 0, 0, 0, 0, 0.01857, 0, 0, 0, 0, 0, 0.016949, 1, 0.067797, false, 0, 0.050847, 0, 0.186441, 0
unsuffixed qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: b9b9340675c6ceead7ff166bf8fe4d65fa580b58 | size: 4,597 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): henrikhorluck/tdt4140-washlists @ a75c3bc38a3f915eb48cf3e9ecba848f46a2bcaa | path: backend/Washlist/tests.py | licenses: ["MIT"]
max_issues_count: 2 (issues events 2020-05-02T18:17:44.000Z to 2020-05-02T18:18:02.000Z) | max_stars_count, max_forks_count and their event datetimes: null
content:
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from Dormroom.models import Dormroom
from SIFUser.mixins import AuthTestMixin
from StudentVillage.models import StudentVillage
from Washlist.jobs import reset_washlists
from Washlist.models.Templates import TemplateListItem, TemplateWashList
from Washlist.models.WashLists import ListItem
from Washlist.serializer import TemplateWashListSerializer
class WashListTemplateTest(TestCase):
room = None
def setUp(self):
village = StudentVillage.objects.create(name="Moholt")
self.room = Dormroom.objects.create(number=1, village=village)
temp_list = TemplateWashList.objects.create(title="Moholt")
village.templateWashList = temp_list
village.save()
def test_add_to_template_adds_to_each_list(self):
desc = "Vask badet"
temp_list = TemplateWashList.objects.get(title="Moholt")
TemplateListItem.objects.create(description=desc, washlist=temp_list).save()
self.assertEqual(desc, ListItem.objects.get(dormroom=self.room).description)
class WeeklyResetOfWashlistsTest(TestCase):
def setUp(self):
"""
Create a Washlist item that is completed.
The method also sets up a village and a room to relate the Washlist item to,
in order to satisfy the db constraints
"""
village = StudentVillage.objects.create(name="Moholt")
self.room = Dormroom.objects.create(number=1, village=village)
temp_list = TemplateWashList.objects.create(title="Moholt")
village.templateWashList = temp_list
village.save()
self.item = ListItem.objects.create(
pk=1, dormroom=self.room, desc="Vask badet", completed=True
)
self.item.save()
def test_job_resets_items(self):
"""
Test that the job that resets Washlist items, when run manually, actually resets the
Washlist items in the database
"""
reset_washlists()
self.assertEqual(False, ListItem.objects.get(pk=1).completed)
class WashlistTemplateAPITest(AuthTestMixin):
def setUp(self):
super().setUp()
self.temp_list = TemplateWashList.objects.create(title="Moholt")
village = StudentVillage.objects.create(
name="Moholt", templateWashList=self.temp_list
)
self.room = Dormroom.objects.create(number=1, village=village)
self.item = ListItem.objects.create(
pk=1, dormroom=self.room, desc="Vask badet", completed=True
)
def test_get_template_list(self):
url = reverse("templatewashlist-list")
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data[0],
TemplateWashListSerializer(
TemplateWashList.objects.get(title="Moholt")
).data,
)
def test_get_detail_template_list(self):
url = reverse("templatewashlist-detail", args=[1])
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data,
TemplateWashListSerializer(
TemplateWashList.objects.get(title="Moholt")
).data,
)
def test_add_template_washlist(self):
url = reverse("templatewashlist-list")
response = self.client.post(
url, {"title": "Tyholt", "village": 1}, HTTP_AUTHORIZATION=self.auth
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(
response.data,
TemplateWashListSerializer(
TemplateWashList.objects.get(title="Tyholt")
).data,
)
def test_partial_update(self):
url = reverse("templatewashlist-detail", args=[1])
response = self.client.patch(
url, {"title": "Berg"}, HTTP_AUTHORIZATION=self.auth
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data,
TemplateWashListSerializer(TemplateWashList.objects.get(title="Berg")).data,
)
def test_destroy(self):
url = reverse("templatewashlist-detail", args=[1])
response = self.client.delete(url, HTTP_AUTHORIZATION=self.auth)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(TemplateWashList.objects.count(), 0)
avg_line_length: 35.091603 | max_line_length: 92 | alphanum_fraction: 0.67022
qsc_*_quality_signal values (in schema order): 493, 4597, 6.137931, 0.227181, 0.051553, 0.068407, 0.051223, 0.575017, 0.562789, 0.542631, 0.542631, 0.496034, 0.421679, 0, 0.00766, 0.233196, 4597, 130, 93, 35.361538, 0.85078, 0.052861, 0, 0.452632, 0, 0, 0.054168, 0.025916, 0, 0, 0, 0, 0.126316, 1, 0.105263, false, 0, 0.105263, 0, 0.252632, 0
unsuffixed qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: b9ba39e57d52ad0baaeb81fbe95a03b7bb17d4ad | size: 3,792 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): piyush01123/vision @ c6722307e6860057b4855483d237fe00a213dcf6 | path: torchvision/prototype/models/mobilenetv3.py | licenses: ["BSD-3-Clause"]
max_stars_count, max_issues_count, max_forks_count and all event datetimes: null
content:
from functools import partial
from typing import Any, Optional, List
from torchvision.prototype.transforms import ImageNetEval
from torchvision.transforms.functional import InterpolationMode
from ...models.mobilenetv3 import MobileNetV3, _mobilenet_v3_conf, InvertedResidualConfig
from ._api import WeightsEnum, Weights
from ._meta import _IMAGENET_CATEGORIES
from ._utils import handle_legacy_interface, _ovewrite_named_param
__all__ = [
"MobileNetV3",
"MobileNet_V3_Large_Weights",
"MobileNet_V3_Small_Weights",
"mobilenet_v3_large",
"mobilenet_v3_small",
]
def _mobilenet_v3(
inverted_residual_setting: List[InvertedResidualConfig],
last_channel: int,
weights: Optional[WeightsEnum],
progress: bool,
**kwargs: Any,
) -> MobileNetV3:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress))
return model
_COMMON_META = {
"task": "image_classification",
"architecture": "MobileNetV3",
"publication_year": 2019,
"size": (224, 224),
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
}
class MobileNet_V3_Large_Weights(WeightsEnum):
ImageNet1K_V1 = Weights(
url="https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth",
transforms=partial(ImageNetEval, crop_size=224),
meta={
**_COMMON_META,
"num_params": 5483032,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small",
"acc@1": 74.042,
"acc@5": 91.340,
},
)
ImageNet1K_V2 = Weights(
url="https://download.pytorch.org/models/mobilenet_v3_large-5c1a4163.pth",
transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 5483032,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning",
"acc@1": 75.274,
"acc@5": 92.566,
},
)
default = ImageNet1K_V2
class MobileNet_V3_Small_Weights(WeightsEnum):
ImageNet1K_V1 = Weights(
url="https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth",
transforms=partial(ImageNetEval, crop_size=224),
meta={
**_COMMON_META,
"num_params": 2542856,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small",
"acc@1": 67.668,
"acc@5": 87.402,
},
)
default = ImageNet1K_V1
@handle_legacy_interface(weights=("pretrained", MobileNet_V3_Large_Weights.ImageNet1K_V1))
def mobilenet_v3_large(
*, weights: Optional[MobileNet_V3_Large_Weights] = None, progress: bool = True, **kwargs: Any
) -> MobileNetV3:
weights = MobileNet_V3_Large_Weights.verify(weights)
inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_large", **kwargs)
return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs)
@handle_legacy_interface(weights=("pretrained", MobileNet_V3_Small_Weights.ImageNet1K_V1))
def mobilenet_v3_small(
*, weights: Optional[MobileNet_V3_Small_Weights] = None, progress: bool = True, **kwargs: Any
) -> MobileNetV3:
weights = MobileNet_V3_Small_Weights.verify(weights)
inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_small", **kwargs)
return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs)
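A brief usage sketch (not part of the stored file), assuming the prototype module above is importable; the builder and weight-enum names come directly from the code:

# Hedged usage sketch of the weights API defined above.
weights = MobileNet_V3_Large_Weights.ImageNet1K_V2
model = mobilenet_v3_large(weights=weights, progress=True)
model.eval()
# The legacy call mobilenet_v3_large(pretrained=True) is mapped to ImageNet1K_V1
# by the handle_legacy_interface decorator above.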
avg_line_length: 34.472727 | max_line_length: 119 | alphanum_fraction: 0.704114
qsc_*_quality_signal values (in schema order): 426, 3792, 5.953052, 0.288732, 0.108438, 0.063091, 0.054416, 0.549685, 0.494874, 0.468849, 0.413249, 0.413249, 0.413249, 0, 0.048403, 0.182753, 3792, 109, 120, 34.788991, 0.769926, 0, 0, 0.238636, 0, 0.034091, 0.214399, 0.013713, 0, 0, 0, 0, 0, 1, 0.034091, false, 0, 0.090909, 0, 0.238636, 0
unsuffixed qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: b9bb907819b5835937644fde4b8d08e5dd987580 | size: 1,036 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): mental689/paddict @ 493268b62531c698687d42416edf61c602250133 | path: crawler/tests.py | licenses: ["MIT"]
max_stars_count: 1 (stars events 2019-06-22T10:28:21.000Z) | max_issues_count: 4 (issues events 2020-09-05T01:48:18.000Z to 2022-03-02T04:29:25.000Z) | max_forks_count and forks event datetimes: null
content:
from django.test import TestCase
# Create your tests here.
from crawler.download import *
from crawler.models import *
class AnimalDownloadTestCase(TestCase):
def setUp(self):
self.stopWords = ["CVPR 2019", "Computer Vision Foundation."]
self.url = "/Users/tuannguyenanh/Desktop/cvpr2019.html"#"http://openaccess.thecvf.com/CVPR2019.py"
self.root = "http://openaccess.thecvf.com/"
self.event = Event.objects.filter(shortname='CVPR2019').first()
if self.event is None:
self.event = Event(shortname='CVPR2019')
self.event.save()
def test_animal_can_download(self):
#print(get_html(self.url))
f = open(self.url)
soup = parse_html(f.read())
f.close()
f = open('cvpr2019.bib', 'w')
print(soup.title)
bibtexs = soup.find_all("div", attrs={"class": "bibref"})
#print(bibtexs)
for bib in bibtexs:
print(bib.text)
f.write(bib.text.replace('<br>', '\n'))
f.close()
avg_line_length: 32.375 | max_line_length: 106 | alphanum_fraction: 0.608108
qsc_*_quality_signal values (in schema order): 126, 1036, 4.952381, 0.555556, 0.057692, 0.064103, 0.073718, 0, 0, 0, 0, 0, 0, 0, 0.03073, 0.246139, 1036, 31, 107, 33.419355, 0.768246, 0.100386, 0, 0.086957, 0, 0, 0.168649, 0.045405, 0, 0, 0, 0, 0, 1, 0.086957, false, 0, 0.130435, 0, 0.26087, 0.086957
unsuffixed qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: b9bfcc9ca3f71d3591d1b453eea9313adf491d9f | size: 452 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): petervdb/testrep1 @ 76b6eb3de2deb9596c055f252191e28587d5520c | path: test_scripts/xml_example.py | licenses: ["MIT"]
max_stars_count: 1 (stars events 2015-11-17T21:35:44.000Z) | max_issues_count, max_forks_count and their event datetimes: null
content:
#!/usr/bin/python3
from urllib.request import urlopen
from xml.etree.ElementTree import parse
# Download the RSS feed and parse it
u = urlopen('http://planet.python.org/rss20.xml')
doc = parse(u)
# Extract and output tags of interest
for item in doc.iterfind('channel/item'):
title = item.findtext('title')
date = item.findtext('pubDate')
link = item.findtext('link')
print(title)
print(date)
print(link)
print()
print("Program executed.")
avg_line_length: 20.545455 | max_line_length: 49 | alphanum_fraction: 0.725664
qsc_*_quality_signal values (in schema order): 67, 452, 4.895522, 0.641791, 0.109756, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.007673, 0.134956, 452, 21, 50, 21.52381, 0.831202, 0.19469, 0, 0, 0, 0, 0.219444, 0, 0, 0, 0, 0, 0, 1, 0, false, 0, 0.153846, 0, 0.153846, 0.384615
unsuffixed qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: b9c06414f6de5d6df932f87abe0ac2addfe2d410 | size: 1,489 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): anthowen/duplify @ 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | path: contacts/urls.py | licenses: ["MIT"]
max_stars_count: 1 (stars events 2019-04-21T18:57:57.000Z) | max_issues_count, max_forks_count and their event datetimes: null
content:
"""dedupper_app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from contacts import views
admin.autodiscover()
urlpatterns = [
path('', views.index, name='contact_index'),
path('', views.index, name='lead_index'),
path('contacts/', views.contacts, name='contacts'),
path('leads/', views.leads, name='leads'),
path('table/', views.table, name='table'),
path('plotly/', views.plotly, name='plotly'),
# url(r'^keys', views.upload, name='keys'),
# path('key-gen/', views.key_gen, name='key-gen'),
# path('heroku/', generic.ListView.as_view(model=models.Contact), name='heroku'),
# path('run/', views.run, name='run'),
# path('sorted/<id>', views.merge, name='merge'),
# path('sorted/export/<type>', views.download, name='export'),
# path('sorted/report/<type>', views.download_times, name='report'),
]
avg_line_length: 38.179487 | max_line_length: 85 | alphanum_fraction: 0.672263
qsc_*_quality_signal values (in schema order): 209, 1489, 4.746411, 0.368421, 0.060484, 0.015121, 0.024194, 0.117944, 0.117944, 0.075605, 0, 0, 0, 0, 0.006354, 0.154466, 1489, 38, 86, 39.184211, 0.781573, 0.699127, 0, 0, 0, 0, 0.172811, 0, 0, 0, 0, 0, 0, 1, 0, false, 0, 0.25, 0, 0.25, 0
unsuffixed qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: b9c1d738b7414d020a32d72c8b5b4b39a4b6d1d4 | size: 2,667 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): pranaynanda/training-data-analyst @ f10ab778589129239fd5b277cfdefb41638eded5 | path: CPB100/lab2b/scheduled/ingestapp.py | licenses: ["Apache-2.0"]
max_stars_count, max_issues_count, max_forks_count and all event datetimes: null
content:
#!/usr/bin/env python
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START app]
import os
import logging
import transform
import flask
import google.cloud.storage as gcs
# [start config]
app = flask.Flask(__name__)
# Configure this environment variable via app.yaml
CLOUD_STORAGE_BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
#
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
# [end config]
@app.route('/')
def welcome():
return '<html><a href="ingest">ingest last week</a> earthquake data</html>'
@app.route('/ingest')
def ingest_last_week():
try:
# verify that this is a cron job request
is_cron = flask.request.headers['X-Appengine-Cron']
logging.info('Received cron request {}'.format(is_cron))
# create png
url = 'http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_week.csv'
outfile = 'earthquakes.png'
status = 'scheduled ingest of {} to {}'.format(url, outfile)
logging.info(status)
transform.create_png(url, outfile)
# upload to cloud storage
client = gcs.Client()
bucket = client.get_bucket(CLOUD_STORAGE_BUCKET)
blob = gcs.Blob('earthquakes/earthquakes.png', bucket)
blob.upload_from_filename(outfile)
# change permissions
blob.make_public()
status = 'uploaded {} to {}'.format(outfile, blob.name)
logging.info(status)
except KeyError as e:
status = '<html>Sorry, this capability is accessible only by the Cron service, but I got a KeyError for {} -- try invoking it from <a href="{}"> the GCP console / AppEngine / taskqueues </a></html>'.format(
e, 'http://console.cloud.google.com/appengine/taskqueues?tab=CRON')
logging.info('Rejected non-Cron request')
return status
@app.errorhandler(500)
def server_error(e):
logging.exception('An error occurred during a request.')
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
# [END app]
avg_line_length: 32.13253 | max_line_length: 214 | alphanum_fraction: 0.683915
qsc_*_quality_signal values (in schema order): 366, 2667, 4.904372, 0.486339, 0.033426, 0.030084, 0.017827, 0, 0, 0, 0, 0, 0, 0, 0.011199, 0.196475, 2667, 82, 215, 32.52439, 0.826412, 0.285339, 0, 0.047619, 0, 0.047619, 0.384697, 0.014346, 0, 0, 0, 0, 0, 1, 0.071429, false, 0, 0.119048, 0.02381, 0.261905, 0
unsuffixed qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: b9c731695680778a55c685fcfc15ab5e3eccf437 | size: 5,438 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): Genlovy-Hoo/dramkit @ fa3d2f35ebe9effea88a19e49d876b43d3c5c4c7 | path: dramkit/_tmp/VMD.py | licenses: ["MIT"]
max_stars_count, max_issues_count, max_forks_count and all event datetimes: null
content:
# -*- coding: utf-8 -*-
import numpy as np
def vmd( signal, alpha, tau, K, DC, init, tol):
'''
    When using the VMD decomposition algorithm, you only need to feed in the signal to be
    decomposed; the decomposition does not depend on the sampling frequency, and the VMD
    input parameters do not include one. The decomposed modes are returned in the output u,
    independently of the signal length and sampling frequency. The center frequencies of the
    modes during iteration are returned in omega; the physical center frequencies can be
    recovered as 2*pi/fs*omega, but note that the frequencies change across iterations.
Input and Parameters:
signal - the time domain signal (1D) to be decomposed
alpha - the balancing parameter of the data-fidelity constraint
tau - time-step of the dual ascent ( pick 0 for noise-slack )
K - the number of modes to be recovered
DC - true if the first mode is put and kept at DC (0-freq)
init - 0 = all omegas start at 0
1 = all omegas start uniformly distributed
2 = all omegas initialized randomly
tol - tolerance of convergence criterion; typically around 1e-6
Output:
u - the collection of decomposed modes
u_hat - spectra of the modes
omega - estimated mode center-frequencies
'''
# Period and sampling frequency of input signal
    # The sampling frequency and time are normalized inside the algorithm: the signal is assumed to span 1 s, so the sampling interval is 1/length(signal) and the sampling frequency is length(signal).
save_T = len(signal)
fs = 1 / save_T
    # extend the signal by mirroring (mirror extension)
T = save_T
f_mirror = []
temp = signal[0:T//2]
    f_mirror.extend(temp[::-1]) # temp[::-1] reverses the segment
f_mirror.extend(signal)
temp = signal[T//2:T]
f_mirror.extend(temp[::-1])
f = f_mirror
# Time Domain 0 to T (of mirrored signal)
T = len(f)
    t = [(i + 1) / T for i in range(T)] # the time index starts from 1
# Spectral Domain discretization
    # freqs is shifted because the Fourier transform yields symmetric positive and negative frequencies; only the positive half is normally analyzed, so the displayed spectrum has no negative frequencies
freqs = np.array( [i - 0.5 - 1 / T for i in t] )
# Maximum number of iterations (if not converged yet, then it won't anyway)
N = 500
# For future generalizations: individual alpha for each mode
Alpha = alpha * np.ones(K)
# Construct and center f_hat
    transformed = np.fft.fft(f) # fast Fourier transform of the signal
    f_hat = np.fft.fftshift(transformed) # shift the zero-frequency component to the center
f_hat_plus = f_hat
f_hat_plus[0:T // 2] = 0
    # f_hat_plus[0:T] = 1  # (commented-out experiment)
# matrix keeping track of every iterant // could be discarded for mem
u_hat_plus = [np.zeros((N, len(freqs)), dtype=complex) for i in range(K)]
# Initialization of omega_k
omega_plus = np.zeros((N, K))
if init == 1:
for i in range(K):
omega_plus[0, i] = (0.5 / K) * i
elif init == 2:
omega_plus[0, :] = np.sort(np.exp(np.log(fs) + (np.log(0.5) - np.log(fs)) * np.random.rand(K)))
else:
omega_plus[0, :] = 0
# if DC mode imposed, set its omega to 0
if DC:
omega_plus[0, 0] = 0
# start with empty dual variables
lambda_hat = np.zeros( (N, len(freqs)), dtype=complex)
# other inits
    eps = 2.2204e-16 # machine epsilon (Python has no built-in MATLAB-style eps)
uDiff = tol + eps # update step
n = 1 # loop counter
sum_uk = 0 # accumulator
#----------- Main loop for iterative updates----------
while (uDiff > tol and n < N ): #not converged and below iterations limit
#update first mode accumulator
k = 0
    sum_uk = u_hat_plus[K-1][n-1,:]+ sum_uk - u_hat_plus[0][n-1,:] # sum_uk stays equal to 0 here (shape (1, 2000))?
#update spectrum of first mode through Wiener filter of residuals
u_hat_plus[k][n,:] = (f_hat_plus - sum_uk - lambda_hat[n-1,:]/2)/(1+Alpha[k]*(freqs - omega_plus[n-1,k])**2)
#update first omega if not held at 0
if not DC:
omega_plus[n,k] = (freqs[T//2:T]*np.mat(np.abs(u_hat_plus[k][n, T//2:T])**2).H)/np.sum(np.abs(u_hat_plus[k][n,T//2:T])**2)
#update of any other mode
for k in range(K-1):
#accumulator
sum_uk = u_hat_plus[k][n,:] + sum_uk - u_hat_plus[k+1][n-1,:]
#mode spectrum
u_hat_plus[k+1][n,:] = (f_hat_plus - sum_uk - lambda_hat[n-1,:]/2)/(1+Alpha[k+1]*(freqs - omega_plus[n-1,k+1])**2)
#center frequencies
omega_plus[n,k+1] = (freqs[T//2:T]*np.mat(np.abs(u_hat_plus[k+1][n, T//2:T])**2).H)/np.sum(np.abs(u_hat_plus[k+1][n,T//2:T])**2)
#Dual ascent
lambda_hat[n,:] = lambda_hat[n-1,:] + tau*(np.sum([ u_hat_plus[i][n,:] for i in range(K)],0) - f_hat_plus)
#loop counter
n = n+1
#converged yet?
uDiff = eps
for i in range(K):
uDiff = uDiff + 1/T*(u_hat_plus[i][n-1,:]-u_hat_plus[i][n-2,:])*np.mat((u_hat_plus[i][n-1,:]-u_hat_plus[i][n-2,:]).conjugate()).H
uDiff = np.abs(uDiff)
# ------ Postprocessing and cleanup-------
#discard empty space if converged early
N = min(N,n)
omega = omega_plus[0:N,:]
#Signal reconstruction
u_hat = np.zeros((T, K), dtype=complex)
temp = [u_hat_plus[i][N-1,T//2:T] for i in range(K) ]
u_hat[T//2:T,:] = np.squeeze(temp).T
temp = np.squeeze(np.mat(temp).conjugate())
u_hat[1:(T//2+1),:] = temp.T[::-1]
u_hat[0,:] = (u_hat[-1,:]).conjugate()
u = np.zeros((K,len(t)))
for k in range(K):
u[k,:]=np.real(np.fft.ifft(np.fft.ifftshift(u_hat[:,k])))
#remove mirror part
u = u[:,T//4:3*T//4]
#recompute spectrum
u_hat = np.zeros((T//2, K), dtype=complex)
for k in range(K):
u_hat[:,k]= np.squeeze( np.mat( np.fft.fftshift(np.fft.fft(u[k,:])) ).H)
return u, u_hat, omega
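A minimal usage sketch (not part of the stored module) for the vmd function above, decomposing a synthetic two-tone signal; the parameter values are illustrative assumptions:

import numpy as np

t = np.linspace(0, 1, 1000, endpoint=False)
sig = np.cos(2 * np.pi * 5 * t) + 0.5 * np.cos(2 * np.pi * 40 * t)
# alpha: bandwidth constraint, tau: dual-ascent step, K: number of modes,
# DC=0: no DC mode imposed, init=1: uniformly spread initial frequencies, tol: convergence tolerance
u, u_hat, omega = vmd(sig, alpha=2000, tau=0, K=2, DC=0, init=1, tol=1e-6)
print(u.shape)        # (2, 1000): one reconstructed mode per row
print(omega[-1, :])   # final normalized center frequencies of the two modes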
avg_line_length: 37.503448 | max_line_length: 141 | alphanum_fraction: 0.580912
qsc_*_quality_signal values (in schema order): 878, 5438, 3.494305, 0.252847, 0.035202, 0.044329, 0.026402, 0.203716, 0.133963, 0.102999, 0.084746, 0.084746, 0.073664, 0, 0.029659, 0.255976, 5438, 144, 142, 37.763889, 0.728621, 0.400331, 0, 0.086957, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.014493, false, 0, 0.014493, 0, 0.043478, 0
unsuffixed qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: b9c81413c2bd63d72d0731352d31911ef52240f6 | size: 480 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): asmaasalih/my_project @ 89183d7a2578fa302e94ea29570ab527e9ca47b5 | path: forum/main.py | licenses: ["MIT"]
max_stars_count: 1 (stars events 2018-03-21T07:51:36.000Z) | max_issues_count, max_forks_count and their event datetimes: null
content:
import models
import stores
member1 =models.Member("ahmed",33)
member2 =models.Member("mohamed",30)
post1=models.Post("Post1", "Content1")
post2= models.Post("Post2", "Content2")
post3= models.Post("Post3", "Content3")
#member store
member_store=stores.MemberStore()
member_store.add(member1)
member_store.add(member2)
print (member_store.get_all())
post_store=stores.PostStore()
post_store.add(post1)
post_store.add(post2)
post_store.add(post3)
print (post_store.get_all())
avg_line_length: 20.869565 | max_line_length: 39 | alphanum_fraction: 0.772917
qsc_*_quality_signal values (in schema order): 69, 480, 5.217391, 0.347826, 0.152778, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0.044843, 0.070833, 480, 22, 40, 21.818182, 0.762332, 0.025, 0, 0, 0, 0, 0.109208, 0, 0, 0, 0, 0, 0, 1, 0, false, 0, 0.125, 0, 0.125, 0.125
unsuffixed qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: b9c964b752a9622a17123202e7aae50d1718a48a | size: 1,345 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): nosisky/algo-solution @ a9276f73ba63b1a0965c194885aea6cadfab0e0b | path: question3.py | licenses: ["MIT"]
max_stars_count: 1 (stars events 2019-08-14T12:32:49.000Z) | max_issues_count, max_forks_count and their event datetimes: null
content:
# A string S consisting of N characters is considered to be properly nested if any of the following conditions is true:
# S is empty;
# S has the form "(U)" or "[U]" or "{U}" where U is a properly nested string; S has the form "VW" where V and W are properly nested strings.
# For example, the string "{[()()]}" is properly nested but "([)()]" is not.
# Write a function:
# int solution(char *S);
# that, given a string S consisting of N characters, returns 1 if S is properly nested and 0 otherwise.
# For example, given S = "{[()()]}", the function should return 1 and given S = "([)()]", the function should return 0, as explained above.
# Assume that:
# N is an integer within the range [0..200,000];
# string S consists only of the following characters: "(", "{", "[", "]", "}" and/or ")". Complexity:
# expected worst-case time complexity is O(N);
# expected worst-case space complexity is O(N) (not counting the storage required for input arguments).
def solution(s):
sets = dict(zip('({[', ')}]'))
if(not isinstance(s, str)):
return "Invalid input"
collector = []
for bracket in s:
if(bracket in sets):
collector.append(sets[bracket])
elif bracket not in sets.values():
return "Invalid input"
elif not collector or bracket != collector.pop():
# a closing bracket with no matching opener, or the wrong closer, fails
return False
return not collector
print(solution("()[]{}"))
avg_line_length: 42.03125 | max_line_length: 140 | alphanum_fraction: 0.66171
qsc_*_quality_signal values (in schema order): 203, 1345, 4.384236, 0.433498, 0.078652, 0.017978, 0.040449, 0.134831, 0.134831, 0.069663, 0, 0, 0, 0, 0.010148, 0.194052, 1345, 31, 141, 43.387097, 0.810886, 0.692193, 0, 0.142857, 0, 0, 0.095, 0, 0, 0, 0, 0, 0, 1, 0.071429, false, 0, 0, 0, 0.357143, 0.071429
unsuffixed qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: b9ca4ff833bf2ee267f7f1b8ecf69069cd8c4b31 | size: 1,996 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): chrMenzel/A-beautiful-code-in-Python @ 92ee43c1fb03c299384d4de8bebb590c5ba1b623 | path: Teil_27_Game_of_Life_3d.py | licenses: ["MIT"]
max_stars_count: 50 (stars events 2018-12-23T15:46:16.000Z to 2022-03-28T15:49:59.000Z) | max_issues_count: 9 (issues events 2018-12-03T10:31:29.000Z to 2022-01-20T14:41:33.000Z) | max_forks_count: 69 (forks events 2019-02-02T11:59:09.000Z to 2022-03-28T15:54:28.000Z)
content:
import bpy
import random as rnd
from collections import Counter
import itertools as iter
feld_von, feld_bis = -4, 4
spielfeld_von, spielfeld_bis = feld_von-6, feld_bis+6
anz = int((feld_bis-feld_von)**3*.3)
spielfeld = {(rnd.randint(feld_von, feld_bis), rnd.randint(
feld_von, feld_bis), rnd.randint(feld_von, feld_bis)) for _ in range(anz)}
animate_frame = 8
def nachbarn(pos):
for x,y,z in iter.product(range(-1,2), repeat = 3):
if z == y == x == 0: continue
yield pos[0]+x, pos[1]+y, pos[2]+z
def nächsteGeneration(spielfeld):
nachb = Counter([p for pos in spielfeld for p in nachbarn(pos)])
return {pos for pos, anz in nachb.items() if anz == 6 or (anz in (5, 6, 7, 8) and pos in spielfeld)}
def scale_rotate(ob, scale, rot, fr):
ob.scale = (scale, scale, scale)
ob.rotation_euler.rotate_axis("Z", rot)
ob.keyframe_insert(data_path='rotation_euler', frame=fr)
ob.keyframe_insert(data_path='scale', frame=fr)
bpy.ops.mesh.primitive_cube_add(size=0.001, location=(0, 0, 0))
orig_cube = bpy.context.active_object
n = "cube"
m = orig_cube.data.copy()
cubes = {}
for x,y,z in iter.product(range(spielfeld_von,spielfeld_bis), repeat = 3):
o = bpy.data.objects.new(n, m)
o.location = (x, y, z)
cubes[x, y, z] = o
bpy.context.collection.objects.link(o)
o.select_set(False)
for i in range(200):
print(f'Durchlauf No. {i}, Anz. Zellen = {len(spielfeld)}')
spielfeld2 = nächsteGeneration(spielfeld)
dead = spielfeld - spielfeld2
new = spielfeld2 - spielfeld
spielfeld = spielfeld2
if not new and not dead:
break
for zelle in new | dead:
if zelle not in cubes:
continue
ob = cubes[zelle]
if zelle in new:
scale_rotate(ob, 0.001, -3.141/2, (i-1)*animate_frame)
scale_rotate(ob, 750, 3.141/2, i * animate_frame)
else:
scale_rotate(ob, 750, 3.141/2, (i-1) * animate_frame)
scale_rotate(ob, 0.001, -3.141/2, i * animate_frame)
if not spielfeld:
break
bpy.context.scene.frame_current = 1
avg_line_length: 28.927536 | max_line_length: 102 | alphanum_fraction: 0.67986
qsc_*_quality_signal values (in schema order): 341, 1996, 3.859238, 0.302053, 0.031915, 0.049392, 0.042553, 0.234043, 0.197568, 0.179331, 0.179331, 0.12386, 0.103343, 0, 0.043372, 0.17986, 1996, 68, 103, 29.352941, 0.760538, 0, 0, 0.037037, 0, 0, 0.036573, 0, 0, 0, 0, 0, 0, 1, 0.055556, false, 0, 0.074074, 0, 0.148148, 0.018519
unsuffixed qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: b9ca98991068e30844d7bcc8e336f70de5eef5a9 | size: 1,824 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): openclimatefix/power_perceiver @ bafcdfaf6abf42fbab09da641479f74709ddd395 | path: power_perceiver/xr_batch_processor/reduce_num_pv_systems.py | licenses: ["MIT"]
max_issues_count: 33 (issues events 2022-02-16T07:51:41.000Z to 2022-03-31T11:24:11.000Z) | max_stars_count, max_forks_count and their event datetimes: null
content:
from dataclasses import dataclass
import numpy as np
import xarray as xr
from power_perceiver.load_prepared_batches.data_sources import PV
from power_perceiver.load_prepared_batches.data_sources.prepared_data_source import XarrayBatch
@dataclass
class ReduceNumPVSystems:
"""Reduce the number of PV systems per example to `requested_num_pv_systems`.
Randomly select PV systems for each example. If there are less PV systems available
than requested, then randomly sample with duplicates allowed.
This is implemented as an xr_batch_processor so it can run after
SelectPVSystemsNearCenterOfImage.
"""
requested_num_pv_systems: int
def __post_init__(self):
self.rng = np.random.default_rng() # Seeded by seed_rngs worker_init_function
def __call__(self, xr_batch: XarrayBatch) -> XarrayBatch:
pv_batch = xr_batch[PV]
num_examples = len(pv_batch.example)
selection = np.zeros(shape=(num_examples, self.requested_num_pv_systems), dtype=np.int32)
for example_i in range(num_examples):
pv_mask_for_example = pv_batch.pv_mask.isel(example=example_i).values
all_indicies = np.nonzero(pv_mask_for_example)[0]
# Only allow a PV system to be chosen multiple times for this example if there are
# less available PV systems than requested PV systems.
replace = len(all_indicies) < self.requested_num_pv_systems
chosen_indicies = self.rng.choice(
all_indicies, size=self.requested_num_pv_systems, replace=replace
)
selection[example_i] = chosen_indicies
selection = xr.DataArray(selection, dims=("example", "pv_system"))
pv_batch = pv_batch.isel(pv_system=selection)
xr_batch[PV] = pv_batch
return xr_batch
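A standalone sketch (not part of the stored file) of the selection rule described in the docstring above, using plain NumPy; the mask and sizes are illustrative assumptions:

import numpy as np

rng = np.random.default_rng(42)
requested_num_pv_systems = 8
pv_mask_for_example = np.array([True, True, False, True, False])  # 3 systems available
all_indicies = np.nonzero(pv_mask_for_example)[0]

# Duplicates are only allowed when fewer PV systems are available than requested.
replace = len(all_indicies) < requested_num_pv_systems
chosen_indicies = rng.choice(all_indicies, size=requested_num_pv_systems, replace=replace)
print(chosen_indicies)  # eight indices drawn from {0, 1, 3}, with repeats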
avg_line_length: 39.652174 | max_line_length: 97 | alphanum_fraction: 0.721491
qsc_*_quality_signal values (in schema order): 247, 1824, 5.044534, 0.421053, 0.072231, 0.05618, 0.08427, 0.170947, 0.077047, 0.077047, 0.077047, 0, 0, 0, 0.002096, 0.215461, 1824, 45, 98, 40.533333, 0.868623, 0.272478, 0, 0, 0, 0, 0.012327, 0, 0, 0, 0, 0, 0, 1, 0.076923, false, 0, 0.192308, 0, 0.384615, 0
unsuffixed qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: b9cc65aafe29eb9820f902e036880e65947e1e2d | size: 857 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): wang153723482/HelloWorld_my @ b8642ad9742f95cfebafc61f25b00e917485e50c | path: HelloWorld_python/log/demo_log_3.py | licenses: ["Apache-2.0"]
max_stars_count, max_issues_count, max_forks_count and all event datetimes: null
content:
#encoding=utf8
# rotate the log file once per day
import logging
import time
from logging.handlers import TimedRotatingFileHandler
#----------------------------------------------------------------------
if __name__ == "__main__":
logFilePath = "timed_test.log"
logger = logging.getLogger("YouLoggerName")
logger.setLevel(logging.INFO)
handler = TimedRotatingFileHandler(logFilePath,
when="d",
interval=1,
backupCount=7)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
handler.setLevel(logging.INFO)
logger.addHandler(handler)
for i in range(6):
logger.info("This is a info!")
logger.debug("This is a debug!")
# time.sleep(61)
avg_line_length: 29.551724 | max_line_length: 89 | alphanum_fraction: 0.536756
qsc_*_quality_signal values (in schema order): 77, 857, 5.857143, 0.597403, 0.066519, 0.084257, 0, 0, 0, 0, 0, 0, 0, 0, 0.009756, 0.28238, 857, 29, 90, 29.551724, 0.723577, 0.124854, 0, 0, 0, 0, 0.159732, 0, 0, 0, 0, 0, 0, 1, 0, false, 0, 0.166667, 0, 0.166667, 0
unsuffixed qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: b9cda5cbb2749647d6a78abf80d9eb5c24205425 | size: 341 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): ffreemt/tmx2epub @ 55a59cb2a9b7f42031a65f64c29e5c43fdb487ea | path: tests/test_gen_epub.py | licenses: ["MIT"]
max_stars_count, max_issues_count, max_forks_count and all event datetimes: null
content:
""" test gen_epub. """
from tmx2epub.gen_epub import gen_epub
def test_gen_epub2():
""" test_gen_epub2. """
from pathlib import Path
infile = r"tests\2.tmx"
stem = Path(infile).absolute().stem
outfile = f"{Path(infile).absolute().parent / stem}.epub"
assert gen_epub(infile, debug=True) == outfile
# assert 0
avg_line_length: 22.733333 | max_line_length: 61 | alphanum_fraction: 0.653959
qsc_*_quality_signal values (in schema order): 48, 341, 4.479167, 0.5, 0.130233, 0.111628, 0, 0, 0, 0, 0, 0, 0, 0, 0.018315, 0.199413, 341, 14, 62, 24.357143, 0.769231, 0.123167, 0, 0, 0, 0, 0.192308, 0.108392, 0, 0, 0, 0, 0.142857, 1, 0.142857, false, 0, 0.285714, 0, 0.428571, 0
unsuffixed qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: b9cde2fbd07898c518510cadb194827f6566c927 | size: 716 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): amulyavarote/quickstarts @ c21a8f58d515b28eaa8a3680388fa06995c2331b | path: pub_sub/python/http/checkout/app.py | licenses: ["Apache-2.0"]
max_stars_count, max_issues_count, max_forks_count and all event datetimes: null
content:
import json
import time
import random
import logging
import requests
import os
logging.basicConfig(level=logging.INFO)
base_url = os.getenv('BASE_URL', 'http://localhost') + ':' + os.getenv(
'DAPR_HTTP_PORT', '3500')
PUBSUB_NAME = 'order_pub_sub'
TOPIC = 'orders'
logging.info('Publishing to baseURL: %s, Pubsub Name: %s, Topic: %s' % (
base_url, PUBSUB_NAME, TOPIC))
for i in range(1, 10):
order = {'orderId': i}
# Publish an event/message using Dapr PubSub via HTTP Post
result = requests.post(
url='%s/v1.0/publish/%s/%s' % (base_url, PUBSUB_NAME, TOPIC),
json=order
)
logging.info('Published data: ' + json.dumps(order))
time.sleep(1)
| 25.571429
| 72
| 0.642458
| 100
| 716
| 4.49
| 0.5
| 0.062361
| 0.035635
| 0.062361
| 0.10245
| 0.10245
| 0
| 0
| 0
| 0
| 0
| 0.017825
| 0.21648
| 716
| 27
| 73
| 26.518519
| 0.782531
| 0.078212
| 0
| 0
| 0
| 0
| 0.241641
| 0.031915
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9ce404499c062b33e8623b446d27dfebe6f033f
| 52,312
|
py
|
Python
|
jj.py
|
smailedge/pro
|
f86347d4368bc97aa860b37caa9ba10e84a93738
|
[
"Unlicense"
] | 1
|
2019-08-14T04:17:06.000Z
|
2019-08-14T04:17:06.000Z
|
jj.py
|
smailedge/pro
|
f86347d4368bc97aa860b37caa9ba10e84a93738
|
[
"Unlicense"
] | null | null | null |
jj.py
|
smailedge/pro
|
f86347d4368bc97aa860b37caa9ba10e84a93738
|
[
"Unlicense"
] | 7
|
2018-10-27T11:58:45.000Z
|
2021-02-11T19:45:30.000Z
|
# -*- coding: utf-8 -*-
from linepy import *
from datetime import datetime
from time import sleep
from humanfriendly import format_timespan, format_size, format_number, format_length
import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse
#==============================================================================#
botStart = time.time()
cl = LINE()
#cl = LINE("YOUR TOKEN")
#cl = LINE("Email","Password")
cl.log("Auth Token : " + str(cl.authToken))
channelToken = cl.getChannelResult()
cl.log("Channel Token : " + str(channelToken))
clMID = cl.profile.mid
clProfile = cl.getProfile()
lineSettings = cl.getSettings()
oepoll = OEPoll(cl)
#==============================================================================#
readOpen = codecs.open("read.json","r","utf-8")
settingsOpen = codecs.open("temp.json","r","utf-8")
read = json.load(readOpen)
settings = json.load(settingsOpen)
myProfile = {
"displayName": "",
"statusMessage": "",
"pictureStatus": ""
}
msg_dict = {}
bl = [""]
myProfile["displayName"] = clProfile.displayName
myProfile["statusMessage"] = clProfile.statusMessage
myProfile["pictureStatus"] = clProfile.pictureStatus
#==============================================================================#
def restartBot():
print ("[ INFO ] BOT RESETTED")
backupData()
python = sys.executable
os.execl(python, python, *sys.argv)
def backupData():
try:
backup = settings
f = codecs.open('temp.json','w','utf-8')
json.dump(backup, f, sort_keys=True, indent=4, ensure_ascii=False)
backup = read
f = codecs.open('read.json','w','utf-8')
json.dump(backup, f, sort_keys=True, indent=4, ensure_ascii=False)
return True
except Exception as error:
logError(error)
return False
def logError(text):
cl.log("[ ERROR ] " + str(text))
time_ = datetime.now()
with open("errorLog.txt","a") as error:
error.write("\n[%s] %s" % (str(time_), text))
def sendMessageWithMention(to, mid):
try:
aa = '{"S":"0","E":"3","M":'+json.dumps(mid)+'}'
text_ = '@x '
cl.sendMessage(to, text_, contentMetadata={'MENTION':'{"MENTIONEES":['+aa+']}'}, contentType=0)
except Exception as error:
logError(error)
def helpmessage():
helpMessage = """╔═════════════
╠♥ ✿✿✿ 十香の特製Bot ✿✿✿ ♥
╠SR 設定已讀點
╠LR 查看誰已讀
╠Nk @ 標註踢人
╠Nk 全部再見
╠══✪〘 其他功能略 〙✪═══
"""
return helpMessage
wait = {
"share":False,
"sender" :{},
}
admin =['ud5ff1dff426cf9e3030c7ac2a61512f0','ua10c2ad470b4b6e972954e1140ad1891',clMID]
owners = ["ua10c2ad470b4b6e972954e1140ad1891","ud5ff1dff426cf9e3030c7ac2a61512f0"]
#if clMID not in owners:
# python = sys.executable
# os.execl(python, python, *sys.argv)
#==============================================================================#
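# Handle one polled LINE operation, dispatched on its numeric op.type
# (0 end of operation, 5 contact added, 13 group invite, 19 member kicked,
# 24 leave room, 25/26 message sent/received, 55 read receipt, 65 message recalled).
# Text commands are only handled when the sender is in K0 (the admin list, or
# anyone while "share" is on).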
def lineBot(op):
try:
if op.type == 0:
print ("[ 0 ] END OF OPERATION")
return
if op.type == 5:
print ("[ 5 ] NOTIFIED ADD CONTACT")
if settings["autoAdd"] == True:
cl.sendMessage(op.param1, "感謝您加入本帳為好友w".format(str(cl.getContact(op.param1).displayName)))
if op.type == 13:
print ("[ 13 ] NOTIFIED INVITE GROUP")
group = cl.getGroup(op.param1)
if settings["autoJoin"] == True:
cl.acceptGroupInvitation(op.param1)
if op.type == 19:
if op.param2 not in owners:
if op.param2 in owners:
pass
elif settings["protect"] == True:
settings["blacklist"][op.param2] = True
cl.kickoutFromGroup(op.param1,[op.param2])
else:
cl.sendMessage(op.param1,"")
else:
cl.sendMessage(op.param1,"")
if op.type == 24:
print ("[ 24 ] NOTIFIED LEAVE ROOM")
if settings["autoLeave"] == True:
cl.leaveRoom(op.param1)
if op.type == 25 or op.type == 26:
K0 = admin
msg = op.message
if wait["share"] == True:
K0 = msg._from
else:
K0 = admin
# if op.type == 25:
# to = msg.to
# receiver = str(to.displayName)
# print ("send" + receiver + str(text.lower()))
# if op.type == 26:
# to = msg._from
# sender = str(to.displayName)
# print ("receiver" + sender + str(text.lower()))
if op.type == 26 or op.type == 25:
print ("[ 25 ] SEND MESSAGE")
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0:
if sender != cl.profile.mid:
to = sender
else:
to = receiver
else:
to = receiver
if msg.contentType == 0:
if text is None:
return
#==============================================================================#
if sender in K0:
if text.lower() == 'help':
helpMessage = helpmessage()
cl.sendMessage(to, str(helpMessage))
cl.sendContact(to,"u0a59c278b1529476ddb210cb5e827ffc")
cl.sendContact(to,"ufb30e2203f44bc7b72e28b09a88c9bbd")
#==============================================================================#
elif text.lower() == 'speed':
start = time.time()
cl.sendMessage(to, "計算中...")
elapsed_time = time.time() - start
cl.sendMessage(to,format(str(elapsed_time)))
elif text.lower() == 'restart':
cl.sendMessage(to, "重新啟動中...")
time.sleep(5)
cl.sendMessage(to, "重啟成功,請重新登入")
restartBot()
elif text.lower() == 'runtime':
timeNow = time.time()
runtime = timeNow - botStart
runtime = format_timespan(runtime)
cl.sendMessage(to, "系統已運作 {}".format(str(runtime)))
elif text.lower() == 'about':
try:
arr = []
owner = "ua10c2ad470b4b6e972954e1140ad1891"
creator = cl.getContact(owner)
contact = cl.getContact(clMID)
grouplist = cl.getGroupIdsJoined()
contactlist = cl.getAllContactIds()
blockedlist = cl.getBlockedContactIds()
ret_ = "╔══[ 關於使用者 ]"
ret_ += "\n╠ 使用者名稱 : {}".format(contact.displayName)
ret_ += "\n╠ 群組數 : {}".format(str(len(grouplist)))
ret_ += "\n╠ 好友數 : {}".format(str(len(contactlist)))
ret_ += "\n╠ 已封鎖 : {}".format(str(len(blockedlist)))
ret_ += "\n╠══[ 關於本bot ]"
ret_ += "\n╠ 版本 : 最新"
ret_ += "\n╠ 製作者 : {}".format(creator.displayName)
ret_ += "\n╚══[ 感謝您的使用 ]"
cl.sendMessage(to, str(ret_))
except Exception as e:
cl.sendMessage(msg.to, str(e))
#==============================================================================#
elif text.lower() == 'set':
try:
ret_ = "╔══[ 狀態 ]"
if settings["autoAdd"] == True: ret_ += "\n╠ Auto Add ✅"
else: ret_ += "\n╠ Auto Add ❌"
if settings["autoJoin"] == True: ret_ += "\n╠ Auto Join ✅"
else: ret_ += "\n╠ Auto Join ❌"
if settings["autoLeave"] == True: ret_ += "\n╠ Auto Leave ✅"
else: ret_ += "\n╠ Auto Leave ❌"
if settings["autoRead"] == True: ret_ += "\n╠ Auto Read ✅"
else: ret_ += "\n╠ Auto Read ❌"
if settings["reread"] ==True: ret_+="\n╠ Reread ✅"
else: ret_ += "\n╠ Reread ❌"
ret_ += "\n╚══[ Finish ]"
cl.sendMessage(to, str(ret_))
except Exception as e:
cl.sendMessage(msg.to, str(e))
elif text.lower() == 'autoadd on':
settings["autoAdd"] = True
cl.sendMessage(to, "Auto Add on success")
elif text.lower() == 'autoadd off':
settings["autoAdd"] = False
cl.sendMessage(to, "Auto Add off success")
elif text.lower() == 'autojoin on':
settings["autoJoin"] = True
cl.sendMessage(to, "Auto Join on success")
elif text.lower() == 'autojoin off':
settings["autoJoin"] = False
cl.sendMessage(to, "Auto Join off success")
elif text.lower() == 'autoleave on':
settings["autoLeave"] = True
cl.sendMessage(to, "Auto Leave on success")
elif text.lower() == 'autoleave off':
settings["autoLeave"] = False
cl.sendMessage(to, "Auto Leave off success")
elif text.lower() == 'autoread on':
settings["autoRead"] = True
cl.sendMessage(to, "Auto Read on success")
elif text.lower() == 'autoread off':
settings["autoRead"] = False
cl.sendMessage(to, "Auto Read off success")
elif text.lower() == 'checksticker on':
settings["checkSticker"] = True
cl.sendMessage(to, "Berhasil mengaktifkan Check Details Sticker")
elif text.lower() == 'checksticker off':
settings["checkSticker"] = False
cl.sendMessage(to, "Berhasil menonaktifkan Check Details Sticker")
elif text.lower() == 'detectmention on':
settings["detectMention"] = True
cl.sendMessage(to, "Berhasil mengaktifkan Detect Mention")
elif text.lower() == 'detectmention off':
settings["detectMention"] = False
cl.sendMessage(to, "Berhasil menonaktifkan Detect Mention")
elif text.lower() == 'reread on':
settings["reread"] = True
cl.sendMessage(to,"reread on success")
elif text.lower() == 'reread off':
settings["reread"] = False
cl.sendMessage(to,"reread off success")
elif text.lower() == 'protect on':
settings["protect"] = True
cl.sendMessage(to, "Protect on success")
elif text.lower() == 'protect off':
settings["protect"] = False
cl.sendMessage(to, "Protect off success")
elif text.lower() == 'share on':
wait["share"] = True
cl.sendMessage(to, "已開啟分享")
elif text.lower() == 'share off':
wait["share"] = False
cl.sendMessage(to, "已關閉分享")
#==============================================================================#
elif text.lower() == 'admin ':
MENTION =eval(msg.contentMetadata['MENTION'])
inkey =MENTION['MENTIONEES'][0]['M']
admin.append(str(inkey))
cl.sendMessage(to,"已新增權限")
elif text.lower() == 'demin ':
MENTION =eval(msg.contentMetadata['MENTION'])
inkey =MENTION['MENTIONEES'][0]['M']
admin.remove(str(inkey))
cl.sendMessage(to,"已停止權限")
elif text.lower() == 'adminlist':
if admin == []:
cl.sendMessage(to,"無擁有權限者!")
else:
mc = "╔══[ Admin List ]"
for mi_d in admin:
mc += "\n╠ "+cl.getContact(mi_d).displayName
cl.sendMessage(to,mc + "\n╚══[ Finish ]")
#==============================================================================#
elif text.lower() == 'me':
sendMessageWithMention(to, clMID)
cl.sendContact(to, clMID)
elif text.lower() == 'mymid':
cl.sendMessage(msg.to,"[MID]\n" + clMID)
elif text.lower() == 'myname':
me = cl.getContact(clMID)
cl.sendMessage(msg.to,"[Name]\n" + me.displayName)
elif text.lower() == 'mytoken':
me = cl.getContact(clMID)
cl.sendMessage(msg.to,"[StatusMessage]\n" + me.statusMessage)
elif text.lower() == 'mypicture':
me = cl.getContact(clMID)
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus)
elif text.lower() == 'myvideoprofile':
me = cl.getContact(clMID)
cl.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus + "/vp")
elif text.lower() == 'mycover':
me = cl.getContact(clMID)
cover = cl.getProfileCoverURL(clMID)
cl.sendImageWithURL(msg.to, cover)
elif msg.text.lower().startswith("contact "):
if 'MENTION' in msg.contentMetadata:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = cl.getContact(ls)
mi_d = contact.mid
cl.sendContact(msg.to, mi_d)
elif msg.text.lower().startswith("mid "):
if 'MENTION' in msg.contentMetadata:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
ret_ = "[ Mid User ]"
for ls in lists:
ret_ += "\n" + ls
cl.sendMessage(msg.to, str(ret_))
elif msg.text.lower().startswith("name "):
if 'MENTION' in msg.contentMetadata:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = cl.getContact(ls)
cl.sendMessage(msg.to, "[ 名字 ]\n" + contact.displayName)
for ls in lists:
contact = cl.getContact(ls)
cl.sendMessage(msg.to, "[ 個簽 ]\n" + contact.statusMessage)
for ls in lists:
path = "http://dl.profile.cl.naver.jp/" + cl.getContact(ls).pictureStatus
cl.sendImageWithURL(msg.to, str(path))
for ls in lists:
path = cl.getProfileCoverURL(ls)
pmath = "http://dl.profile.cl.naver.jp/" + cl.getContact(ls).pictureStatus
cl.sendImageWithURL(msg.to, path)
try:
key = eval(msg.contentMetadata["MENTION"])
u = key["MENTIONEES"][0]["M"]
cname = cl.getContact(u).displayName
cmid = cl.getContact(u).mid
cstatus = cl.getContact(u).statusMessage
cpic = cl.getContact(u).picturePath
cl.sendMessage(receiver, 'Nama : '+cname+'\nMID : '+cmid+'\nStatus Msg : '+cstatus+'\nPicture : http://dl.profile.line.naver.jp'+cpic)
cl.sendMessage(receiver, None, contentMetadata={'mid': cmid}, contentType=13)
if cl.getContact(u).videoProfile != None:
cl.sendVideoWithURL(receiver, 'http://dl.profile.line.naver.jp'+cpic+'/vp.small')
else:
cl.sendImageWithURL(receiver, 'http://dl.profile.line.naver.jp'+cpic)
except Exception as e:
cl.sendMessage(receiver, str(e))
if msg.contentMetadata is not None:
if 'MENTION' in msg.contentMetadata:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
path = cl.getProfileCoverURL(ls)
cl.sendImageWithURL(msg.to, str(path))
elif msg.text.lower().startswith("cloneprofile "):
if 'MENTION' in msg.contentMetadata:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
contact = mention["M"]
break
try:
cl.cloneContactProfile(contact)
cl.sendMessage(msg.to, "Berhasil clone member tunggu beberapa saat sampai profile berubah")
except:
cl.sendMessage(msg.to, "Gagal clone member")
elif text.lower() == 'restoreprofile':
try:
clProfile.displayName = str(myProfile["displayName"])
clProfile.statusMessage = str(myProfile["statusMessage"])
clProfile.pictureStatus = str(myProfile["pictureStatus"])
cl.updateProfileAttribute(8, clProfile.pictureStatus)
cl.updateProfile(clProfile)
cl.sendMessage(msg.to, "Berhasil restore profile tunggu beberapa saat sampai profile berubah")
except:
cl.sendMessage(msg.to, "Gagal restore profile")
#==============================================================================#
elif msg.text.lower().startswith("mimicadd "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
settings["mimic"]["target"][target] = True
cl.sendMessage(msg.to,"已加入模仿名單!")
break
except:
cl.sendMessage(msg.to,"添加失敗 !")
break
elif msg.text.lower().startswith("mimicdel "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del settings["模仿名單"]["target"][target]
cl.sendMessage(msg.to,"刪除成功 !")
break
except:
cl.sendMessage(msg.to,"刪除失敗 !")
break
elif text.lower() == 'mimiclist':
if settings["mimic"]["target"] == {}:
cl.sendMessage(msg.to,"未設定模仿目標")
else:
mc = "╔══[ Mimic List ]"
for mi_d in settings["mimic"]["target"]:
mc += "\n╠ "+cl.getContact(mi_d).displayName
cl.sendMessage(msg.to,mc + "\n╚══[ Finish ]")
elif "mimic" in msg.text.lower():
sep = text.split(" ")
mic = text.replace(sep[0] + " ","")
if mic == "on":
if settings["mimic"]["status"] == False:
settings["mimic"]["status"] = True
cl.sendMessage(msg.to,"Reply Message on")
elif mic == "off":
if settings["mimic"]["status"] == True:
settings["mimic"]["status"] = False
cl.sendMessage(msg.to,"Reply Message off")
#==============================================================================#
elif text.lower() == 'groupcreator':
group = cl.getGroup(to)
GS = group.creator.mid
cl.sendContact(to, GS)
elif text.lower() == 'groupid':
gid = cl.getGroup(to)
cl.sendMessage(to, "[ID Group : ]\n" + gid.id)
elif text.lower() == 'grouppicture':
group = cl.getGroup(to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithURL(to, path)
elif text.lower() == 'groupname':
gid = cl.getGroup(to)
cl.sendMessage(to, "[群組名稱 : ]\n" + gid.name)
elif text.lower() == 'grouplink':
if msg.toType == 2:
group = cl.getGroup(to)
if group.preventedJoinByTicket == False:
ticket = cl.reissueGroupTicket(to)
cl.sendMessage(to, "[ Group Ticket ]\nhttps://cl.me/R/ti/g/{}".format(str(ticket)))
else:
cl.sendMessage(to, "Grouplink未開啟 {}openlink".format(str(settings["keyCommand"])))
elif text.lower() == 'link off':
if msg.toType == 2:
group = cl.getGroup(to)
if group.preventedJoinByTicket == False:
cl.sendMessage(to, "群組網址已關")
else:
group.preventedJoinByTicket = False
cl.updateGroup(group)
cl.sendMessage(to, "關閉成功")
elif text.lower() == 'link on':
if msg.toType == 2:
group = cl.getGroup(to)
if group.preventedJoinByTicket == True:
cl.sendMessage(to, "群組網址已開")
else:
group.preventedJoinByTicket = True
cl.updateGroup(group)
cl.sendMessage(to, "開啟成功")
elif text.lower() == 'groupinfo':
group = cl.getGroup(to)
try:
gCreator = group.creator.displayName
except:
gCreator = "不明"
if group.invitee is None:
gPending = "0"
else:
gPending = str(len(group.invitee))
if group.preventedJoinByTicket == True:
gQr = "關閉"
gTicket = "無"
else:
gQr = "開啟"
gTicket = "https://cl.me/R/ti/g/{}".format(str(cl.reissueGroupTicket(group.id)))
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
ret_ = "╔══[ Group Info ]"
ret_ += "\n╠ 群組名稱 : {}".format(str(group.name))
ret_ += "\n╠ 群組 Id : {}".format(group.id)
ret_ += "\n╠ 創建者 : {}".format(str(gCreator))
ret_ += "\n╠ 群組人數 : {}".format(str(len(group.members)))
ret_ += "\n╠ 邀請中 : {}".format(gPending)
ret_ += "\n╠ 網址狀態 : {}".format(gQr)
ret_ += "\n╠ 群組網址 : {}".format(gTicket)
ret_ += "\n╚══[ Finish ]"
cl.sendMessage(to, str(ret_))
cl.sendImageWithURL(to, path)
elif text.lower() == 'groupmemberlist':
if msg.toType == 2:
group = cl.getGroup(to)
ret_ = "╔══[ 成員名單 ]"
no = 0 + 1
for mem in group.members:
ret_ += "\n╠ {}. {}".format(str(no), str(mem.displayName))
no += 1
ret_ += "\n╚══[ 全部成員共 {} 人]".format(str(len(group.members)))
cl.sendMessage(to, str(ret_))
elif text.lower() == 'grouplist':
groups = cl.groups
ret_ = "╔══[ Group List ]"
no = 0 + 1
for gid in groups:
group = cl.getGroup(gid)
ret_ += "\n╠ {}. {} | {}".format(str(no), str(group.name), str(len(group.members)))
no += 1
ret_ += "\n╚══[ Total {} Groups ]".format(str(len(groups)))
cl.sendMessage(to, str(ret_))
elif msg.text.lower().startswith("nk "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.sendMessage(to,"Fuck you")
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendMessage(to,"Error")
elif msg.text.lower().startswith("ri "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.sendMessage(to,"來回機票一張ww")
cl.kickoutFromGroup(msg.to,[target])
cl.inviteIntoGroup(to,[target])
except:
cl.sendMessage(to,"Error")
elif text.lower() == 'nk':
if msg.toType == 2:
print ("[ 19 ] KICK ALL MEMBER")
_name = msg.text.replace("Byeall","")
gs = cl.getGroup(msg.to)
cl.sendMessage(msg.to,"Sorry guys")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendMessage(msg.to,"Not Found")
else:
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
cl.sendMessage(msg.to,"")
elif ("Gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendMessage(msg.to,"It can't be used besides the group.")
elif text.lower() == 'cancel':
if msg.toType == 2:
group = cl.getGroup(to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendMessage(msg.to,"已取消所有邀請!")
elif ("Inv " in msg.text):
if msg.toType == 2:
midd = msg.text.replace("Inv ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(to,[midd])
#==============================================================================#
elif text.lower() == 'tagall':
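# Mention every group member: members are processed in chunks of 100, and each
# "@Alin \n" placeholder gets an S/E character-offset entry in the MENTION
# contentMetadata pointing at one member's mid.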
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
k = len(nama)//100
for a in range(k+1):
txt = u''
s=0
b=[]
for i in group.members[a*100 : (a+1)*100]:
b.append({"S":str(s), "E" :str(s+6), "M":i.mid})
s += 7
txt += u'@Alin \n'
cl.sendMessage(to, text=txt, contentMetadata={u'MENTION': json.dumps({'MENTIONEES':b})}, contentType=0)
cl.sendMessage(to, "Total {} Mention".format(str(len(nama))))
elif text.lower() == 'sr':
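# Set a read point: remember this message id and reset the recorded readers,
# so later read receipts (op.type 55) can be reported with the "lr" command.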
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
bln = bulan[int(bln) - 1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if msg.to in read['readPoint']:
try:
del read['readPoint'][msg.to]
del read['readMember'][msg.to]
del read['readTime'][msg.to]
except:
pass
read['readPoint'][msg.to] = msg.id
read['readMember'][msg.to] = ""
read['readTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
read['ROM'][msg.to] = {}
with open('read.json', 'w') as fp:
json.dump(read, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to,"偵測點已設置")
else:
try:
del read['readPoint'][msg.to]
del read['readMember'][msg.to]
del read['readTime'][msg.to]
except:
pass
read['readPoint'][msg.to] = msg.id
read['readMember'][msg.to] = ""
read['readTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
read['ROM'][msg.to] = {}
with open('read.json', 'w') as fp:
json.dump(read, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to, "Set reading point:\n" + readTime)
elif text.lower() == 'readcancel':
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
bln = bulan[int(bln) - 1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if msg.to not in read['readPoint']:
cl.sendMessage(msg.to,"偵測點已取消")
else:
try:
del read['readPoint'][msg.to]
del read['readMember'][msg.to]
del read['readTime'][msg.to]
except:
pass
cl.sendMessage(msg.to, "Delete reading point:\n" + readTime)
elif text.lower() == 'resetread':
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
bln = bulan[int(bln) - 1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if msg.to in read["readPoint"]:
try:
del read["readPoint"][msg.to]
del read["readMember"][msg.to]
del read["readTime"][msg.to]
except:
pass
cl.sendMessage(msg.to, "Reset reading point:\n" + readTime)
else:
cl.sendMessage(msg.to, "偵測點未設置?")
elif text.lower() == 'lr':
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
bln = bulan[int(bln) - 1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if receiver in read['readPoint']:
if read["ROM"][receiver] == {}:
cl.sendMessage(receiver,"[ 已讀的人 ]:\nNone")
else:
chiya = []
for rom in read["ROM"][receiver].items():
chiya.append(rom[1])
cmem = cl.getContacts(chiya)
zx = ""
zxc = ""
zx2 = []
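# Build the reader list as one "@c" placeholder per reader, recording S/E
# character offsets so each placeholder is rendered as a mention of that reader.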
xpesan = '[ 已讀的人 ]:\n'
for x in range(len(cmem)):
xname = str(cmem[x].displayName)
pesan = ''
pesan2 = pesan+"@c\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid}
zx2.append(zx)
zxc += pesan2
text = xpesan+ zxc + "\n[ 已讀時間 ]: \n" + readTime
try:
cl.sendMessage(receiver, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
except Exception as error:
print (error)
pass
else:
cl.sendMessage(receiver,"尚未設置偵測點")
#==============================================================================#
elif msg.text.lower().startswith("ban "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
settings["blacklist"][target] = True
cl.sendMessage(msg.to,"已加入黑單!")
break
except:
cl.sendMessage(msg.to,"添加失敗 !")
break
elif msg.text.lower().startswith("unban "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del settings["blacklist"][target]
cl.sendMessage(msg.to,"刪除成功 !")
break
except:
cl.sendMessage(msg.to,"刪除失敗 !")
break
elif text.lower() == 'banlist':
if settings["blacklist"] == {}:
cl.sendMessage(msg.to,"無黑單成員!")
else:
mc = "╔══[ Black List ]"
for mi_d in settings["blacklist"]:
mc += "\n╠ "+cl.getContact(mi_d).displayName
cl.sendMessage(msg.to,mc + "\n╚══[ Finish ]")
elif text.lower() == 'nkban':
if msg.toType == 2:
group = cl.getGroup(to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in settings["blacklist"]:
matched_list += [mid_ for mid_ in gMembMids if mid_ == tag]
if matched_list == []:
cl.sendMessage(msg.to,"There was no blacklist user")
return
for jj in matched_list:
cl.kickoutFromGroup(msg.to,[jj])
cl.sendMessage(msg.to,"Blacklist kicked out")
elif text.lower() == 'cleanban':
settings["blacklist"] = {}
cl.sendMessage(msg.to,"已清空黑單!")
elif text.lower() == 'banmidlist':
if settings["blacklist"] == {}:
cl.sendMessage(msg.to,"無黑單成員!")
else:
mc = "╔══[ Black List ]"
for mi_d in settings["blacklist"]:
mc += "\n╠ "+mi_d
cl.sendMessage(to,mc + "\n╚══[ Finish ]")
#==============================================================================#
elif "Copy " in msg.text:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = cl.getContact(target)
X = contact.displayName
profile = cl.getProfile()
profile.displayName = X
cl.updateProfile(profile)
cl.sendMessage(to, "Success...")
Y = contact.statusMessage
lol = cl.getProfile()
lol.statusMessage = Y
cl.updateProfile(lol)
P = contact.pictureStatus
pic = cl.getProfile()
pic.pictureStatus = P
cl.updateProfilePicture(P)
cl.cloneContactProfile(target)
except Exception as e:
cl.sendMessage(to, "Failed!")
elif text.lower() == 'cc9487':
if sender in ['ua10c2ad470b4b6e972954e1140ad1891']:
python = sys.executable
os.execl(python, python, *sys.argv)
else:
pass
#==============================================================================#
elif text.lower() == 'calender':
tz = pytz.timezone("Asia/Makassar")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
bln = bulan[int(bln) - 1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
cl.sendMessage(msg.to, readTime)
elif "screenshotwebsite" in msg.text.lower():
sep = text.split(" ")
query = text.replace(sep[0] + " ","")
with requests.session() as web:
r = web.get("http://rahandiapi.herokuapp.com/sswebAPI?key=betakey&link={}".format(urllib.parse.quote(query)))
data = r.text
data = json.loads(data)
cl.sendImageWithURL(to, data["result"])
elif "checkdate" in msg.text.lower():
sep = msg.text.split(" ")
tanggal = msg.text.replace(sep[0] + " ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
ret_ = "╔══[ D A T E ]"
ret_ += "\n╠ Date Of Birth : {}".format(str(data["data"]["lahir"]))
ret_ += "\n╠ Age : {}".format(str(data["data"]["usia"]))
ret_ += "\n╠ Birthday : {}".format(str(data["data"]["ultah"]))
ret_ += "\n╠ Zodiak : {}".format(str(data["data"]["zodiak"]))
ret_ += "\n╚══[ Success ]"
cl.sendMessage(to, str(ret_))
elif msg.contentType == 7:
if settings["checkSticker"] == True:
stk_id = msg.contentMetadata['STKID']
stk_ver = msg.contentMetadata['STKVER']
pkg_id = msg.contentMetadata['STKPKGID']
ret_ = "╔══[ Sticker Info ]"
ret_ += "\n╠ STICKER ID : {}".format(stk_id)
ret_ += "\n╠ STICKER PACKAGES ID : {}".format(pkg_id)
ret_ += "\n╠ STICKER VERSION : {}".format(stk_ver)
ret_ += "\n╠ STICKER URL : line://shop/detail/{}".format(pkg_id)
ret_ += "\n╚══[ Finish ]"
cl.sendMessage(to, str(ret_))
elif msg.contentType == 13:
if settings["copy"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
targets = []
for s in groups.members:
if _name in s.displayName:
print ("[Target] Copy")
break
else:
targets.append(copy)
if targets == []:
cl.sendMessage(msg.to, "Not Found...")
pass
else:
for target in targets:
try:
cl.cloneContactProfile(target)
cl.sendMessage(msg.to, "Berhasil clone member tunggu beberapa saat sampai profile berubah")
settings['copy'] = False
break
except:
msg.contentMetadata = {'mid': target}
settings["copy"] = False
break
#==============================================================================#
if op.type == 26:
print ("[ 26 ] RECEIVE MESSAGE")
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0:
if sender != cl.profile.mid:
to = sender
else:
to = receiver
else:
to = receiver
if settings["autoRead"] == True:
cl.sendChatChecked(to, msg_id)
if to in read["readPoint"]:
if sender not in read["ROM"][to]:
read["ROM"][to][sender] = True
if sender in settings["mimic"]["target"] and settings["mimic"]["status"] == True and settings["mimic"]["target"][sender] == True:
text = msg.text
if text is not None:
cl.sendMessage(msg.to,text)
if msg.contentType == 0 and sender not in clMID and msg.toType == 2:
if 'MENTION' in msg.contentMetadata:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if clMID in mention["M"]:
if settings["detectMention"] == True:
contact = cl.getContact(sender)
cl.sendMessage(to, "sundala nu")
sendMessageWithMention(to, contact.mid)
break
#==============================================================================#
if op.type == 65:
print ("[ 65 ] REREAD")
try:
at = op.param1
msg_id = op.param2
if settings["reread"] == True:
if msg_id in msg_dict:
if msg_dict[msg_id]["from"] not in bl:
cl.sendMessage(at,"[收回訊息者]\n%s\n[訊息內容]\n%s"%(cl.getContact(msg_dict[msg_id]["from"]).displayName,msg_dict[msg_id]["text"]))
del msg_dict[msg_id]
else:
pass
except Exception as e:
print (e)
#==============================================================================#
if op.type == 55:
print ("[ 55 ] NOTIFIED READ MESSAGE")
try:
if op.param1 in read['readPoint']:
if op.param2 in read['readMember'][op.param1]:
pass
else:
read['readMember'][op.param1] += op.param2
read['ROM'][op.param1][op.param2] = op.param2
backupData()
else:
pass
except:
pass
except Exception as error:
logError(error)
#==============================================================================#
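# Long-poll loop: fetch up to 50 pending operations per trace, handle each one,
# then advance the revision cursor.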
while True:
try:
ops = oepoll.singleTrace(count=50)
if ops is not None:
for op in ops:
lineBot(op)
oepoll.setRevision(op.revision)
except Exception as e:
logError(e)
| 51.742829
| 168
| 0.404267
| 4,504
| 52,312
| 4.70071
| 0.132549
| 0.067542
| 0.035613
| 0.039108
| 0.508596
| 0.436992
| 0.400057
| 0.371717
| 0.340591
| 0.330389
| 0
| 0.011643
| 0.443436
| 52,312
| 1,010
| 169
| 51.794059
| 0.710287
| 0.037104
| 0
| 0.448598
| 0
| 0.001038
| 0.133908
| 0.006538
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006231
| false
| 0.012461
| 0.005192
| 0
| 0.017653
| 0.014538
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9cf5fa54caecef97e6454178f438ce16bc99d7b
| 241
|
py
|
Python
|
fetch_data.py
|
bitfag/bt-macd-binance
|
eeffe52f8e561ff521629839078ff886e7bf700e
|
[
"MIT"
] | null | null | null |
fetch_data.py
|
bitfag/bt-macd-binance
|
eeffe52f8e561ff521629839078ff886e7bf700e
|
[
"MIT"
] | null | null | null |
fetch_data.py
|
bitfag/bt-macd-binance
|
eeffe52f8e561ff521629839078ff886e7bf700e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from btmacd.binance_fetcher import BinanceFetcher
def main():
fetcher = BinanceFetcher("BTCUSDT", filename="binance_ohlc.csv", start_date="01.01.2018")
fetcher.fetch()
if __name__ == "__main__":
main()
| 18.538462
| 93
| 0.705394
| 30
| 241
| 5.3
| 0.766667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039024
| 0.149378
| 241
| 12
| 94
| 20.083333
| 0.736585
| 0.082988
| 0
| 0
| 0
| 0
| 0.186364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9d0d7e9fc82e29bf1385d169d21f03d43d467e2
| 25,508
|
py
|
Python
|
tensorflow_probability/python/mcmc/diagnostic.py
|
Frightera/probability
|
deac4562cbc1056e6abebc7450218d38444fe65d
|
[
"Apache-2.0"
] | 1
|
2022-03-06T15:37:18.000Z
|
2022-03-06T15:37:18.000Z
|
tensorflow_probability/python/mcmc/diagnostic.py
|
Frightera/probability
|
deac4562cbc1056e6abebc7450218d38444fe65d
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/mcmc/diagnostic.py
|
Frightera/probability
|
deac4562cbc1056e6abebc7450218d38444fe65d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for Markov Chain Monte Carlo (MCMC) sampling.
@@effective_sample_size
@@potential_scale_reduction
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import stats
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import nest_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import
__all__ = [
'effective_sample_size',
'potential_scale_reduction',
]
def effective_sample_size(states,
filter_threshold=0.,
filter_beyond_lag=None,
filter_beyond_positive_pairs=False,
cross_chain_dims=None,
validate_args=False,
name=None):
"""Estimate a lower bound on effective sample size for each independent chain.
Roughly speaking, "effective sample size" (ESS) is the size of an iid sample
with the same variance as `state`.
More precisely, given a stationary sequence of possibly correlated random
variables `X_1, X_2, ..., X_N`, identically distributed, ESS is the
number such that
```
Variance{ N**-1 * Sum{X_i} } = ESS**-1 * Variance{ X_1 }.
```
If the sequence is uncorrelated, `ESS = N`. If the sequence is positively
auto-correlated, `ESS` will be less than `N`. If there are negative
correlations, then `ESS` can exceed `N`.
Some math shows that, with `R_k` the auto-correlation sequence,
`R_k := Covariance{X_1, X_{1+k}} / Variance{X_1}`, we have
```
ESS(N) = N / [ 1 + 2 * ( (N - 1) / N * R_1 + ... + 1 / N * R_{N-1} ) ]
```
This function estimates the above by first estimating the auto-correlation.
Since `R_k` must be estimated using only `N - k` samples, it becomes
progressively noisier for larger `k`. For this reason, the summation over
`R_k` should be truncated at some number `filter_beyond_lag < N`. This
function provides two methods to perform this truncation.
* `filter_threshold` -- since many MCMC methods generate chains where `R_k >
0`, a reasonable criterion is to truncate at the first index where the
estimated auto-correlation becomes negative. This method does not estimate
the `ESS` of super-efficient chains (where `ESS > N`) correctly.
* `filter_beyond_positive_pairs` -- reversible MCMC chains produce
an auto-correlation sequence with the property that pairwise sums of the
elements of that sequence are positive [Geyer][1], i.e.
`R_{2k} + R_{2k + 1} > 0` for `k in {0, ..., N/2}`. Deviations are only
possible due to noise. This method truncates the auto-correlation sequence
where the pairwise sums become non-positive.
The arguments `filter_beyond_lag`, `filter_threshold` and
`filter_beyond_positive_pairs` are filters intended to remove noisy tail terms
from `R_k`. You can combine `filter_beyond_lag` with `filter_threshold` or
`filter_beyond_positive_pairs`. E.g., combining `filter_beyond_lag` and
`filter_beyond_positive_pairs` means that terms are removed if they were to be
filtered under the `filter_beyond_lag` OR `filter_beyond_positive_pairs`
criteria.
This function can also compute cross-chain ESS following
[Vehtari et al. (2019)][2] by specifying the `cross_chain_dims` argument.
Cross-chain ESS takes into account the cross-chain variance to reduce the ESS
in cases where the chains are not mixing well. In general, this will be a
smaller number than computing the ESS for individual chains and then summing
them. In an extreme case where the chains have fallen into K non-mixing modes,
this function will return ESS ~ K. Even when chains are mixing well it is
still preferable to compute cross-chain ESS via this method because it will
reduce the noise in the estimate of `R_k`, reducing the need for truncation.
Args:
states: `Tensor` or Python structure of `Tensor` objects. Dimension zero
should index identically distributed states.
filter_threshold: `Tensor` or Python structure of `Tensor` objects. Must
broadcast with `state`. The sequence of auto-correlations is truncated
after the first appearance of a term less than `filter_threshold`.
Setting to `None` means we use no threshold filter. Since `|R_k| <= 1`,
setting to any number less than `-1` has the same effect. Ignored if
`filter_beyond_positive_pairs` is `True`.
filter_beyond_lag: `Tensor` or Python structure of `Tensor` objects. Must
be `int`-like and scalar valued. The sequence of auto-correlations is
truncated to this length. Setting to `None` means we do not filter based
on the size of lags.
filter_beyond_positive_pairs: Python boolean. If `True`, only consider the
initial auto-correlation sequence where the pairwise sums are positive.
cross_chain_dims: An integer `Tensor` or a structure of integer `Tensors`
corresponding to each state component. If a list of `states` is provided,
then this argument should also be a list of the same length. Which
dimensions of `states` to treat as independent chains that ESS will be
summed over. If `None`, no summation is performed. Note this requires at
least 2 chains.
validate_args: Whether to add runtime checks of argument validity. If False,
and arguments are incorrect, correct behavior is not guaranteed.
name: `String` name to prepend to created ops.
Returns:
ess: `Tensor` structure parallel to `states`. The effective sample size of
each component of `states`. If `cross_chain_dims` is None, the shape will
be `states.shape[1:]`. Otherwise, the shape is `tf.reduce_mean(states,
cross_chain_dims).shape[1:]`.
Raises:
ValueError: If `states` and `filter_threshold` or `states` and
`filter_beyond_lag` are both structures of different shapes.
ValueError: If `cross_chain_dims` is not `None` and there are less than 2
chains.
#### Examples
We use ESS to estimate standard error.
```
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
target = tfd.MultivariateNormalDiag(scale_diag=[1., 2.])
# Get 1000 states from one chain.
states = tfp.mcmc.sample_chain(
num_burnin_steps=200,
num_results=1000,
current_state=tf.constant([0., 0.]),
trace_fn=None,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target.log_prob,
step_size=0.05,
num_leapfrog_steps=20))
states.shape
==> (1000, 2)
ess = effective_sample_size(states, filter_beyond_positive_pairs=True)
==> Shape (2,) Tensor
mean, variance = tf.nn.moments(states, axis=0)
standard_error = tf.sqrt(variance / ess)
```
#### References
[1]: Charles J. Geyer, Practical Markov chain Monte Carlo (with discussion).
Statistical Science, 7:473-511, 1992.
[2]: Aki Vehtari, Andrew Gelman, Daniel Simpson, Bob Carpenter, Paul-Christian
Burkner. Rank-normalization, folding, and localization: An improved R-hat
for assessing convergence of MCMC, 2019. Retrieved from
http://arxiv.org/abs/1903.08008
"""
if cross_chain_dims is None:
cross_chain_dims = nest_util.broadcast_structure(states, None)
filter_beyond_lag = nest_util.broadcast_structure(states, filter_beyond_lag)
filter_threshold = nest_util.broadcast_structure(states, filter_threshold)
filter_beyond_positive_pairs = nest_util.broadcast_structure(
states, filter_beyond_positive_pairs)
# Process items, one at a time.
def single_state(*args):
return _effective_sample_size_single_state(
*args, validate_args=validate_args)
with tf.name_scope('effective_sample_size' if name is None else name):
return nest.map_structure_up_to(
states,
single_state,
states, filter_beyond_lag, filter_threshold,
filter_beyond_positive_pairs, cross_chain_dims)
def _effective_sample_size_single_state(states, filter_beyond_lag,
filter_threshold,
filter_beyond_positive_pairs,
cross_chain_dims,
validate_args):
"""ESS computation for one single Tensor argument."""
with tf.name_scope('effective_sample_size_single_state'):
states = tf.convert_to_tensor(states, name='states')
dt = states.dtype
# filter_beyond_lag == None ==> auto_corr is the full sequence.
auto_cov = stats.auto_correlation(
states, axis=0, max_lags=filter_beyond_lag, normalize=False)
n = _axis_size(states, axis=0)
if cross_chain_dims is not None:
num_chains = _axis_size(states, cross_chain_dims)
num_chains_ = tf.get_static_value(num_chains)
assertions = []
msg = ('When `cross_chain_dims` is not `None`, there must be > 1 chain '
'in `states`.')
if num_chains_ is not None:
if num_chains_ < 2:
raise ValueError(msg)
elif validate_args:
assertions.append(
assert_util.assert_greater(num_chains, 1., message=msg))
with tf.control_dependencies(assertions):
# We're computing the R[k] from equation 10 of Vehtari et al.
# (2019):
#
# R[k] := 1 - (W - 1/C * Sum_{c=1}^C s_c**2 R[k, c]) / (var^+),
#
# where:
# C := number of chains
# N := length of chains
# x_hat[c] := 1 / N Sum_{n=1}^N x[n, c], chain mean.
# x_hat := 1 / C Sum_{c=1}^C x_hat[c], overall mean.
# W := 1/C Sum_{c=1}^C s_c**2, within-chain variance.
# B := N / (C - 1) Sum_{c=1}^C (x_hat[c] - x_hat)**2, between chain
# variance.
# s_c**2 := 1 / (N - 1) Sum_{n=1}^N (x[n, c] - x_hat[c])**2, chain
# variance
# R[k, m] := auto_corr[k, m, ...], auto-correlation indexed by chain.
# var^+ := (N - 1) / N * W + B / N
cross_chain_dims = ps.non_negative_axis(
cross_chain_dims, ps.rank(states))
# B / N
between_chain_variance_div_n = _reduce_variance(
tf.reduce_mean(states, axis=0),
biased=False, # This makes the denominator be C - 1.
axis=cross_chain_dims - 1)
# W * (N - 1) / N
biased_within_chain_variance = tf.reduce_mean(auto_cov[0],
cross_chain_dims - 1)
# var^+
approx_variance = (
biased_within_chain_variance + between_chain_variance_div_n)
# 1/C * Sum_{c=1}^C s_c**2 R[k, c]
mean_auto_cov = tf.reduce_mean(auto_cov, cross_chain_dims)
auto_corr = 1. - (biased_within_chain_variance -
mean_auto_cov) / approx_variance
else:
auto_corr = auto_cov / auto_cov[:1]
num_chains = 1
# With R[k] := auto_corr[k, ...],
# ESS = N / {1 + 2 * Sum_{k=1}^N R[k] * (N - k) / N}
# = N / {-1 + 2 * Sum_{k=0}^N R[k] * (N - k) / N} (since R[0] = 1)
# approx N / {-1 + 2 * Sum_{k=0}^M R[k] * (N - k) / N}
# where M is the filter_beyond_lag truncation point chosen above.
# Get the factor (N - k) / N, and give it shape [M, 1,...,1], having total
# ndims the same as auto_corr
k = tf.range(0., _axis_size(auto_corr, axis=0))
nk_factor = (n - k) / n
if tensorshape_util.rank(auto_corr.shape) is not None:
new_shape = [-1] + [1] * (tensorshape_util.rank(auto_corr.shape) - 1)
else:
new_shape = tf.concat(
([-1],
tf.ones([tf.rank(auto_corr) - 1], dtype=tf.int32)),
axis=0)
nk_factor = tf.reshape(nk_factor, new_shape)
weighted_auto_corr = nk_factor * auto_corr
if filter_beyond_positive_pairs:
def _sum_pairs(x):
x_len = ps.shape(x)[0]
# For odd sequences, we drop the final value.
x = x[:x_len - x_len % 2]
new_shape = ps.concat([[x_len // 2, 2], ps.shape(x)[1:]], axis=0)
return tf.reduce_sum(tf.reshape(x, new_shape), 1)
# Pairwise sums are all positive for auto-correlation spectra derived from
# reversible MCMC chains.
# E.g. imagine the pairwise sums are [0.2, 0.1, -0.1, -0.2]
# Step 1: mask = [False, False, True, True]
mask = _sum_pairs(auto_corr) < 0.
# Step 2: mask = [0, 0, 1, 1]
mask = tf.cast(mask, dt)
# Step 3: mask = [0, 0, 1, 2]
mask = tf.cumsum(mask, axis=0)
# Step 4: mask = [1, 1, 0, 0]
mask = tf.maximum(1. - mask, 0.)
# N.B. this reduces the length of weighted_auto_corr by a factor of 2.
# It still works fine in the formula below.
weighted_auto_corr = _sum_pairs(weighted_auto_corr) * mask
elif filter_threshold is not None:
filter_threshold = tf.convert_to_tensor(
filter_threshold, dtype=dt, name='filter_threshold')
# Get a binary mask to zero out values of auto_corr below the threshold.
# mask[i, ...] = 1 if auto_corr[j, ...] > threshold for all j <= i,
# mask[i, ...] = 0, otherwise.
# So, along dimension zero, the mask will look like [1, 1, ..., 0, 0,...]
# Building step by step,
# Assume auto_corr = [1, 0.5, 0.0, 0.3], and filter_threshold = 0.2.
# Step 1: mask = [False, False, True, False]
mask = auto_corr < filter_threshold
# Step 2: mask = [0, 0, 1, 0]
mask = tf.cast(mask, dtype=dt)
# Step 3: mask = [0, 0, 1, 1]
mask = tf.cumsum(mask, axis=0)
# Step 4: mask = [1, 1, 0, 0]
mask = tf.maximum(1. - mask, 0.)
weighted_auto_corr *= mask
return num_chains * n / (-1 + 2 * tf.reduce_sum(weighted_auto_corr, axis=0))
def potential_scale_reduction(chains_states,
independent_chain_ndims=1,
split_chains=False,
validate_args=False,
name=None):
"""Gelman and Rubin (1992)'s potential scale reduction for chain convergence.
Given `N > 1` states from each of `C > 1` independent chains, the potential
scale reduction factor, commonly referred to as R-hat, measures convergence of
the chains (to the same target) by testing for equality of means.
Specifically, R-hat measures the degree to which variance (of the means)
between chains exceeds what one would expect if the chains were identically
distributed. See [Gelman and Rubin (1992)][1]; [Brooks and Gelman (1998)][2].
Some guidelines:
* The initial state of the chains should be drawn from a distribution
overdispersed with respect to the target.
* If all chains converge to the target, then as `N --> infinity`, R-hat --> 1.
Before that, R-hat > 1 (except in pathological cases, e.g. if the chain
paths were identical).
* The above holds for any number of chains `C > 1`. Increasing `C` does
improve effectiveness of the diagnostic.
* Sometimes, R-hat < 1.2 is used to indicate approximate convergence, but of
course this is problem-dependent. See [Brooks and Gelman (1998)][2].
* R-hat only measures non-convergence of the mean. If higher moments, or
other statistics are desired, a different diagnostic should be used. See
[Brooks and Gelman (1998)][2].
Args:
chains_states: `Tensor` or Python structure of `Tensor`s representing the
states of a Markov Chain at each result step. The `ith` state is
assumed to have shape `[Ni, Ci1, Ci2,...,CiD] + A`.
Dimension `0` indexes the `Ni > 1` result steps of the Markov Chain.
Dimensions `1` through `D` index the `Ci1 x ... x CiD` independent
chains to be tested for convergence to the same target.
The remaining dimensions, `A`, can have any shape (even empty).
independent_chain_ndims: Integer type `Tensor` with value `>= 1` giving the
number of dimensions, from `dim = 1` to `dim = D`, holding independent
chain results to be tested for convergence.
split_chains: Python `bool`. If `True`, divide samples from each chain into
first and second halves, treating these as separate chains. This makes
R-hat more robust to non-stationary chains, and is recommended in [3].
validate_args: Whether to add runtime checks of argument validity. If False,
and arguments are incorrect, correct behavior is not guaranteed.
name: `String` name to prepend to created ops. Default:
`potential_scale_reduction`.
Returns:
`Tensor` structure parallel to `chains_states` representing the
R-hat statistic for the state(s). Same `dtype` as `state`, and
shape equal to `state.shape[1 + independent_chain_ndims:]`.
Raises:
ValueError: If `independent_chain_ndims < 1`.
#### Examples
Diagnosing convergence by monitoring 10 chains that each attempt to
sample from a 2-variate normal.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
target = tfd.MultivariateNormalDiag(scale_diag=[1., 2.])
# Get 10 (2x) overdispersed initial states.
initial_state = target.sample(10) * 2.
==> (10, 2)
# Get 1000 samples from the 10 independent chains.
chains_states = tfp.mcmc.sample_chain(
num_burnin_steps=200,
num_results=1000,
current_state=initial_state,
trace_fn=None,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target.log_prob,
step_size=0.05,
num_leapfrog_steps=20))
chains_states.shape
==> (1000, 10, 2)
rhat = tfp.mcmc.diagnostic.potential_scale_reduction(
chains_states, independent_chain_ndims=1)
# The second dimension needed a longer burn-in.
rhat.eval()
==> [1.05, 1.3]
```
To see why R-hat is reasonable, let `X` be a random variable drawn uniformly
from the combined states (combined over all chains). Then, in the limit
`N, C --> infinity`, with `E`, `Var` denoting expectation and variance,
```R-hat = ( E[Var[X | chain]] + Var[E[X | chain]] ) / E[Var[X | chain]].```
Using the law of total variance, the numerator is the variance of the combined
states, and the denominator is the total variance minus the variance of the
individual chain means. If the chains are all drawing from the same
distribution, they will have the same mean, and thus the ratio should be one.
#### References
[1]: Stephen P. Brooks and Andrew Gelman. General Methods for Monitoring
Convergence of Iterative Simulations. _Journal of Computational and
Graphical Statistics_, 7(4), 1998.
[2]: Andrew Gelman and Donald B. Rubin. Inference from Iterative Simulation
Using Multiple Sequences. _Statistical Science_, 7(4):457-472, 1992.
[3]: Aki Vehtari, Andrew Gelman, Daniel Simpson, Bob Carpenter, Paul-Christian
Burkner. Rank-normalization, folding, and localization: An improved R-hat
for assessing convergence of MCMC, 2019. Retrieved from
http://arxiv.org/abs/1903.08008
"""
# tf.get_static_value returns None iff a constant value (as a numpy
# array) is not efficiently computable. Therefore, we try constant_value then
# check for None.
icn_const_ = tf.get_static_value(
ps.convert_to_shape_tensor(independent_chain_ndims))
if icn_const_ is not None:
independent_chain_ndims = icn_const_
if icn_const_ < 1:
raise ValueError(
'Argument `independent_chain_ndims` must be `>= 1`, found: {}'.format(
independent_chain_ndims))
def single_state(s):
return _potential_scale_reduction_single_state(
s, independent_chain_ndims, split_chains, validate_args)
with tf.name_scope('potential_scale_reduction' if name is None else name):
return tf.nest.map_structure(single_state, chains_states)
def _potential_scale_reduction_single_state(state, independent_chain_ndims,
split_chains, validate_args):
"""potential_scale_reduction for one single state `Tensor`."""
# casting integers to floats for floating-point division
# check to see if the `state` is a numpy object for the numpy test suite
if dtype_util.as_numpy_dtype(state.dtype) is np.int64:
state = tf.cast(state, tf.float64)
elif dtype_util.is_integer(state.dtype):
state = tf.cast(state, tf.float32)
with tf.name_scope('potential_scale_reduction_single_state'):
# We assume exactly one leading dimension indexes e.g. correlated samples
# from each Markov chain.
state = tf.convert_to_tensor(state, name='state')
n_samples_ = tf.compat.dimension_value(state.shape[0])
if n_samples_ is not None: # If available statically.
if split_chains and n_samples_ < 4:
raise ValueError(
'Must provide at least 4 samples when splitting chains. '
'Found {}'.format(n_samples_))
if not split_chains and n_samples_ < 2:
raise ValueError(
'Must provide at least 2 samples. Found {}'.format(n_samples_))
elif validate_args:
if split_chains:
assertions = [assert_util.assert_greater(
ps.shape(state)[0], 4,
message='Must provide at least 4 samples when splitting chains.')]
with tf.control_dependencies(assertions):
state = tf.identity(state)
else:
assertions = [assert_util.assert_greater(
ps.shape(state)[0], 2,
message='Must provide at least 2 samples.')]
with tf.control_dependencies(assertions):
state = tf.identity(state)
# Define so it's not a magic number.
# Warning! `if split_chains` logic assumes this is 1!
sample_ndims = 1
if split_chains:
# Split the sample dimension in half, doubling the number of
# independent chains.
# For odd number of samples, keep all but the last sample.
state_shape = ps.shape(state)
n_samples = state_shape[0]
state = state[:n_samples - n_samples % 2]
# Suppose state = [0, 1, 2, 3, 4, 5]
# Step 1: reshape into [[0, 1, 2], [3, 4, 5]]
# E.g. reshape states of shape [a, b] into [2, a//2, b].
state = tf.reshape(
state,
ps.concat([[2, n_samples // 2], state_shape[1:]], axis=0)
)
# Step 2: Put the size `2` dimension in the right place to be treated as a
# chain, changing [[0, 1, 2], [3, 4, 5]] into [[0, 3], [1, 4], [2, 5]],
# reshaping [2, a//2, b] into [a//2, 2, b].
state = tf.transpose(
a=state,
perm=ps.concat(
[[1, 0], ps.range(2, ps.rank(state))], axis=0))
# We're treating the new dim as indexing 2 chains, so increment.
independent_chain_ndims += 1
sample_axis = ps.range(0, sample_ndims)
chain_axis = ps.range(sample_ndims,
sample_ndims + independent_chain_ndims)
sample_and_chain_axis = ps.range(
0, sample_ndims + independent_chain_ndims)
n = _axis_size(state, sample_axis)
m = _axis_size(state, chain_axis)
# In the language of Brooks and Gelman (1998),
# B / n is the between chain variance, the variance of the chain means.
# W is the within sequence variance, the mean of the chain variances.
b_div_n = _reduce_variance(
tf.reduce_mean(state, axis=sample_axis, keepdims=True),
sample_and_chain_axis,
biased=False)
w = tf.reduce_mean(
_reduce_variance(state, sample_axis, keepdims=True, biased=False),
axis=sample_and_chain_axis)
# sigma^2_+ is an estimate of the true variance, which would be unbiased if
# each chain was drawn from the target. c.f. "law of total variance."
sigma_2_plus = ((n - 1) / n) * w + b_div_n
return ((m + 1.) / m) * sigma_2_plus / w - (n - 1.) / (m * n)
# TODO(b/72873233) Move some variant of this to tfd.sample_stats.
def _reduce_variance(x, axis=None, biased=True, keepdims=False):
with tf.name_scope('reduce_variance'):
x = tf.convert_to_tensor(x, name='x')
mean = tf.reduce_mean(x, axis=axis, keepdims=True)
biased_var = tf.reduce_mean(
tf.math.squared_difference(x, mean), axis=axis, keepdims=keepdims)
if biased:
return biased_var
n = _axis_size(x, axis)
return (n / (n - 1.)) * biased_var
def _axis_size(x, axis=None):
"""Get number of elements of `x` in `axis`, as type `x.dtype`."""
if axis is None:
return ps.cast(ps.size(x), x.dtype)
return ps.cast(
ps.reduce_prod(
ps.gather(ps.shape(x), axis)), x.dtype)
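# Minimal NumPy sketch of the same (non-split) R-hat formula computed above,
# for one state of shape [n_samples, n_chains]. The function name and toy
# usage are hypothetical; the TensorFlow implementation above is the reference.
def _numpy_rhat_sketch(state):
  """Compute R-hat for a [n_samples, n_chains] NumPy array."""
  n, m = state.shape
  chain_means = state.mean(axis=0)             # one mean per chain
  b_div_n = chain_means.var(ddof=1)            # variance of the chain means
  w = state.var(axis=0, ddof=1).mean()         # mean within-chain variance
  sigma_2_plus = ((n - 1.) / n) * w + b_div_n  # pooled variance estimate
  return ((m + 1.) / m) * sigma_2_plus / w - (n - 1.) / (m * n)
# Example (hypothetical data): np.random.randn(500, 4) gives an R-hat close to
# 1 because all four "chains" are drawn from the same distribution.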
| 43.015177
| 85
| 0.663361
| 3,707
| 25,508
| 4.408417
| 0.180469
| 0.021295
| 0.01542
| 0.021417
| 0.282646
| 0.229225
| 0.187492
| 0.128809
| 0.120609
| 0.095766
| 0
| 0.022847
| 0.238121
| 25,508
| 592
| 86
| 43.087838
| 0.818051
| 0.604869
| 0
| 0.123153
| 0
| 0
| 0.055835
| 0.019799
| 0
| 0
| 0
| 0.001689
| 0.044335
| 1
| 0.044335
| false
| 0
| 0.059113
| 0.009852
| 0.157635
| 0.004926
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9d2c04ffcb32d5c9ad6c0f626a368e22db97763
| 4,504
|
py
|
Python
|
tests/data/s3_scrape_config.py
|
kids-first/kf-api-study-creator
|
93a79b108b6474f9b4135ace06c89ddcf63dd257
|
[
"Apache-2.0"
] | 3
|
2019-05-04T02:07:28.000Z
|
2020-10-16T17:47:44.000Z
|
tests/data/s3_scrape_config.py
|
kids-first/kf-api-study-creator
|
93a79b108b6474f9b4135ace06c89ddcf63dd257
|
[
"Apache-2.0"
] | 604
|
2019-02-21T18:14:51.000Z
|
2022-02-10T08:13:54.000Z
|
tests/data/s3_scrape_config.py
|
kids-first/kf-api-study-creator
|
93a79b108b6474f9b4135ace06c89ddcf63dd257
|
[
"Apache-2.0"
] | null | null | null |
"""
This is an extract config intended for S3 object manifests produced by TBD.
To use it, you must import it in another extract config and override at least
the `source_data_url`. You may also append additional operations to the
`operations` list.
For example you could have the following in your extract config module:
from kf_ingest_packages.common.extract_configs.s3_object_info import *
source_data_url = 'file://../data/kf-seq-data-bcm-chung-s3-objects.tsv'
operations.append(
value_map(
in_col='Key',
out_col=CONCEPT.BIOSPECIMEN.ID,
m=lambda x: x
)
)
"""
import os
from kf_lib_data_ingest.common import constants
from kf_lib_data_ingest.common.constants import GENOMIC_FILE
from kf_lib_data_ingest.common.concept_schema import CONCEPT
from kf_lib_data_ingest.etl.extract.operations import (
keep_map,
row_map,
value_map,
constant_map,
)
def file_ext(x):
"""
Get genomic file extension
"""
matches = [
file_ext for file_ext in FILE_EXT_FORMAT_MAP if x.endswith(file_ext)
]
if matches:
file_ext = max(matches, key=len)
else:
file_ext = None
return file_ext
FILE_EXT_FORMAT_MAP = {
"fq": GENOMIC_FILE.FORMAT.FASTQ,
"fastq": GENOMIC_FILE.FORMAT.FASTQ,
"fq.gz": GENOMIC_FILE.FORMAT.FASTQ,
"fastq.gz": GENOMIC_FILE.FORMAT.FASTQ,
"bam": GENOMIC_FILE.FORMAT.BAM,
"hgv.bam": GENOMIC_FILE.FORMAT.BAM,
"cram": GENOMIC_FILE.FORMAT.CRAM,
"bam.bai": GENOMIC_FILE.FORMAT.BAI,
"bai": GENOMIC_FILE.FORMAT.BAI,
"cram.crai": GENOMIC_FILE.FORMAT.CRAI,
"crai": GENOMIC_FILE.FORMAT.CRAI,
"g.vcf.gz": GENOMIC_FILE.FORMAT.GVCF,
"g.vcf.gz.tbi": GENOMIC_FILE.FORMAT.TBI,
"vcf.gz": GENOMIC_FILE.FORMAT.VCF,
"vcf": GENOMIC_FILE.FORMAT.VCF,
"vcf.gz.tbi": GENOMIC_FILE.FORMAT.TBI,
"peddy.html": "html",
}
DATA_TYPES = {
GENOMIC_FILE.FORMAT.FASTQ: GENOMIC_FILE.DATA_TYPE.UNALIGNED_READS,
GENOMIC_FILE.FORMAT.BAM: GENOMIC_FILE.DATA_TYPE.ALIGNED_READS,
GENOMIC_FILE.FORMAT.CRAM: GENOMIC_FILE.DATA_TYPE.ALIGNED_READS,
GENOMIC_FILE.FORMAT.BAI: "Aligned Reads Index",
GENOMIC_FILE.FORMAT.CRAI: "Aligned Reads Index",
GENOMIC_FILE.FORMAT.VCF: "Variant Calls",
GENOMIC_FILE.FORMAT.GVCF: "gVCF",
"g.vcf.gz.tbi": "gVCF Index",
"vcf.gz.tbi": "Variant Calls Index",
"html": "Other",
}
def filter_df_by_file_ext(df):
"""
Only keep rows where file extension is one of those in
FILE_EXT_FORMAT_MAP.keys
"""
df[CONCEPT.GENOMIC_FILE.FILE_FORMAT] = df["Key"].apply(
lambda x: file_format(x)
)
return df[df[CONCEPT.GENOMIC_FILE.FILE_FORMAT].notnull()]
source_data_url = (
'https://localhost:5002/download/study/SD_ME0WME0W/'
'file/SF_Y1JMXTTS/version/FV_4RYEMD71'
)
do_after_read = filter_df_by_file_ext
def s3_url(row):
"""
Create S3 URL for object from S3 bucket and key
"""
return f's3://{row["Bucket"]}/{row["Key"]}'
def file_format(x):
"""
    Get genomic file format by looking up the genomic file extension in the
    FILE_EXT_FORMAT_MAP dict
"""
# File format
return FILE_EXT_FORMAT_MAP.get(file_ext(x))
def data_type(x):
"""
Get genomic file data type by looking up file format in DATA_TYPES.
However, if the file's extension has `tbi` in it, then use the file
extension itself to do the data type lookup.
"""
ext = file_ext(x)
if "tbi" in ext:
data_type = DATA_TYPES.get(ext)
else:
data_type = DATA_TYPES.get(file_format(x))
return data_type
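# Illustrative lookups (hypothetical file names; the results follow from the
# dicts and helper functions above):
#   file_ext("sample.g.vcf.gz.tbi")  -> "g.vcf.gz.tbi" (longest matching key)
#   file_format("reads_R1.fastq.gz") -> GENOMIC_FILE.FORMAT.FASTQ
#   data_type("sample.g.vcf.gz.tbi") -> "gVCF Index" (looked up by extension
#                                       because the extension contains "tbi")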
operations = [
row_map(out_col=CONCEPT.GENOMIC_FILE.ID, m=lambda row: s3_url(row)),
row_map(
out_col=CONCEPT.GENOMIC_FILE.URL_LIST, m=lambda row: [s3_url(row)]
),
value_map(
in_col="Key",
out_col=CONCEPT.GENOMIC_FILE.FILE_NAME,
m=lambda x: os.path.split(x)[-1],
),
keep_map(in_col="Size", out_col=CONCEPT.GENOMIC_FILE.SIZE),
value_map(
in_col="ETag",
out_col=CONCEPT.GENOMIC_FILE.HASH_DICT,
m=lambda x: {constants.FILE.HASH.S3_ETAG.lower(): x.replace('"', "")},
),
constant_map(
out_col=CONCEPT.GENOMIC_FILE.AVAILABILITY,
m=constants.GENOMIC_FILE.AVAILABILITY.IMMEDIATE,
),
keep_map(
in_col=CONCEPT.GENOMIC_FILE.FILE_FORMAT,
out_col=CONCEPT.GENOMIC_FILE.FILE_FORMAT,
),
value_map(
in_col="Key",
out_col=CONCEPT.GENOMIC_FILE.DATA_TYPE,
m=lambda x: data_type(x),
),
]
| 27.463415
| 78
| 0.67984
| 672
| 4,504
| 4.321429
| 0.239583
| 0.162879
| 0.140496
| 0.065083
| 0.399793
| 0.225895
| 0.110537
| 0.070592
| 0.060606
| 0.027548
| 0
| 0.005843
| 0.202043
| 4,504
| 163
| 79
| 27.631902
| 0.80217
| 0.228686
| 0
| 0.128713
| 0
| 0
| 0.108148
| 0.020444
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049505
| false
| 0
| 0.049505
| 0
| 0.148515
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9d3222fd93bbc8ba199ba7a401394dc7531a2ff
| 665
|
py
|
Python
|
hard-gists/5c973ec1b5ab2e387646/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 21
|
2019-07-08T08:26:45.000Z
|
2022-01-24T23:53:25.000Z
|
hard-gists/5c973ec1b5ab2e387646/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 5
|
2019-06-15T14:47:47.000Z
|
2022-02-26T05:02:56.000Z
|
hard-gists/5c973ec1b5ab2e387646/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 17
|
2019-05-16T03:50:34.000Z
|
2021-01-14T14:35:12.000Z
|
import bpy
from bpy.app.handlers import persistent
bl_info = {
"name": "Playback Once",
"author": "Adhi Hargo",
"version": (1, 0, 0),
"blender": (2, 67, 3),
"location": "",
"description": "Playback once.",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Animation"}
@persistent
def stopPlaybackAtEnd(scene):
if scene.frame_current >= scene.frame_end:
bpy.ops.screen.animation_cancel()
def register():
bpy.app.handlers.frame_change_pre.append(stopPlaybackAtEnd)
def unregister():
bpy.app.handlers.frame_change_pre.remove(stopPlaybackAtEnd)
if __name__ == "__main__":
register()
| 22.931034
| 63
| 0.645113
| 75
| 665
| 5.48
| 0.613333
| 0.043796
| 0.10219
| 0.092457
| 0.136253
| 0.136253
| 0
| 0
| 0
| 0
| 0
| 0.013035
| 0.192481
| 665
| 28
| 64
| 23.75
| 0.752328
| 0
| 0
| 0
| 0
| 0
| 0.196992
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.086957
| 0
| 0.217391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9d47acd47b8bd0babe955a7bbbde7c4d9080b36
| 688
|
py
|
Python
|
Py3Challenges/saves/challenges/c6_min.py
|
AlbertUnruh/Py3Challenges
|
52f03f157860f6464f0c1710bf051a8099c29ea2
|
[
"MIT"
] | 2
|
2022-02-13T04:57:10.000Z
|
2022-02-13T10:40:14.000Z
|
Py3Challenges/saves/challenges/c6_min.py
|
AlbertUnruh/Py3Challenges
|
52f03f157860f6464f0c1710bf051a8099c29ea2
|
[
"MIT"
] | null | null | null |
Py3Challenges/saves/challenges/c6_min.py
|
AlbertUnruh/Py3Challenges
|
52f03f157860f6464f0c1710bf051a8099c29ea2
|
[
"MIT"
] | null | null | null |
"""
To master this challenge you should consider using the builtin ``min`` function.
"""
from ...challenge import Challenge
from random import randint
x = []
for _ in range(randint(2, 10)):
x.append(randint(1, 100))
intro = f"You have to print the lowest value of {', '.join(str(_) for _ in x[:-1])} and {x[-1]}. (values: x)"
def validate_function(stdin: str, stdout: str, stderr: str, exc: tuple) -> bool:
try:
z = int(stdout.removesuffix("\n"))
except ValueError:
return False
else:
return min(x) == z
challenge = Challenge(
intro=intro,
validate_function=validate_function,
help=__doc__,
values={"x": x},
capture_stdout=True,
)
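# Illustrative solution (hypothetical player input): because the challenge
# captures stdout and `validate_function` compares int(stdout) to min(x), a
# passing submission can simply be:
#
#   print(min(x))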
| 22.193548
| 109
| 0.632267
| 95
| 688
| 4.463158
| 0.610526
| 0.113208
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016729
| 0.218023
| 688
| 30
| 110
| 22.933333
| 0.771375
| 0.101744
| 0
| 0
| 0
| 0.05
| 0.165574
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.1
| 0
| 0.25
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9d600352f466e38045c7614f4b0151d5eb8f878
| 4,625
|
py
|
Python
|
services/web/server/tests/unit/with_dbs/01/test_director_v2.py
|
mrnicegyu11/osparc-simcore
|
b6fa6c245dbfbc18cc74a387111a52de9b05d1f4
|
[
"MIT"
] | null | null | null |
services/web/server/tests/unit/with_dbs/01/test_director_v2.py
|
mrnicegyu11/osparc-simcore
|
b6fa6c245dbfbc18cc74a387111a52de9b05d1f4
|
[
"MIT"
] | 1
|
2021-11-29T13:38:09.000Z
|
2021-11-29T13:38:09.000Z
|
services/web/server/tests/unit/with_dbs/01/test_director_v2.py
|
mrnicegyu11/osparc-simcore
|
b6fa6c245dbfbc18cc74a387111a52de9b05d1f4
|
[
"MIT"
] | null | null | null |
# pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
from typing import AsyncIterator
import pytest
from aioresponses import aioresponses
from faker import Faker
from hypothesis import HealthCheck, given, settings
from hypothesis import strategies as st
from models_library.clusters import ClusterID
from models_library.projects import ProjectID
from models_library.projects_pipeline import ComputationTask
from models_library.projects_state import RunningState
from models_library.users import UserID
from simcore_service_webserver import director_v2_api
from simcore_service_webserver.director_v2_models import (
ClusterCreate,
ClusterPatch,
ClusterPing,
)
@pytest.fixture()
async def mocked_director_v2(
director_v2_service_mock: aioresponses,
) -> AsyncIterator[aioresponses]:
yield director_v2_service_mock
@pytest.fixture
def user_id(faker: Faker) -> UserID:
return UserID(faker.pyint(min_value=1))
@pytest.fixture
def project_id(faker: Faker) -> ProjectID:
return ProjectID(faker.uuid4())
@pytest.fixture
def cluster_id(faker: Faker) -> ClusterID:
return ClusterID(faker.pyint(min_value=0))
async def test_create_pipeline(
mocked_director_v2, client, user_id: UserID, project_id: ProjectID
):
task_out = await director_v2_api.create_or_update_pipeline(
client.app, user_id, project_id
)
assert task_out
assert isinstance(task_out, dict)
assert task_out["state"] == RunningState.NOT_STARTED
async def test_get_computation_task(
mocked_director_v2,
client,
user_id: UserID,
project_id: ProjectID,
):
task_out = await director_v2_api.get_computation_task(
client.app, user_id, project_id
)
assert task_out
assert isinstance(task_out, ComputationTask)
assert task_out.state == RunningState.NOT_STARTED
async def test_delete_pipeline(
mocked_director_v2, client, user_id: UserID, project_id: ProjectID
):
await director_v2_api.delete_pipeline(client.app, user_id, project_id)
@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
@given(cluster_create=st.builds(ClusterCreate))
async def test_create_cluster(
mocked_director_v2, client, user_id: UserID, cluster_create
):
created_cluster = await director_v2_api.create_cluster(
client.app, user_id=user_id, new_cluster=cluster_create
)
assert created_cluster is not None
assert isinstance(created_cluster, dict)
assert "id" in created_cluster
async def test_list_clusters(mocked_director_v2, client, user_id: UserID):
list_of_clusters = await director_v2_api.list_clusters(client.app, user_id=user_id)
assert isinstance(list_of_clusters, list)
assert len(list_of_clusters) > 0
async def test_get_cluster(
mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID
):
cluster = await director_v2_api.get_cluster(
client.app, user_id=user_id, cluster_id=cluster_id
)
assert isinstance(cluster, dict)
assert cluster["id"] == cluster_id
async def test_get_cluster_details(
mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID
):
cluster_details = await director_v2_api.get_cluster_details(
client.app, user_id=user_id, cluster_id=cluster_id
)
assert isinstance(cluster_details, dict)
@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
@given(cluster_patch=st.from_type(ClusterPatch))
async def test_update_cluster(
mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID, cluster_patch
):
print(f"--> updating cluster with {cluster_patch=}")
updated_cluster = await director_v2_api.update_cluster(
client.app, user_id=user_id, cluster_id=cluster_id, cluster_patch=cluster_patch
)
assert isinstance(updated_cluster, dict)
assert updated_cluster["id"] == cluster_id
async def test_delete_cluster(
mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID
):
await director_v2_api.delete_cluster(
client.app, user_id=user_id, cluster_id=cluster_id
)
@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
@given(cluster_ping=st.builds(ClusterPing))
async def test_ping_cluster(mocked_director_v2, client, cluster_ping: ClusterPing):
await director_v2_api.ping_cluster(client.app, cluster_ping=cluster_ping)
async def test_ping_specific_cluster(
mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID
):
await director_v2_api.ping_specific_cluster(
client.app, user_id=user_id, cluster_id=cluster_id
)
| 30.833333
| 87
| 0.780973
| 626
| 4,625
| 5.428115
| 0.161342
| 0.049441
| 0.045909
| 0.071218
| 0.508534
| 0.457034
| 0.436433
| 0.389347
| 0.389347
| 0.375221
| 0
| 0.007818
| 0.142703
| 4,625
| 149
| 88
| 31.040268
| 0.84918
| 0.020973
| 0
| 0.256637
| 0
| 0
| 0.011715
| 0
| 0
| 0
| 0
| 0
| 0.141593
| 1
| 0.026549
| false
| 0
| 0.115044
| 0.026549
| 0.168142
| 0.00885
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9d6dd8bd3445675e1356c10ac0bb61cd00aba81
| 3,027
|
py
|
Python
|
generator.py
|
Geoalert/emergency-mapping
|
96668e4e5aa2b520e5727536f7a8f4c262ee3da6
|
[
"MIT"
] | 3
|
2018-04-04T17:58:53.000Z
|
2021-10-14T08:50:13.000Z
|
generator.py
|
aeronetlab/map_augury
|
96668e4e5aa2b520e5727536f7a8f4c262ee3da6
|
[
"MIT"
] | null | null | null |
generator.py
|
aeronetlab/map_augury
|
96668e4e5aa2b520e5727536f7a8f4c262ee3da6
|
[
"MIT"
] | 1
|
2020-03-24T12:07:07.000Z
|
2020-03-24T12:07:07.000Z
|
import numpy as np
def random_augmentation(img, mask):
    # You can add any augmentations you need here.
return img, mask
def batch_generator(image, mask,
batch_size=1,
crop_size=0,
patch_size=256,
bbox= None,
augmentation=False):
'''
image: nparray, must have 3 dimension
mask: nparray, 2 dimensions, same size as image
batch_size: int, number of images in a batch
patch_size: int, size of the image returned, patch is square
    crop_size: int, how many pixels should be cropped off the mask
bbox: None or tuple of 4 ints, (min_y, max_y, min_x, max_x), the data is selected from within the bbox
augmentation: turn on/off data augmentation. The augmentation function is random_augmentation() above
    returns batches of image and mask patches; the image is converted to 'channels last' as required by the U-Net
'''
if np.ndim(mask) != 2 or np.ndim(image) != 3:
raise ValueError('image must have 3 dims and mask 2 dims')
if mask.shape != image.shape[1:]:
raise ValueError('image and mask shape is different')
im_max = float(np.max(image))
mask_max = 1.0
#select subimage
if bbox is not None:
# check bbox
if bbox[0] < 0 or bbox [2] < 0 \
or bbox[1] > mask.shape[0] or bbox[3] > mask.shape[0] \
or bbox[0] + patch_size > bbox[1] or bbox[2] + patch_size > bbox[3] \
or patch_size <= 0:
raise ValueError("Incorrect bbox or patch size")
img_ = image[:, bbox[0] : bbox[1], bbox[2]:bbox[3]]
mask_ = mask[bbox[0] : bbox[1], bbox[2]:bbox[3]]
else:
img_ = image
mask_ = mask
while 1:
x = []
y = []
for i in range (batch_size):
random_x = np.random.randint(0, mask_.shape[1] - patch_size)
random_y = np.random.randint(0, mask_.shape[0] - patch_size)
img_patch = img_[:,
random_y : random_y + patch_size,
random_x : random_x + patch_size] / im_max
# transform the image from channels-first (rasterio format) to channels-last (default tensorflow format)
img_patch = np.moveaxis(img_patch, 0, 2)
mask_patch = mask_[random_y : random_y + patch_size,
random_x : random_x + patch_size] / mask_max
if augmentation:
img_patch, mask_patch = random_augmentation(img_patch, mask_patch)
# mask is cropped as it may be useful for some convnets that have output size less than input
if crop_size > 0:
mask_patch = mask_patch[crop_size : -crop_size,
crop_size : -crop_size]
mask_patch = np.expand_dims(mask_patch, 2)
x.append(img_patch)
y.append(mask_patch)
yield (np.array(x), np.array(y))
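# Minimal usage sketch (hypothetical shapes and data); the generator is
# infinite, so batches are fetched with next().
if __name__ == '__main__':
    image = np.random.rand(3, 512, 512)                  # channels-first image
    mask = (np.random.rand(512, 512) > 0.5).astype(float)
    gen = batch_generator(image, mask, batch_size=4, patch_size=256, crop_size=16)
    x_batch, y_batch = next(gen)
    print(x_batch.shape, y_batch.shape)                   # (4, 256, 256, 3) (4, 224, 224, 1)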
| 40.905405
| 116
| 0.570202
| 417
| 3,027
| 3.980815
| 0.28777
| 0.06506
| 0.016867
| 0.028916
| 0.183133
| 0.128916
| 0.079518
| 0.079518
| 0.055422
| 0.055422
| 0
| 0.022602
| 0.342253
| 3,027
| 73
| 117
| 41.465753
| 0.81115
| 0.269574
| 0
| 0
| 0
| 0
| 0.045855
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042553
| false
| 0
| 0.021277
| 0.021277
| 0.085106
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9d71e12c5fdd4a3220a64251c8e0e2c9a302fe4
| 13,351
|
py
|
Python
|
awx/api/metadata.py
|
Avinesh/awx
|
6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf
|
[
"Apache-2.0"
] | 1
|
2021-09-07T14:53:57.000Z
|
2021-09-07T14:53:57.000Z
|
awx/api/metadata.py
|
Avinesh/awx
|
6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf
|
[
"Apache-2.0"
] | 2
|
2020-02-04T05:01:38.000Z
|
2020-02-18T06:44:52.000Z
|
awx/api/metadata.py
|
Avinesh/awx
|
6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf
|
[
"Apache-2.0"
] | 1
|
2020-01-28T05:34:09.000Z
|
2020-01-28T05:34:09.000Z
|
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.
from collections import OrderedDict
# Django
from django.core.exceptions import PermissionDenied
from django.db.models.fields import PositiveIntegerField, BooleanField
from django.db.models.fields.related import ForeignKey
from django.http import Http404
from django.utils.encoding import force_text, smart_text
from django.utils.translation import ugettext_lazy as _
# Django REST Framework
from rest_framework import exceptions
from rest_framework import metadata
from rest_framework import serializers
from rest_framework.relations import RelatedField, ManyRelatedField
from rest_framework.fields import JSONField as DRFJSONField
from rest_framework.request import clone_request
# AWX
from awx.main.fields import JSONField, ImplicitRoleField
from awx.main.models import InventorySource, NotificationTemplate
from awx.main.scheduler.kubernetes import PodManager
class Metadata(metadata.SimpleMetadata):
def get_field_info(self, field):
field_info = OrderedDict()
field_info['type'] = self.label_lookup[field]
field_info['required'] = getattr(field, 'required', False)
text_attrs = [
'read_only', 'label', 'help_text',
'min_length', 'max_length',
'min_value', 'max_value',
'category', 'category_slug',
'defined_in_file'
]
for attr in text_attrs:
value = getattr(field, attr, None)
if value is not None and value != '':
field_info[attr] = force_text(value, strings_only=True)
placeholder = getattr(field, 'placeholder', serializers.empty)
if placeholder is not serializers.empty:
field_info['placeholder'] = placeholder
serializer = getattr(field, 'parent', None)
if serializer and hasattr(serializer, 'Meta') and hasattr(serializer.Meta, 'model'):
# Update help text for common fields.
field_help_text = {
'id': _('Database ID for this {}.'),
'name': _('Name of this {}.'),
'description': _('Optional description of this {}.'),
'type': _('Data type for this {}.'),
'url': _('URL for this {}.'),
'related': _('Data structure with URLs of related resources.'),
'summary_fields': _('Data structure with name/description for related resources.'),
'created': _('Timestamp when this {} was created.'),
'modified': _('Timestamp when this {} was last modified.'),
}
if field.field_name in field_help_text:
opts = serializer.Meta.model._meta.concrete_model._meta
verbose_name = smart_text(opts.verbose_name)
field_info['help_text'] = field_help_text[field.field_name].format(verbose_name)
if field.field_name == 'type':
field_info['filterable'] = True
else:
for model_field in serializer.Meta.model._meta.fields:
if field.field_name == model_field.name:
if getattr(model_field, '__accepts_json__', None):
field_info['type'] = 'json'
field_info['filterable'] = True
break
else:
field_info['filterable'] = False
# Indicate if a field has a default value.
# FIXME: Still isn't showing all default values?
try:
default = field.get_default()
if field.field_name == 'TOWER_URL_BASE' and default == 'https://towerhost':
default = '{}://{}'.format(self.request.scheme, self.request.get_host())
field_info['default'] = default
except serializers.SkipField:
pass
if getattr(field, 'child', None):
field_info['child'] = self.get_field_info(field.child)
elif getattr(field, 'fields', None):
field_info['children'] = self.get_serializer_info(field)
if not isinstance(field, (RelatedField, ManyRelatedField)) and hasattr(field, 'choices'):
field_info['choices'] = [(choice_value, choice_name) for choice_value, choice_name in field.choices.items()]
# Indicate if a field is write-only.
if getattr(field, 'write_only', False):
field_info['write_only'] = True
# Special handling of inventory source_region choices that vary based on
# selected inventory source.
if field.field_name == 'source_regions':
for cp in ('azure_rm', 'ec2', 'gce'):
get_regions = getattr(InventorySource, 'get_%s_region_choices' % cp)
field_info['%s_region_choices' % cp] = get_regions()
# Special handling of group_by choices for EC2.
if field.field_name == 'group_by':
for cp in ('ec2',):
get_group_by_choices = getattr(InventorySource, 'get_%s_group_by_choices' % cp)
field_info['%s_group_by_choices' % cp] = get_group_by_choices()
# Special handling of notification configuration where the required properties
# are conditional on the type selected.
if field.field_name == 'notification_configuration':
for (notification_type_name, notification_tr_name, notification_type_class) in NotificationTemplate.NOTIFICATION_TYPES:
field_info[notification_type_name] = notification_type_class.init_parameters
# Special handling of notification messages where the required properties
# are conditional on the type selected.
try:
view_model = field.context['view'].model
except (AttributeError, KeyError):
view_model = None
if view_model == NotificationTemplate and field.field_name == 'messages':
for (notification_type_name, notification_tr_name, notification_type_class) in NotificationTemplate.NOTIFICATION_TYPES:
field_info[notification_type_name] = notification_type_class.default_messages
# Update type of fields returned...
model_field = None
if serializer and hasattr(serializer, 'Meta') and hasattr(serializer.Meta, 'model'):
try:
model_field = serializer.Meta.model._meta.get_field(field.field_name)
except Exception:
pass
if field.field_name == 'type':
field_info['type'] = 'choice'
elif field.field_name in ('url', 'custom_virtualenv', 'token'):
field_info['type'] = 'string'
elif field.field_name in ('related', 'summary_fields'):
field_info['type'] = 'object'
elif isinstance(field, PositiveIntegerField):
field_info['type'] = 'integer'
elif field.field_name in ('created', 'modified'):
field_info['type'] = 'datetime'
elif (
RelatedField in field.__class__.__bases__ or
isinstance(model_field, ForeignKey)
):
field_info['type'] = 'id'
elif (
isinstance(field, JSONField) or
isinstance(model_field, JSONField) or
isinstance(field, DRFJSONField) or
isinstance(getattr(field, 'model_field', None), JSONField) or
field.field_name == 'credential_passwords'
):
field_info['type'] = 'json'
elif (
isinstance(field, ManyRelatedField) and
field.field_name == 'credentials'
# launch-time credentials
):
field_info['type'] = 'list_of_ids'
elif isinstance(model_field, BooleanField):
field_info['type'] = 'boolean'
return field_info
def get_serializer_info(self, serializer, method=None):
filterer = getattr(serializer, 'filter_field_metadata', lambda fields, method: fields)
return filterer(
super(Metadata, self).get_serializer_info(serializer),
method
)
def determine_actions(self, request, view):
# Add field information for GET requests (so field names/labels are
# available even when we can't POST/PUT).
actions = {}
for method in {'GET', 'PUT', 'POST'} & set(view.allowed_methods):
view.request = clone_request(request, method)
obj = None
try:
# Test global permissions
if hasattr(view, 'check_permissions'):
view.check_permissions(view.request)
# Test object permissions
if method == 'PUT' and hasattr(view, 'get_object'):
obj = view.get_object()
except (exceptions.APIException, PermissionDenied, Http404):
continue
else:
# If user has appropriate permissions for the view, include
# appropriate metadata about the fields that should be supplied.
serializer = view.get_serializer(instance=obj)
actions[method] = self.get_serializer_info(serializer, method=method)
finally:
view.request = request
for field, meta in list(actions[method].items()):
if not isinstance(meta, dict):
continue
if field == "pod_spec_override":
meta['default'] = PodManager().pod_definition
# Add type choices if available from the serializer.
if field == 'type' and hasattr(serializer, 'get_type_choices'):
meta['choices'] = serializer.get_type_choices()
# For GET method, remove meta attributes that aren't relevant
# when reading a field and remove write-only fields.
if method == 'GET':
attrs_to_remove = ('required', 'read_only', 'default', 'min_length', 'max_length', 'placeholder')
for attr in attrs_to_remove:
meta.pop(attr, None)
meta.get('child', {}).pop(attr, None)
if meta.pop('write_only', False):
actions['GET'].pop(field)
# For PUT/POST methods, remove read-only fields.
if method in ('PUT', 'POST'):
# This value should always be False for PUT/POST, so don't
# show it (file-based read-only settings can't be updated)
meta.pop('defined_in_file', False)
if meta.pop('read_only', False):
if field == 'id' and hasattr(view, 'attach'):
continue
actions[method].pop(field)
return actions
def determine_metadata(self, request, view):
# store request on self so we can use it to generate field defaults
# (such as TOWER_URL_BASE)
self.request = request
try:
setattr(view, '_request', request)
metadata = super(Metadata, self).determine_metadata(request, view)
finally:
delattr(view, '_request')
# Add type(s) handled by this view/serializer.
if hasattr(view, 'get_serializer'):
serializer = view.get_serializer()
if hasattr(serializer, 'get_types'):
metadata['types'] = serializer.get_types()
# Add search fields if available from the view.
if getattr(view, 'search_fields', None):
metadata['search_fields'] = view.search_fields
# Add related search fields if available from the view.
if getattr(view, 'related_search_fields', None):
metadata['related_search_fields'] = view.related_search_fields
# include role names in metadata
roles = []
model = getattr(view, 'model', None)
if model:
for field in model._meta.get_fields():
if type(field) is ImplicitRoleField:
roles.append(field.name)
if len(roles) > 0:
metadata['object_roles'] = roles
from rest_framework import generics
if isinstance(view, generics.ListAPIView) and hasattr(view, 'paginator'):
metadata['max_page_size'] = view.paginator.max_page_size
return metadata
class RoleMetadata(Metadata):
def determine_metadata(self, request, view):
metadata = super(RoleMetadata, self).determine_metadata(request, view)
if 'actions' in metadata:
metadata['actions'].pop('POST')
metadata['actions']['POST'] = {
"id": {"type": "integer", "label": "ID", "help_text": "Database ID for this role."},
"disassociate": {"type": "integer", "label": "Disassociate", "help_text": "Provide to remove this role."},
}
return metadata
class SublistAttachDetatchMetadata(Metadata):
def determine_actions(self, request, view):
actions = super(SublistAttachDetatchMetadata, self).determine_actions(request, view)
method = 'POST'
if method in actions:
for field in list(actions[method].keys()):
if field == 'id':
continue
actions[method].pop(field)
return actions
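# Illustrative wiring sketch (hypothetical view name): in Django REST Framework
# a metadata class like the ones above is selected per view via
# `metadata_class`, or globally via
# settings.REST_FRAMEWORK['DEFAULT_METADATA_CLASS'].
#
#   from rest_framework.views import APIView
#
#   class JobTemplateList(APIView):
#       metadata_class = Metadata   # OPTIONS responses are built by Metadata above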
| 43.630719
| 131
| 0.601004
| 1,430
| 13,351
| 5.429371
| 0.202797
| 0.035935
| 0.028851
| 0.016486
| 0.159196
| 0.129057
| 0.101752
| 0.083462
| 0.083462
| 0.083462
| 0
| 0.001503
| 0.302374
| 13,351
| 305
| 132
| 43.77377
| 0.832081
| 0.115122
| 0
| 0.1875
| 0
| 0
| 0.134669
| 0.011293
| 0
| 0
| 0
| 0.003279
| 0
| 1
| 0.026786
| false
| 0.013393
| 0.075893
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9d7834f2dd39b0c5b6da30b8ebfe19e7026adeb
| 1,985
|
py
|
Python
|
plugins/python/tasks.py
|
BBVA/deeptracy
|
40f4b6bba2bdd345e95e42d474c05fa90f15c3e9
|
[
"Apache-1.1"
] | 85
|
2017-09-22T10:48:51.000Z
|
2021-06-11T18:33:28.000Z
|
plugins/python/tasks.py
|
BBVA/deeptracy
|
40f4b6bba2bdd345e95e42d474c05fa90f15c3e9
|
[
"Apache-1.1"
] | 51
|
2017-10-17T10:16:16.000Z
|
2020-08-29T23:10:21.000Z
|
plugins/python/tasks.py
|
BBVA/deeptracy
|
40f4b6bba2bdd345e95e42d474c05fa90f15c3e9
|
[
"Apache-1.1"
] | 14
|
2017-11-20T10:20:16.000Z
|
2021-02-02T21:35:07.000Z
|
import json
from washer.worker.actions import AppendStdout, AppendStderr
from washer.worker.actions import CreateNamedLog, AppendToLog
from washer.worker.actions import SetProperty
from washer.worker.commands import washertask
def pipenv_graph2deps(rawgraph):
graph = json.loads(rawgraph)
def build_entry(data):
if 'required_version' in data:
spec = data['key'] + data['required_version']
else:
spec = data['key']
return {'installer': 'pipenv',
'spec': spec,
'source': 'pypi',
'name': data['package_name'],
'version': data['installed_version']}
def extract_dependencies(entries):
for entry in entries:
if 'package' in entry:
package = entry['package']
dependencies = entry.get('dependencies', [])
yield build_entry(package)
yield from extract_dependencies(dependencies)
else:
yield build_entry(entry)
yield from extract_dependencies(graph)
@washertask
def pip_install(repopath, path=".", **kwargs):
import invoke
c = invoke.Context()
with c.cd(repopath):
with c.cd(path):
res = c.run("pipenv install .")
deps = c.run("pipenv graph --json")
yield AppendStdout(res.stdout)
yield AppendStderr(res.stderr)
yield SetProperty("dependencies", list(pipenv_graph2deps(deps.stdout)))
return True
@washertask
def requirement_file(repopath, requirement="requirements.txt",
path=".", **kwargs):
import invoke
c = invoke.Context()
with c.cd(repopath):
with c.cd(path):
res = c.run("pipenv install -r %s" % requirement)
deps = c.run("pipenv graph --json")
yield AppendStdout(res.stdout)
yield AppendStderr(res.stderr)
yield SetProperty("dependencies", list(pipenv_graph2deps(deps.stdout)))
return True
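if __name__ == "__main__":
    # Minimal self-check of pipenv_graph2deps on a hypothetical, trimmed
    # `pipenv graph --json` payload; the JSON layout is inferred from the
    # parsing code above rather than taken from pipenv documentation.
    _sample = json.dumps([
        {
            "package": {
                "key": "requests",
                "package_name": "requests",
                "installed_version": "2.22.0",
            },
            "dependencies": [
                {
                    "key": "idna",
                    "package_name": "idna",
                    "installed_version": "2.8",
                    "required_version": ">=2.5",
                }
            ],
        }
    ])
    # Expected entries: requests 2.22.0 (spec 'requests') and idna 2.8 (spec 'idna>=2.5')
    print(list(pipenv_graph2deps(_sample)))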
| 28.357143
| 75
| 0.614106
| 212
| 1,985
| 5.679245
| 0.306604
| 0.033223
| 0.053156
| 0.057309
| 0.446013
| 0.373754
| 0.373754
| 0.373754
| 0.373754
| 0.373754
| 0
| 0.002086
| 0.275567
| 1,985
| 69
| 76
| 28.768116
| 0.835188
| 0
| 0
| 0.423077
| 0
| 0
| 0.125441
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096154
| false
| 0
| 0.134615
| 0
| 0.288462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9d84b2b4c7d4cbbbf84bcb2ee37459c480a1a5e
| 715
|
py
|
Python
|
senity/utils/getSiteProfile.py
|
pkokkinos/senity
|
c6e41678620bef558cc3600929a8320ff2a285cf
|
[
"MIT"
] | 1
|
2017-10-26T12:30:04.000Z
|
2017-10-26T12:30:04.000Z
|
senity/utils/getSiteProfile.py
|
pkokkinos/senity
|
c6e41678620bef558cc3600929a8320ff2a285cf
|
[
"MIT"
] | null | null | null |
senity/utils/getSiteProfile.py
|
pkokkinos/senity
|
c6e41678620bef558cc3600929a8320ff2a285cf
|
[
"MIT"
] | null | null | null |
import json
import os
# get site profile
def getSiteProfile(site_file):
with open(site_file) as json_file:
json_data = json.load(json_file)
return json_data
# get all site profiles
def getAllSiteProfiles(site_folder):
allSiteProfiles = {}
allSiteFiles = os.listdir(site_folder)
for sf in allSiteFiles:
sp = getSiteProfile(site_folder + "/" + sf)
allSiteProfiles[sp["siteName"]] = []
for device in sp["devicesAvailable"]:
for i in range(device["deviceCounter"]):
allSiteProfiles[sp["siteName"]].append(device["deviceName"])
return allSiteProfiles
#sites_folder = "sites"
#print getAllSiteProfiles(sites_folder)
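# Illustrative site profile JSON (hypothetical content, inferred from the keys
# accessed above):
#
#   {
#     "siteName": "site_A",
#     "devicesAvailable": [
#       {"deviceName": "fridge", "deviceCounter": 2},
#       {"deviceName": "boiler", "deviceCounter": 1}
#     ]
#   }
#
# With one such file per site in the folder, getAllSiteProfiles("sites") maps
# "site_A" to ["fridge", "fridge", "boiler"].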
| 23.833333
| 77
| 0.664336
| 79
| 715
| 5.873418
| 0.443038
| 0.064655
| 0.060345
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.234965
| 715
| 29
| 78
| 24.655172
| 0.848263
| 0.135664
| 0
| 0
| 0
| 0
| 0.091354
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9d87f8b647f237794f75914da625ea130e200c3
| 5,959
|
py
|
Python
|
ppo_new/baseline.py
|
QingXinHu123/Lane_change_RL
|
06c70e6f58d3478669b56800028e320ca03f5222
|
[
"MIT"
] | 1
|
2022-03-17T03:40:57.000Z
|
2022-03-17T03:40:57.000Z
|
ppo_new/baseline.py
|
QingXinHu123/Lane_change_RL
|
06c70e6f58d3478669b56800028e320ca03f5222
|
[
"MIT"
] | null | null | null |
ppo_new/baseline.py
|
QingXinHu123/Lane_change_RL
|
06c70e6f58d3478669b56800028e320ca03f5222
|
[
"MIT"
] | null | null | null |
import os, sys
from env.LaneChangeEnv import LaneChangeEnv
import random
import numpy as np
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
print('success')
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
import traci
def episode_generator(pi, env, is_gui, ttc, gap, sumoseed, randomseed):
egoid = 'lane1.' + str(random.randint(1, 6))
ob = env.reset(egoid=egoid, tlane=0, tfc=2, is_gui=is_gui, sumoseed=sumoseed, randomseed=randomseed)
traci.vehicle.setColor(egoid, (255, 69, 0))
cur_ep_ret = 0 # return in current episode
cur_ep_ret_detail = 0
cur_ep_len = 0 # len of current episode
cur_ep_obs = []
cur_ep_acs = []
while True:
ac = pi(ob=ob, env=env, ttc=ttc, gap=gap)
ob, rew, new, info = env.step(ac)
cur_ep_ret += rew
cur_ep_ret_detail += np.array(list(info['reward_dict'].values()))
cur_ep_len += 1
cur_ep_obs.append(ob)
cur_ep_acs.append(ac)
if new:
return {"ep_obs": cur_ep_obs, "ep_acs": cur_ep_acs,
"ep_ret": cur_ep_ret, 'ep_rets_detail': cur_ep_ret_detail, "ep_len": cur_ep_len,
'ep_num_danger': info['num_danger'], 'ep_is_success': info['is_success'], 'ep_num_crash': info['num_crash'],
'ep_is_collision': info["is_collision"]}
def pi_baseline(ob, env, ttc, gap):
    # Safety thresholds: time-to-collision (seconds) and a minimum distance gap.
if env.ego.trgt_leader:
leader_speed = env.ego.trgt_leader.speed
else:
leader_speed = env.ego.speed
if env.ego.trgt_follower:
follower_speed = env.ego.trgt_follower.speed
else:
follower_speed = env.ego.speed
leader_dis = abs(ob[3 * 4 + 0 + 1])*239.8
follower_dis = abs(ob[4 * 4 + 0 + 1])*239.8
TTC = (leader_dis - 5) / max(env.ego.speed, 0.001)
TTC2 = (follower_dis - 5) / max(follower_speed, 0.001)
    # print(TTC, TTC2)
if TTC > ttc and TTC2 > ttc and leader_dis > gap and follower_dis > gap:
ac_lat = 1 # change lane
else:
ac_lat = 0 # abort
ac = ac_lat * 3 + 1
return ac
def evaluate_baseline(num_eps, ttc, gap, is_gui):
sumoseed = 0
randomseed = 0
pi = pi_baseline
env = LaneChangeEnv(is_train=False)
ret_eval = 0
    ret_det_eval = 0  # not an integer, will be broadcast
danger_num = 0
crash_num = 0
level_1_danger = []
level_2_danger = []
collision_num = 0
ep_len_list = []
success_num = 0
for i in range(num_eps):
ep_eval = episode_generator(pi, env, is_gui=is_gui, ttc=ttc, gap=gap, sumoseed=sumoseed, randomseed=randomseed)
ret_eval += ep_eval['ep_ret']
ret_det_eval += ep_eval['ep_rets_detail']
danger_num += ep_eval['ep_num_danger']
crash_num += ep_eval['ep_num_crash']
level_1_danger.append(1 if ep_eval['ep_num_danger'] > 0 else 0)
level_2_danger.append((1 if ep_eval['ep_num_crash'] > 0 else 0))
collision_num += ep_eval['ep_is_collision']
success_num += int(ep_eval['ep_is_success'])
if ep_eval['ep_is_success']:
ep_len_list.append(ep_eval['ep_len'])
sumoseed += 1
randomseed += 1
ret_eval /= float(num_eps)
ret_det_eval /= float(num_eps)
danger_rate = danger_num / num_eps
crash_rate = crash_num / num_eps
level_1_danger_rate = np.mean(level_1_danger)
level_2_danger_rate = np.mean(level_2_danger)
coll_rate = collision_num / num_eps
success_rate = success_num / float(num_eps)
success_len = np.mean(ep_len_list)
print('reward_detail: ', ret_det_eval)
print('reward: ', ret_eval,
'\ndanger_rate: ', danger_rate,
'\ncrash_rate: ', crash_rate,
'\nlevel-1-danger_rate: ', level_1_danger_rate,
'\nlevel-2-danger_rate: ', level_2_danger_rate,
'\ncollision_rate: ', coll_rate,
'\nsuccess_rate: ', success_rate,
          '\nsuccess_len: ', success_len)
env.close()
return ret_eval, danger_rate, crash_rate, level_1_danger_rate, level_2_danger_rate, coll_rate, success_rate, success_len
NUM_EPS = 100
IS_GUI = False
# f = open('../data/baseline_evaluation/testseed2.csv', 'w+')
# safety_gap = 2
constraints_list = [3.0] # [1.0, 2.0, 3.0, 4.0, 5.0, 10.0, 20.0]
ttcs = [0.1, 0.3, 0.5, 1, 2, 3]
# ttcs = [2]
gap = 0
reward_list = []
danger_rate_list = []
crash_rate_list = []
level_1_danger_list = []
level_2_danger_list = []
coll_rate_list = []
succ_rate_list = []
succ_len_list = []
for ttc in ttcs:
ret_eval, danger_rate, crash_rate, level_1_danger_rate, level_2_danger_rate, coll_rate, success_rate, success_len = evaluate_baseline(NUM_EPS, ttc, gap, IS_GUI)
reward_list.append(ret_eval)
danger_rate_list.append(danger_rate)
crash_rate_list.append(crash_rate)
level_1_danger_list.append(level_1_danger_rate)
level_2_danger_list.append(level_2_danger_rate)
coll_rate_list.append(coll_rate)
succ_rate_list.append(success_rate)
succ_len_list.append(success_len)
print('reward: ', reward_list)
print('danger rate: ', danger_rate_list)
print('crash rate: ', crash_rate_list)
print('level-1-danger_rate: ', level_1_danger_list)
print('level-2-danger_rate: ', level_2_danger_list)
print('collison rate: ', coll_rate_list)
print('success rate: ', succ_rate_list)
print('success len: ', succ_len_list)
# reward: [-89.12552753359037, -69.84537459892903, -73.81562785829651, -148.23580687485645, -227.71842861064192, -229.9101089174337]
# danger rate: [2.13, 0.88, 0.77, 1.88, 3.82, 3.82]
# crash rate: [0.58, 0.33, 0.5, 1.24, 2.09, 2.09]
# level-1-danger_rate: [0.23, 0.09, 0.05, 0.14, 0.25, 0.25]
# level-2-danger_rate: [0.05, 0.03, 0.05, 0.12, 0.2, 0.2]
# collison rate: [0.0, 0.0, 0.02, 0.09, 0.14, 0.14]
# success rate: [0.99, 0.99, 0.9, 0.6, 0.08, 0.05]
# success len: [55.656565656565654, 62.43434343434343, 67.5, 90.1, 66.625, 73.4]
| 36.558282
| 164
| 0.659171
| 964
| 5,959
| 3.770747
| 0.192946
| 0.071527
| 0.042916
| 0.030812
| 0.196699
| 0.150757
| 0.088033
| 0.08033
| 0.047868
| 0.047868
| 0
| 0.078652
| 0.208424
| 5,959
| 162
| 165
| 36.783951
| 0.691965
| 0.135593
| 0
| 0.030534
| 0
| 0
| 0.117911
| 0.008575
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022901
| false
| 0
| 0.038168
| 0
| 0.083969
| 0.083969
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9d8a3bc2867b57ba7db6ffd06a68bdf7372909c
| 1,261
|
py
|
Python
|
clean_data.py
|
toogy/pendigits-hmm
|
03382e1457941714439d40b67e53eaf117fe4d08
|
[
"MIT"
] | null | null | null |
clean_data.py
|
toogy/pendigits-hmm
|
03382e1457941714439d40b67e53eaf117fe4d08
|
[
"MIT"
] | null | null | null |
clean_data.py
|
toogy/pendigits-hmm
|
03382e1457941714439d40b67e53eaf117fe4d08
|
[
"MIT"
] | null | null | null |
import numpy as np
import pickle
from collections import defaultdict
from parsing import parser
from analysis import training
def main():
    parse = parser.Parser()
    train_digits = parse.parse_file('data/pendigits-train')
test_digits = parse.parse_file('data/pendigits-test')
centroids = training.get_digit_kmeans_centroids(
train_digits, 256 - 3)
training.set_digit_observations(
train_digits, centroids, 256)
training.set_digit_observations(
test_digits, centroids, 256)
train_sequences = defaultdict(list)
test_sequences = []
n_test_sequences = len(test_digits)
test_expected_labels = np.ndarray(shape=(n_test_sequences,))
for digit in train_digits:
train_sequences[digit.label].append(digit.np_array_observations)
for i, digit in enumerate(test_digits):
test_sequences.append(digit.np_array_observations)
test_expected_labels[i] = digit.label
with open('train_sequences', 'wb') as f:
pickle.dump(train_sequences, f)
with open('test_sequences', 'wb') as f:
pickle.dump(test_sequences, f)
with open('test_expected_labels', 'wb') as f:
pickle.dump(test_expected_labels, f)
if __name__ == '__main__':
main()
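# Illustrative follow-up (hypothetical snippet): the pickled artifacts written
# above can be reloaded later, e.g. for HMM training.
#
#   with open('train_sequences', 'rb') as f:
#       train_sequences = pickle.load(f)   # dict: label -> list of observation arrays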
| 24.25
| 72
| 0.704996
| 162
| 1,261
| 5.185185
| 0.314815
| 0.092857
| 0.085714
| 0.039286
| 0.286905
| 0.163095
| 0
| 0
| 0
| 0
| 0
| 0.009901
| 0.199048
| 1,261
| 51
| 73
| 24.72549
| 0.821782
| 0
| 0
| 0.0625
| 0
| 0
| 0.080888
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.15625
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9db09c1d1c26d802117168878ef76954cf77560
| 3,360
|
py
|
Python
|
matrixprofile/algorithms/snippets.py
|
KSaiRahul21/matrixprofile
|
d8250e30d90ed0453bb7c35bb34ab0c04ae7b334
|
[
"Apache-2.0"
] | null | null | null |
matrixprofile/algorithms/snippets.py
|
KSaiRahul21/matrixprofile
|
d8250e30d90ed0453bb7c35bb34ab0c04ae7b334
|
[
"Apache-2.0"
] | null | null | null |
matrixprofile/algorithms/snippets.py
|
KSaiRahul21/matrixprofile
|
d8250e30d90ed0453bb7c35bb34ab0c04ae7b334
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import numpy as np
from matrixprofile import core
from matrixprofile.algorithms.mpdist import mpdist_vector
def snippets(ts, snippet_size, num_snippets=2, window_size=None):
"""
    The snippets algorithm is used to summarize your time series by
    identifying N representative subsequences. If you want to identify typical
    patterns in your time series, then this is the algorithm to use.
Parameters
----------
ts : array_like
The time series.
snippet_size : int
The size of snippet desired.
num_snippets : int, Default 2
The number of snippets you would like to find.
window_size : int, Default (snippet_size / 2)
The window size.
Returns
-------
list : snippets
A list of snippets as dictionary objects with the following structure.
>>> {
>>> fraction: fraction of the snippet,
>>> index: the index of the snippet,
>>> snippet: the snippet values
>>> }
"""
ts = core.to_np_array(ts).astype('d')
n = len(ts)
if not isinstance(snippet_size, int) or snippet_size < 4:
raise ValueError('snippet_size must be an integer >= 4')
if n < (2 * snippet_size):
raise ValueError('Time series is too short relative to snippet length')
if not window_size:
window_size = int(np.floor(snippet_size / 2))
if window_size >= snippet_size:
raise ValueError('window_size must be smaller than snippet_size')
# pad end of time series with zeros
num_zeros = int(snippet_size * np.ceil(n / snippet_size) - n)
ts = np.append(ts, np.zeros(num_zeros))
# compute all profiles
indices = np.arange(0, len(ts) - snippet_size, snippet_size)
distances = []
for j, i in enumerate(indices):
distance = mpdist_vector(ts, ts[i:(i + snippet_size - 1)], int(window_size))
distances.append(distance)
distances = np.array(distances)
# find N snippets
snippets = []
minis = np.inf
total_min = None
for n in range(num_snippets):
minims = np.inf
for i in range(len(indices)):
s = np.sum(np.minimum(distances[i, :], minis))
if minims > s:
minims = s
index = i
minis = np.minimum(distances[index, :], minis)
actual_index = indices[index]
snippet = ts[actual_index:actual_index + snippet_size]
snippet_distance = distances[index]
snippets.append({
'index': actual_index,
'snippet': snippet,
'distance': snippet_distance
})
if isinstance(total_min, type(None)):
total_min = snippet_distance
else:
total_min = np.minimum(total_min, snippet_distance)
# compute the fraction of each snippet
for snippet in snippets:
mask = (snippet['distance'] <= total_min)
snippet['fraction'] = mask.sum() / (len(ts) - snippet_size)
total_min = total_min - mask
del snippet['distance']
return snippets
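# Illustrative usage (hypothetical data; matrixprofile must be importable for
# `snippets` to run, so this is a sketch rather than a test):
#
#   import numpy as np
#   ts = np.sin(np.linspace(0, 40 * np.pi, 4000)) + 0.05 * np.random.randn(4000)
#   for s in snippets(ts, snippet_size=200, num_snippets=2):
#       print(s['index'], s['fraction'], s['snippet'].shape)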
| 29.734513
| 84
| 0.633036
| 428
| 3,360
| 4.799065
| 0.32243
| 0.091042
| 0.031159
| 0.025316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004512
| 0.274405
| 3,360
| 112
| 85
| 30
| 0.837982
| 0.275595
| 0
| 0
| 0
| 0
| 0.078608
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017857
| false
| 0
| 0.125
| 0
| 0.160714
| 0.017857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9db24edad8766b6e734d6a8a9c26aff6bb04235
| 2,360
|
py
|
Python
|
jina/logging/formatter.py
|
yk/jina
|
ab66e233e74b956390f266881ff5dc4e0110d3ff
|
[
"Apache-2.0"
] | 1
|
2020-12-23T12:34:00.000Z
|
2020-12-23T12:34:00.000Z
|
jina/logging/formatter.py
|
yk/jina
|
ab66e233e74b956390f266881ff5dc4e0110d3ff
|
[
"Apache-2.0"
] | null | null | null |
jina/logging/formatter.py
|
yk/jina
|
ab66e233e74b956390f266881ff5dc4e0110d3ff
|
[
"Apache-2.0"
] | null | null | null |
import json
import re
from copy import copy
from logging import Formatter
from .profile import used_memory
from ..helper import colored
class ColorFormatter(Formatter):
"""Format the log into colored logs based on the log-level. """
MAPPING = {
'DEBUG': dict(color='white', on_color=None), # white
        'INFO': dict(color='white', on_color=None),  # white
'WARNING': dict(color='yellow', on_color='on_grey'), # yellow
'ERROR': dict(color='red', on_color=None), # 31 for red
'CRITICAL': dict(color='white', on_color='on_red'), # white on red bg
        'SUCCESS': dict(color='green', on_color=None),  # green
} #: log-level to color mapping
def format(self, record):
cr = copy(record)
seq = self.MAPPING.get(cr.levelname, self.MAPPING['INFO']) # default white
cr.msg = colored(cr.msg, **seq)
return super().format(cr)
class PlainFormatter(Formatter):
"""Remove all control chars from the log and format it as plain text
Also restrict the max-length of msg to 512
"""
def format(self, record):
cr = copy(record)
if isinstance(cr.msg, str):
cr.msg = re.sub(r'\u001b\[.*?[@-~]', '', str(cr.msg))[:512]
return super().format(cr)
class JsonFormatter(Formatter):
"""Format the log message as a JSON object so that it can be later used/parsed in browser with javascript. """
KEYS = {'created', 'filename', 'funcName', 'levelname', 'lineno', 'msg',
'module', 'name', 'pathname', 'process', 'thread', 'processName',
'threadName', 'log_id'} #: keys to extract from the log
def format(self, record):
cr = copy(record)
cr.msg = re.sub(r'\u001b\[.*?[@-~]', '', str(cr.msg))
return json.dumps(
{k: getattr(cr, k) for k in self.KEYS if hasattr(cr, k)},
sort_keys=True)
class ProfileFormatter(Formatter):
"""Format the log message as JSON object and add the current used memory into it"""
def format(self, record):
cr = copy(record)
if isinstance(cr.msg, dict):
cr.msg.update({k: getattr(cr, k) for k in ['created', 'module', 'process', 'thread']})
cr.msg['memory'] = used_memory(unit=1)
return json.dumps(cr.msg, sort_keys=True)
else:
return ''
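# Illustrative usage sketch (handler and logger names are hypothetical):
#
#   import logging
#
#   handler = logging.StreamHandler()
#   handler.setFormatter(ColorFormatter('%(levelname)s: %(message)s'))
#   demo_logger = logging.getLogger('formatter-demo')
#   demo_logger.addHandler(handler)
#   demo_logger.warning('low disk space')   # colored according to MAPPING above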
| 34.705882
| 114
| 0.601695
| 319
| 2,360
| 4.410658
| 0.363636
| 0.03909
| 0.031272
| 0.054016
| 0.297797
| 0.248756
| 0.170576
| 0.102345
| 0.102345
| 0.06823
| 0
| 0.008475
| 0.25
| 2,360
| 67
| 115
| 35.223881
| 0.786441
| 0.205085
| 0
| 0.222222
| 0
| 0
| 0.133297
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0
| 0.133333
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9dcf24da986778ebcd29602d923908626cfea3c
| 4,263
|
py
|
Python
|
mtl/util/pipeline.py
|
vandurme/TFMTL
|
5958187900bdf67089a237c523b6caa899f63ac1
|
[
"Apache-2.0"
] | 10
|
2019-05-18T22:23:44.000Z
|
2022-01-25T15:24:45.000Z
|
mtl/util/pipeline.py
|
vandurme/TFMTL
|
5958187900bdf67089a237c523b6caa899f63ac1
|
[
"Apache-2.0"
] | 1
|
2020-01-07T15:24:16.000Z
|
2020-01-15T00:39:01.000Z
|
mtl/util/pipeline.py
|
vandurme/TFMTL
|
5958187900bdf67089a237c523b6caa899f63ac1
|
[
"Apache-2.0"
] | 1
|
2021-12-02T02:24:06.000Z
|
2021-12-02T02:24:06.000Z
|
# Copyright 2018 Johns Hopkins University. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import tensorflow as tf
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import parsing_ops
class Pipeline(object):
def __init__(self, tfrecord_file, feature_map, batch_size=32,
num_threads=4, prefetch_buffer_size=1,
static_max_length=None, shuffle_buffer_size=10000,
shuffle=True, num_epochs=None, one_shot=False):
self._feature_map = feature_map
self._batch_size = batch_size
self._static_max_length = static_max_length
# Initialize the dataset
dataset = tf.data.TFRecordDataset(tfrecord_file)
# Maybe randomize
if shuffle:
dataset = dataset.shuffle(shuffle_buffer_size)
# Maybe repeat
if num_epochs is None:
dataset = dataset.repeat() # repeat indefinitely
elif num_epochs > 1:
dataset = dataset.repeat(count=num_epochs)
dataset = dataset.batch(batch_size)
dataset = dataset.map(self.parse_example,
num_parallel_calls=num_threads)
# Pre-fetch a batch for faster processing
dataset = dataset.prefetch(prefetch_buffer_size)
# Get the iterator
if one_shot:
self._iterator = dataset.make_one_shot_iterator()
else:
self._iterator = dataset.make_initializable_iterator()
self._init_op = self._iterator.initializer
# Get outputs
self._outputs = self._iterator.get_next()
# Map to features
index = 0
result = {}
for key in sorted(self._feature_map.keys()):
result[key] = self._outputs[index]
index += 1
self._result = result
def pad(self, t):
s = tf.shape(t)
paddings = [[0, 0], [0, self._static_max_length - s[1]]]
x = tf.pad(t, paddings, 'CONSTANT', constant_values=0)
x = tf.reshape(x, [s[0], self._static_max_length])
assert x.get_shape().as_list()[1] is self._static_max_length
return x
def parse_example(self, serialized):
parsed = parsing_ops.parse_example(serialized, self._feature_map)
result = []
for key in sorted(self._feature_map.keys()):
val = parsed[key]
if isinstance(val, sparse_tensor_lib.SparseTensor):
dense_tensor = tf.sparse_tensor_to_dense(val)
if self._static_max_length is not None:
dense_tensor = self.pad(dense_tensor)
result.append(dense_tensor)
else:
result.append(val)
return tuple(result)
@property
def iterator(self):
return self._iterator
@property
def init_op(self):
return self._init_op
@property
def batch(self):
return self._result
# namedtuple for bucket_info object (used in Pipeline)
# func: a mapping from examples to tf.int64 keys
# pads: a set of tf shapes that correspond to padded examples
bucket_info = namedtuple("bucket_info", "func pads")
def int64_feature(value):
""" Takes a single int (e.g. 3) and converts it to a tf Feature """
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def int64_list_feature(sequence):
""" Sequence of ints (e.g [1,2,3]) to TF feature """
return tf.train.Feature(int64_list=tf.train.Int64List(value=sequence))
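# Illustrative usage sketch (feature names are hypothetical): the two helpers
# above serialize scalar and sequence integer features into a tf.train.Example
# that can be written to the TFRecord file consumed by Pipeline.
#
#   example = tf.train.Example(features=tf.train.Features(feature={
#       'label': int64_feature(3),
#       'tokens': int64_list_feature([17, 52, 9]),
#   }))
#   serialized = example.SerializeToString()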
| 34.658537
| 80
| 0.649073
| 546
| 4,263
| 4.847985
| 0.357143
| 0.023801
| 0.039668
| 0.03589
| 0.088402
| 0.073291
| 0.073291
| 0.073291
| 0.073291
| 0.044579
| 0
| 0.014111
| 0.251935
| 4,263
| 122
| 81
| 34.942623
| 0.81593
| 0.254985
| 0
| 0.097222
| 0
| 0
| 0.00892
| 0
| 0
| 0
| 0
| 0
| 0.013889
| 1
| 0.111111
| false
| 0
| 0.097222
| 0.041667
| 0.319444
| 0.013889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9de795b7b1298f8cad5f30e914735224920a0f9
| 1,158
|
py
|
Python
|
core/views.py
|
moiyad/image
|
d4515ef3057794f38268a6887bfff157115f26f7
|
[
"MIT"
] | null | null | null |
core/views.py
|
moiyad/image
|
d4515ef3057794f38268a6887bfff157115f26f7
|
[
"MIT"
] | null | null | null |
core/views.py
|
moiyad/image
|
d4515ef3057794f38268a6887bfff157115f26f7
|
[
"MIT"
] | null | null | null |
from django.core.files.storage import FileSystemStorage
from django.shortcuts import render, redirect
from core.forms import DocumentForm
from core.models import Document
from media import image_cv2
def home(request):
documents = Document.objects.all()
number = len(image_cv2.myList)
return render(request, 'core/home.html', {'documents': documents, 'number': number})
def simple_upload(request):
if request.method == 'POST' and request.FILES['myfile']:
myfile = request.FILES['myfile']
fs = FileSystemStorage()
filename = fs.save(myfile.name, myfile)
uploaded_file_url = fs.url(filename)
return render(request, 'core/simple_upload.html', {
'uploaded_file_url': uploaded_file_url
})
return render(request, 'core/simple_upload.html')
def model_form_upload(request):
if request.method == 'POST':
form = DocumentForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return redirect('home')
else:
form = DocumentForm()
return render(request, 'core/model_form_upload.html', {
'form': form
})
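# Illustrative URL wiring (hypothetical urls.py; names and paths assumed):
#
#   from django.urls import path
#   from core import views
#
#   urlpatterns = [
#       path('', views.home, name='home'),
#       path('uploads/simple/', views.simple_upload, name='simple_upload'),
#       path('uploads/form/', views.model_form_upload, name='model_form_upload'),
#   ]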
| 30.473684
| 88
| 0.668394
| 137
| 1,158
| 5.532847
| 0.343066
| 0.063325
| 0.100264
| 0.121372
| 0.187335
| 0.187335
| 0.102902
| 0
| 0
| 0
| 0
| 0.002208
| 0.217617
| 1,158
| 37
| 89
| 31.297297
| 0.834437
| 0
| 0
| 0.066667
| 0
| 0
| 0.126943
| 0.06304
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.166667
| 0
| 0.433333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9dfea4e7beba7ec415b85a76c49ed3af214dec4
| 25,442
|
py
|
Python
|
ml4chem/atomistic/models/neuralnetwork.py
|
muammar/mlchem
|
365487c23ea3386657e178e56ab31adfe8d5d073
|
[
"BSD-3-Clause-LBNL"
] | 77
|
2019-08-05T17:30:22.000Z
|
2022-03-28T14:31:35.000Z
|
ml4chem/atomistic/models/neuralnetwork.py
|
muammar/ml4chem
|
365487c23ea3386657e178e56ab31adfe8d5d073
|
[
"BSD-3-Clause-LBNL"
] | 6
|
2019-07-31T18:59:38.000Z
|
2020-10-18T18:15:07.000Z
|
ml4chem/atomistic/models/neuralnetwork.py
|
muammar/mlchem
|
365487c23ea3386657e178e56ab31adfe8d5d073
|
[
"BSD-3-Clause-LBNL"
] | 15
|
2020-02-28T10:11:21.000Z
|
2021-12-01T13:45:33.000Z
|
import dask
import datetime
import logging
import time
import torch
import numpy as np
import pandas as pd
from collections import OrderedDict
from ml4chem.metrics import compute_rmse
from ml4chem.atomistic.models.base import DeepLearningModel, DeepLearningTrainer
from ml4chem.atomistic.models.loss import AtomicMSELoss
from ml4chem.optim.handler import get_optimizer, get_lr_scheduler, get_lr
from ml4chem.utils import convert_elapsed_time, get_chunks, get_number_of_parameters
from pprint import pformat
# Setting precision and starting logger object
torch.set_printoptions(precision=10)
logger = logging.getLogger()
class NeuralNetwork(DeepLearningModel, torch.nn.Module):
"""Atom-centered Neural Network Regression with Pytorch
This model is based on Ref. 1 by Behler and Parrinello.
Parameters
----------
hiddenlayers : tuple
Structure of hidden layers in the neural network.
activation : str
Activation functions. Supported "tanh", "relu", or "celu".
References
----------
1. Behler, J. & Parrinello, M. Generalized Neural-Network Representation
of High-Dimensional Potential-Energy Surfaces. Phys. Rev. Lett. 98,
146401 (2007).
2. Khorshidi, A. & Peterson, A. A. Amp : A modular approach to machine
learning in atomistic simulations. Comput. Phys. Commun. 207, 310–324
(2016).
"""
NAME = "PytorchPotentials"
@classmethod
def name(cls):
"""Returns name of class"""
return cls.NAME
def __init__(self, hiddenlayers=(3, 3), activation="relu", **kwargs):
super(DeepLearningModel, self).__init__()
self.hiddenlayers = hiddenlayers
self.activation = activation
def prepare_model(self, input_dimension, data=None, purpose="training"):
"""Prepare the model
Parameters
----------
input_dimension : int
Input's dimension.
data : object
Data object created from the handler.
purpose : str
Purpose of this model: 'training', 'inference'.
"""
self.input_dimension = input_dimension
activation = {
"tanh": torch.nn.Tanh,
"relu": torch.nn.ReLU,
"celu": torch.nn.CELU,
}
hl = len(self.hiddenlayers)
if purpose == "training":
logger.info(" ")
logger.info("Model")
logger.info("=====")
now = datetime.datetime.now()
logger.info(
"Module accessed on {}.".format(now.strftime("%Y-%m-%d %H:%M:%S"))
)
logger.info("Model name: {}.".format(self.name()))
logger.info("Number of hidden-layers: {}".format(hl))
logger.info(
"Structure of Neural Net: {}".format(
"(input, " + str(self.hiddenlayers)[1:-1] + ", output)"
)
)
layers = range(len(self.hiddenlayers) + 1)
try:
unique_element_symbols = data.unique_element_symbols[purpose]
except TypeError:
unique_element_symbols = data.get_unique_element_symbols(purpose=purpose)
unique_element_symbols = unique_element_symbols[purpose]
symbol_model_pair = []
for symbol in unique_element_symbols:
linears = []
intercept_name = "intercept_" + symbol
slope_name = "slope_" + symbol
if purpose == "training":
intercept = (data.max_energy + data.min_energy) / 2.0
intercept = torch.nn.Parameter(
torch.tensor(intercept, requires_grad=True)
)
slope = (data.max_energy - data.min_energy) / 2.0
slope = torch.nn.Parameter(torch.tensor(slope, requires_grad=True))
self.register_parameter(intercept_name, intercept)
self.register_parameter(slope_name, slope)
elif purpose == "inference":
intercept = torch.nn.Parameter(torch.tensor(0.0))
slope = torch.nn.Parameter(torch.tensor(0.0))
self.register_parameter(intercept_name, intercept)
self.register_parameter(slope_name, slope)
for index in layers:
# This is the input layer
if index == 0:
out_dimension = self.hiddenlayers[0]
_linear = torch.nn.Linear(input_dimension, out_dimension)
linears.append(_linear)
linears.append(activation[self.activation]())
# This is the output layer
elif index == len(self.hiddenlayers):
inp_dimension = self.hiddenlayers[index - 1]
out_dimension = 1
_linear = torch.nn.Linear(inp_dimension, out_dimension)
linears.append(_linear)
# These are hidden-layers
else:
inp_dimension = self.hiddenlayers[index - 1]
out_dimension = self.hiddenlayers[index]
_linear = torch.nn.Linear(inp_dimension, out_dimension)
linears.append(_linear)
linears.append(activation[self.activation]())
# Stacking up the layers.
linears = torch.nn.Sequential(*linears)
symbol_model_pair.append([symbol, linears])
self.linears = torch.nn.ModuleDict(symbol_model_pair)
if purpose == "training":
total_params, train_params = get_number_of_parameters(self)
logger.info("Total number of parameters: {}.".format(total_params))
logger.info("Number of training parameters: {}.".format(train_params))
logger.info(" ")
logger.info(self.linears)
            # Iterate over all modules and just initialize those that are
            # a linear layer.
            logger.warning(
                "Initialization of weights with Xavier Uniform by default."
            )
for m in self.modules():
if isinstance(m, torch.nn.Linear):
# nn.init.normal_(m.weight) # , mean=0, std=0.01)
torch.nn.init.xavier_uniform_(m.weight)
def forward(self, X):
"""Forward propagation
This is forward propagation and it returns the atomic energy.
Parameters
----------
        X : dict
            Dictionary of inputs in the feature space, keyed by image hash.
Returns
-------
outputs : tensor
A list of tensors with energies per image.
"""
outputs = []
for hash in X:
image = X[hash]
atomic_energies = []
for symbol, x in image:
# FIXME this conditional can be removed after de/serialization
# is fixed.
if isinstance(symbol, bytes):
symbol = symbol.decode("utf-8")
x = self.linears[symbol](x)
intercept_name = "intercept_" + symbol
slope_name = "slope_" + symbol
slope = getattr(self, slope_name)
intercept = getattr(self, intercept_name)
x = (slope * x) + intercept
atomic_energies.append(x)
atomic_energies = torch.cat(atomic_energies)
image_energy = torch.sum(atomic_energies)
outputs.append(image_energy)
outputs = torch.stack(outputs)
return outputs
def get_activations(self, images, model=None, numpy=True):
"""Get activations of each hidden-layer
        This function extracts the activations of each hidden-layer of
the neural network.
Parameters
----------
        images : dict
            Images with structure hash, features.
model : object
A ML4Chem model object.
numpy : bool
Whether we want numpy arrays or tensors.
Returns
-------
activations : DataFrame
A DataFrame with activations for each layer.
"""
activations = []
columns = ["hash", "atom.index", "atom.symbol"]
if model is None:
model = self
model.eval()
for hash, data in images.items():
for index, (symbol, features) in enumerate(data):
counter = 0
layer_counter = 0
for l, layer in enumerate(model.linears[symbol].modules()):
if isinstance(layer, torch.nn.Linear) and counter == 0:
x = layer(features)
if numpy:
data_ = [hash, index, symbol, x.detach_().numpy()]
else:
data_ = [hash, index, symbol, x.detach_()]
layer_column_name = f"layer{layer_counter}"
if layer_column_name not in columns:
columns.append(layer_column_name)
counter += 1
layer_counter += 1
elif isinstance(layer, torch.nn.Linear) and counter > 0:
x = layer(x)
if numpy:
data_.append(x.detach_().numpy())
else:
data_.append(x.detach_())
layer_column_name = f"layer{layer_counter}"
if layer_column_name not in columns:
columns.append(layer_column_name)
counter += 1
layer_counter += 1
activations.append(data_)
del data_
# Create DataFrame from lists
df = pd.DataFrame(activations, columns=columns)
return df
class train(DeepLearningTrainer):
"""Train the model
Parameters
----------
inputs : dict
Dictionary with hashed feature space.
targets : list
The expected values that the model has to learn aka y.
model : object
The NeuralNetwork class.
data : object
Data object created from the handler.
optimizer : tuple
The optimizer is a tuple with the structure:
>>> ('adam', {'lr': float, 'weight_decay'=float})
epochs : int
Number of full training cycles.
regularization : float
This is the L2 regularization. It is not the same as weight decay.
convergence : dict
Instead of using epochs, users can set a convergence criterion.
Supported keys are "training" and "test".
lossfxn : obj
A loss function object.
device : str
Calculation can be run in the cpu or cuda (gpu).
batch_size : int
Number of data points per batch to use for training. Default is None.
lr_scheduler : tuple
Tuple with structure: scheduler's name and a dictionary with keyword
arguments.
>>> lr_scheduler = ('ReduceLROnPlateau',
{'mode': 'min', 'patience': 10})
uncertainty : list
A list of uncertainties that are used to penalize during the loss
function evaluation.
checkpoint : dict
Set checkpoints. Dictionary with following structure:
>>> checkpoint = {"label": label, "checkpoint": 100, "path": ""}
`label` refers to the name used to save the checkpoint, `checkpoint`
        is an integer or -1 for saving all epochs, and the path is where the
checkpoint is stored. Default is None and no checkpoint is saved.
test : dict
A dictionary used to compute the error over a validation/test set
during training procedures.
>>> test = {"features": test_space, "targets": test_targets, "data": data_test}
The keys,values of the dictionary are:
- "data": a `Data` object.
- "targets": test set targets.
- "features": a feature space obtained using `features.calculate()`.
"""
def __init__(
self,
inputs,
targets,
model=None,
data=None,
optimizer=(None, None),
regularization=None,
epochs=100,
convergence=None,
lossfxn=None,
device="cpu",
batch_size=None,
lr_scheduler=None,
uncertainty=None,
checkpoint=None,
test=None,
):
self.initial_time = time.time()
if lossfxn is None:
lossfxn = AtomicMSELoss
logger.info("")
logger.info("Training")
logger.info("========")
logger.info(f"Convergence criteria: {convergence}")
logger.info(f"Loss function: {lossfxn.__name__}")
if uncertainty is not None:
logger.info("Options:")
logger.info(f" - Uncertainty penalization: {pformat(uncertainty)}")
logger.info("")
atoms_per_image = data.atoms_per_image
if batch_size is None:
batch_size = len(inputs.values())
if isinstance(batch_size, int):
# Data batches
chunks = list(get_chunks(inputs, batch_size, svm=False))
targets = list(get_chunks(targets, batch_size, svm=False))
atoms_per_image = list(get_chunks(atoms_per_image, batch_size, svm=False))
            if uncertainty is not None:
uncertainty = list(get_chunks(uncertainty, batch_size, svm=False))
uncertainty = [
torch.tensor(u, requires_grad=False, dtype=torch.float)
for u in uncertainty
]
logger.info("")
logging.info("Batch Information")
logging.info("-----------------")
logging.info("Number of batches: {}.".format(len(chunks)))
logging.info("Batch size: {} elements per batch.".format(batch_size))
logger.info(" ")
atoms_per_image = [
torch.tensor(n_atoms, requires_grad=False, dtype=torch.float)
for n_atoms in atoms_per_image
]
targets = [torch.tensor(t, requires_grad=False) for t in targets]
if device == "cuda":
logger.info("Moving data to CUDA...")
            atoms_per_image = [n_atoms.cuda() for n_atoms in atoms_per_image]
            targets = [t.cuda() for t in targets]
_inputs = OrderedDict()
for hash, f in inputs.items():
_inputs[hash] = []
for features in f:
symbol, vector = features
_inputs[hash].append((symbol, vector.cuda()))
inputs = _inputs
move_time = time.time() - self.initial_time
h, m, s = convert_elapsed_time(move_time)
            logger.info(
                "Data moved to GPU in {} hours {} minutes {:.2f} seconds.".format(h, m, s)
            )
logger.info(" ")
# Define optimizer
self.optimizer_name, self.optimizer = get_optimizer(
optimizer, model.parameters()
)
if lr_scheduler is not None:
self.scheduler = get_lr_scheduler(self.optimizer, lr_scheduler)
self.atoms_per_image = atoms_per_image
self.convergence = convergence
self.device = device
self.epochs = epochs
self.model = model
self.lr_scheduler = lr_scheduler
self.lossfxn = lossfxn
self.checkpoint = checkpoint
self.test = test
# Data scattering
client = dask.distributed.get_client()
self.chunks = [client.scatter(chunk) for chunk in chunks]
self.targets = [client.scatter(target) for target in targets]
        if uncertainty is not None:
self.uncertainty = [client.scatter(u) for u in uncertainty]
else:
self.uncertainty = uncertainty
# Let the hunger games begin...
self.trainer()
def trainer(self):
"""Run the training class"""
logger.info(" ")
logger.info("Starting training...\n")
if self.test is None:
logger.info(
"{:6s} {:19s} {:12s} {:12s} {:8s}".format(
"Epoch", "Time Stamp", "Loss", "Error/img", "Error/atom"
)
)
logger.info(
"{:6s} {:19s} {:12s} {:8s} {:8s}".format(
"------",
"-------------------",
"------------",
"------------",
"------------",
)
)
else:
test_features = self.test.get("features", None)
test_targets = self.test.get("targets", None)
test_data = self.test.get("data", None)
logger.info(
"{:6s} {:19s} {:12s} {:12s} {:12s} {:12s} {:16s}".format(
"Epoch",
"Time Stamp",
"Loss",
"Error/img",
"Error/atom",
"Error/img (t)",
"Error/atom (t)",
)
)
logger.info(
"{:6s} {:19s} {:12s} {:8s} {:8s} {:8s} {:8s}".format(
"------",
"-------------------",
"------------",
"------------",
"------------",
"------------",
"------------",
)
)
converged = False
_loss = []
_rmse = []
epoch = 0
client = dask.distributed.get_client()
while not converged:
epoch += 1
self.optimizer.zero_grad() # clear previous gradients
loss, outputs_ = train.closure(
self.chunks,
self.targets,
self.uncertainty,
self.model,
self.lossfxn,
self.atoms_per_image,
self.device,
)
# We step the optimizer
if self.optimizer_name != "LBFGS":
self.optimizer.step()
else:
options = {"closure": self.closure, "current_loss": loss, "max_ls": 10}
self.optimizer.step(options)
# RMSE per image and per/atom
rmse = client.submit(compute_rmse, *(outputs_, self.targets))
atoms_per_image = torch.cat(self.atoms_per_image)
rmse_atom = client.submit(
compute_rmse, *(outputs_, self.targets, atoms_per_image)
)
rmse = rmse.result()
rmse_atom = rmse_atom.result()
_loss.append(loss.item())
_rmse.append(rmse)
# In the case that lr_scheduler is not None
if self.lr_scheduler is not None:
self.scheduler.step(loss)
print("Epoch {} lr {}".format(epoch, get_lr(self.optimizer)))
ts = time.time()
ts = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d " "%H:%M:%S")
if self.test is None:
logger.info(
"{:6d} {} {:8e} {:4e} {:4e}".format(
epoch, ts, loss.detach(), rmse, rmse_atom
)
)
else:
test_model = self.model.eval()
test_predictions = test_model(test_features).detach()
rmse_test = client.submit(
compute_rmse, *(test_predictions, test_targets)
)
atoms_per_image_test = torch.tensor(
test_data.atoms_per_image, requires_grad=False
)
rmse_atom_test = client.submit(
compute_rmse,
*(test_predictions, test_targets, atoms_per_image_test),
)
rmse_test = rmse_test.result()
rmse_atom_test = rmse_atom_test.result()
logger.info(
"{:6d} {} {:8e} {:4e} {:4e} {:4e} {:4e}".format(
epoch,
ts,
loss.detach(),
rmse,
rmse_atom,
rmse_test,
rmse_atom_test,
)
)
if self.checkpoint is not None:
self.checkpoint_save(epoch, self.model, **self.checkpoint)
if self.convergence is None and epoch == self.epochs:
converged = True
elif self.convergence is not None and rmse < self.convergence["energy"]:
converged = True
training_time = time.time() - self.initial_time
h, m, s = convert_elapsed_time(training_time)
logger.info(
"Training finished in {} hours {} minutes {:.2f} seconds.".format(h, m, s)
)
@classmethod
def closure(
Cls, chunks, targets, uncertainty, model, lossfxn, atoms_per_image, device
):
"""Closure
This class method clears previous gradients, iterates over batches,
        accumulates the gradients, reduces them, updates the model
        parameters, and finally returns the loss and outputs_.
Parameters
----------
Cls : object
Class object.
chunks : tensor or list
Tensor with input data points in batch with index.
targets : tensor or list
The targets.
uncertainty : list
A list of uncertainties that are used to penalize during the loss
function evaluation.
model : obj
Pytorch model to perform forward() and get gradients.
lossfxn : obj
A loss function object.
atoms_per_image : list
Atoms per image because we are doing atom-centered methods.
device : str
Are we running cuda or cpu?
"""
outputs_ = []
# Get client to send futures to the scheduler
client = dask.distributed.get_client()
running_loss = torch.tensor(0, dtype=torch.float)
accumulation = []
grads = []
# Accumulation of gradients
for index, chunk in enumerate(chunks):
accumulation.append(
client.submit(
train.train_batches,
*(
index,
chunk,
targets,
uncertainty,
model,
lossfxn,
atoms_per_image,
device,
),
)
)
dask.distributed.wait(accumulation)
accumulation = client.gather(accumulation)
for outputs, loss, grad in accumulation:
grad = np.array(grad, dtype=object)
running_loss += loss
outputs_.append(outputs)
grads.append(grad)
grads = sum(grads)
for index, param in enumerate(model.parameters()):
param.grad = torch.tensor(grads[index], dtype=torch.float)
del accumulation
del grads
return running_loss, outputs_
@classmethod
def train_batches(
Cls, index, chunk, targets, uncertainty, model, lossfxn, atoms_per_image, device
):
"""A function that allows training per batches
Parameters
----------
index : int
Index of batch.
chunk : tensor or list
Tensor with input data points in batch with index.
targets : tensor or list
The targets.
model : obj
Pytorch model to perform forward() and get gradients.
uncertainty : list
A list of uncertainties that are used to penalize during the loss
function evaluation.
lossfxn : obj
A loss function object.
atoms_per_image : list
Atoms per image because we are doing atom-centered methods.
device : str
Are we running cuda or cpu?
Returns
-------
loss : tensor
The loss function of the batch.
"""
inputs = OrderedDict(chunk)
outputs = model(inputs)
        if uncertainty is None:
loss = lossfxn(outputs, targets[index], atoms_per_image[index])
else:
loss = lossfxn(
outputs, targets[index], atoms_per_image[index], uncertainty[index]
)
loss.backward()
gradients = []
for param in model.parameters():
try:
gradient = param.grad.detach().numpy()
except AttributeError:
                # This exception handles the case where an image does not
                # contain a variable that follows the gradient of a certain
                # atom. For example, suppose two batches with 2 molecules
                # each. The first batch contains only C, H, and O, while N
                # appears only in the second batch. The contribution to the
                # total gradient from the first batch for N is then 0.
gradient = 0.0
gradients.append(gradient)
return outputs, loss, gradients
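# Minimal usage sketch (added for illustration; not part of ml4chem). It only
# exercises the constructors defined above with the argument structures that
# the `train` docstring describes. `feature_space`, `targets` and
# `data_handler` are hypothetical objects produced elsewhere (a feature
# calculator and the data handler), and a running dask.distributed client is
# assumed because `train` scatters batches through it.
def _example_training_setup(feature_space, targets, data_handler, input_dimension):
    model = NeuralNetwork(hiddenlayers=(10, 10), activation="tanh")
    model.prepare_model(input_dimension, data=data_handler, purpose="training")
    # Instantiating `train` starts the optimization loop right away.
    return train(
        inputs=feature_space,
        targets=targets,
        model=model,
        data=data_handler,
        optimizer=("adam", {"lr": 1e-3, "weight_decay": 0.0}),
        epochs=200,
        convergence={"energy": 5e-3},
        batch_size=32,
        lr_scheduler=("ReduceLROnPlateau", {"mode": "min", "patience": 10}),
    )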
| 33.742706
| 88
| 0.526924
| 2,573
| 25,442
| 5.094054
| 0.175282
| 0.025177
| 0.025788
| 0.00763
| 0.271229
| 0.247044
| 0.232319
| 0.202487
| 0.173877
| 0.155413
| 0
| 0.008501
| 0.375835
| 25,442
| 753
| 89
| 33.787517
| 0.816814
| 0.228166
| 0
| 0.201814
| 0
| 0.004535
| 0.071482
| 0.00118
| 0
| 0
| 0
| 0.001328
| 0
| 1
| 0.020408
| false
| 0
| 0.031746
| 0
| 0.070295
| 0.006803
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9e018d6290ebe7b0654b7e76a8df225914e3778
| 7,104
|
py
|
Python
|
hatsploit/core/db/db.py
|
EntySec/HatSploit
|
8e445804c252cc24e87888be2c2efc02750ce5ee
|
[
"MIT"
] | 139
|
2021-02-17T15:52:30.000Z
|
2022-03-30T14:50:42.000Z
|
hatsploit/core/db/db.py
|
YurinDoctrine/HatSploit
|
b1550323e08336ec057cbafb77003c22a3bbee91
|
[
"MIT"
] | 27
|
2021-03-24T17:14:30.000Z
|
2022-03-02T18:50:43.000Z
|
hatsploit/core/db/db.py
|
YurinDoctrine/HatSploit
|
b1550323e08336ec057cbafb77003c22a3bbee91
|
[
"MIT"
] | 85
|
2021-02-17T15:39:03.000Z
|
2022-03-07T09:08:58.000Z
|
#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2022 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import json
import os
from hatsploit.core.cli.badges import Badges
from hatsploit.lib.config import Config
from hatsploit.lib.storage import LocalStorage
class DB:
badges = Badges()
config = Config()
local_storage = LocalStorage()
def disconnect_payload_database(self, name):
if self.local_storage.get("connected_payload_databases"):
if name in self.local_storage.get("connected_payload_databases"):
self.local_storage.delete_element("connected_payload_databases", name)
self.local_storage.delete_element("payloads", name)
return
self.badges.print_error("No such payload database connected!")
def disconnect_module_database(self, name):
if self.local_storage.get("connected_module_databases"):
if name in self.local_storage.get("connected_module_databases"):
self.local_storage.delete_element("connected_module_databases", name)
self.local_storage.delete_element("modules", name)
return
self.badges.print_error("No such module database connected!")
def disconnect_plugin_database(self, name):
if self.local_storage.get("connected_plugin_databases"):
if name in self.local_storage.get("connected_plugin_databases"):
self.local_storage.delete_element("connected_plugin_databases", name)
self.local_storage.delete_element("plugins", name)
return
self.badges.print_error("No such plugin database connected!")
def connect_payload_database(self, name, path):
if self.local_storage.get("connected_payload_databases"):
if name in self.local_storage.get("connected_payload_databases"):
self.badges.print_error("Payload database already connected!")
return
if not os.path.exists(path) or not str.endswith(path, "json"):
self.badges.print_error("Not a payload database!")
return
try:
database = json.load(open(path))
except Exception:
self.badges.print_error("Failed to connect payload database!")
return
if '__database__' not in database:
self.badges.print_error("No __database__ section found!")
return
if database['__database__']['type'] != "payloads":
self.badges.print_error("Not a payload database!")
return
del database['__database__']
payloads = {
name: database
}
data = {
name: {
'path': path
}
}
if not self.local_storage.get("connected_payload_databases"):
self.local_storage.set("connected_payload_databases", {})
self.local_storage.update("connected_payload_databases", data)
if self.local_storage.get("payloads"):
self.local_storage.update("payloads", payloads)
else:
self.local_storage.set("payloads", payloads)
def connect_module_database(self, name, path):
if self.local_storage.get("connected_module_databases"):
if name in self.local_storage.get("connected_module_databases"):
self.badges.print_error("Module database already connected!")
return
if not os.path.exists(path) or not str.endswith(path, "json"):
self.badges.print_error("Not a module database!")
return
try:
database = json.load(open(path))
except Exception:
self.badges.print_error("Failed to connect module database!")
return
if '__database__' not in database:
self.badges.print_error("No __database__ section found!")
return
if database['__database__']['type'] != "modules":
self.badges.print_error("Not a module database!")
return
del database['__database__']
modules = {
name: database
}
data = {
name: {
'path': path
}
}
if not self.local_storage.get("connected_module_databases"):
self.local_storage.set("connected_module_databases", {})
self.local_storage.update("connected_module_databases", data)
if self.local_storage.get("modules"):
self.local_storage.update("modules", modules)
else:
self.local_storage.set("modules", modules)
def connect_plugin_database(self, name, path):
if self.local_storage.get("connected_plugin_databases"):
if name in self.local_storage.get("connected_plugin_databases"):
self.badges.print_error("Plugin database already connected!")
return
if not os.path.exists(path) or not str.endswith(path, "json"):
self.badges.print_error("Not a database!")
return
try:
database = json.load(open(path))
except Exception:
self.badges.print_error("Failed to connect plugin database!")
return
if '__database__' not in database:
self.badges.print_error("No __database__ section found!")
return
if database['__database__']['type'] != "plugins":
self.badges.print_error("Not a plugin database!")
return
del database['__database__']
plugins = {
name: database
}
data = {
name: {
'path': path
}
}
if not self.local_storage.get("connected_plugin_databases"):
self.local_storage.set("connected_plugin_databases", {})
self.local_storage.update("connected_plugin_databases", data)
if self.local_storage.get("plugins"):
self.local_storage.update("plugins", plugins)
else:
self.local_storage.set("plugins", plugins)
| 38.193548
| 86
| 0.639077
| 817
| 7,104
| 5.350061
| 0.190942
| 0.101579
| 0.131778
| 0.078243
| 0.642187
| 0.599176
| 0.553649
| 0.486388
| 0.461679
| 0.427362
| 0
| 0.001734
| 0.269285
| 7,104
| 185
| 87
| 38.4
| 0.840301
| 0.152872
| 0
| 0.518519
| 0
| 0
| 0.235569
| 0.105439
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0
| 0.037037
| 0
| 0.244444
| 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9e09def642ce98a753ac3053c44b1ba7d862f16
| 4,850
|
py
|
Python
|
shutTheBox/main.py
|
robi1467/shut-the-box
|
ed1a8f13bc74caa63361453e723768a9cbe1dac4
|
[
"MIT"
] | null | null | null |
shutTheBox/main.py
|
robi1467/shut-the-box
|
ed1a8f13bc74caa63361453e723768a9cbe1dac4
|
[
"MIT"
] | null | null | null |
shutTheBox/main.py
|
robi1467/shut-the-box
|
ed1a8f13bc74caa63361453e723768a9cbe1dac4
|
[
"MIT"
] | null | null | null |
import random
numbers_list = [1,2,3,4,5,6,7,8,9,10]
game_won = False
game_completed = False
#Stats
games_played = 0
games_won = 0
games_lost = 0
average_score = 0
total_score = 0
def welcome():
welcome_message = "Welcome to shut the box"
print(welcome_message)
i = 0
result = ""
while i < len(numbers_list):
if i < len(numbers_list)-1:
result += str(numbers_list[i]) + " "
else:
result += str(numbers_list[i])
i+=1
print(result)
def dice_roll(amount):
total = 0
i = 0
while i < amount:
total += random.randint(1, 6)
i+=1
return total
def choose_dice_amount():
amount = 0
while True:
try:
amount = int(input("You choose to roll one or two dice. Please enter either '1' or '2': "))
except ValueError:
print("INVALID ENTRY PLEASE TRY AGAIN")
continue
if amount == 1 or amount == 2:
return amount
else:
print("INVALID ENTRY PLEASE TRY AGAIN!")
continue
return amount
def choose_number_to_drop(target_amount):
entered = 0
goal = target_amount
entered_numbers = list()
while goal != 0:
try:
print("Available numbers: " + str(numbers_list) + " to get to " + str(target_amount))
entered = int(input("Please enter a number that is available: "))
except ValueError:
print("Invalid Entry, please try again")
continue
if entered not in numbers_list or entered in entered_numbers:
print("Invalid Entry, please try again")
continue
else:
goal -= entered
entered_numbers.append(entered)
if goal < 0:
goal = target_amount
entered_numbers = list()
i = 0
while i < len(entered_numbers):
numbers_list.remove(entered_numbers[i])
i += 1
def check_lost_game(rolled):
value = True
if rolled not in numbers_list:
i = 0
while i < len(numbers_list):
j = i+1
while j< len(numbers_list):
if numbers_list[i] + numbers_list[j] == rolled:
return False
k = j+1
while k < len(numbers_list):
if numbers_list[i] + numbers_list[j] + numbers_list[k] == rolled:
return False
l = k+1
while l < len(numbers_list):
if numbers_list[i] + numbers_list[j] + numbers_list[k] + numbers_list[l] == rolled:
return False
l+=1
k+=1
j+=1
i +=1
else:
value = False
return value
def end_game():
game_completed = True
return game_completed
def win_game():
game_won = True
return game_won
def score_game():
score = 0
i = 0
while i < len(numbers_list):
score += numbers_list[i]
i+=1
return score
def all_less_than_7():
less_than_7 = True
i = 0
while i < len(numbers_list):
if numbers_list[i] > 6:
less_than_7 = False
i += 1
return less_than_7
def keep_playing_input():
while True:
try:
continue_playing = (input("Do you wish to keep playing? y or n: "))
except ValueError:
print("Invalid choice; please try again")
continue
if continue_playing.lower == "y":
return True
else:
return False
keep_playing = True
while keep_playing:
numbers_list = [1,2,3,4,5,6,7,8,9,10]
welcome()
roll_total = 0
while roll_total < 55:
dice_amount = 2
if all_less_than_7():
dice_amount = choose_dice_amount()
dice_total = dice_roll(dice_amount)
print("Your roll is: " + str(dice_total))
if check_lost_game(dice_total):
print("It is impossible to continue the game with this roll")
break
choose_number_to_drop(dice_total)
roll_total += dice_total
if roll_total == 55:
game_won = win_game()
if game_won:
print("Congrats you won!!!!")
games_played +=1
games_won +=1
else:
print("You lose, your score is " + str(score_game()))
print("Numbers remaining: " + str(numbers_list))
games_played += 1
games_lost += 1
total_score += score_game()
average_score = total_score/games_played
game_won = False
print("STATS:\n Games Played: " + str(games_played) + "\nGames Won: " + str(games_won) + "\nGames Lost: " + str(games_lost)
+ "\nAverage Score: " + str(average_score) + "\nTotal Score: " + str(total_score))
    keep_playing = keep_playing_input()
| 28.034682
| 127
| 0.549897
| 619
| 4,850
| 4.113086
| 0.177706
| 0.129615
| 0.042419
| 0.029458
| 0.261587
| 0.225452
| 0.216811
| 0.118617
| 0.118617
| 0.118617
| 0
| 0.023672
| 0.355464
| 4,850
| 172
| 128
| 28.197674
| 0.790787
| 0.001031
| 0
| 0.341935
| 0
| 0
| 0.117052
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.006452
| 0
| 0.154839
| 0.090323
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9e379a95e3f4e855adb56ee1112dc1aa95e6a78
| 9,351
|
py
|
Python
|
main.py
|
mithi/semantic-segmentation
|
85e9df04397745e0c6ab252e30991fa9b514ec1a
|
[
"MIT"
] | 33
|
2017-08-24T16:38:15.000Z
|
2022-03-17T15:55:52.000Z
|
main.py
|
mithi/semantic-segmentation
|
85e9df04397745e0c6ab252e30991fa9b514ec1a
|
[
"MIT"
] | 3
|
2018-10-12T11:17:22.000Z
|
2019-05-30T09:49:11.000Z
|
main.py
|
mithi/semantic-segmentation
|
85e9df04397745e0c6ab252e30991fa9b514ec1a
|
[
"MIT"
] | 26
|
2017-09-17T09:09:52.000Z
|
2020-01-14T02:48:56.000Z
|
import tensorflow as tf
import os.path
import warnings
from distutils.version import LooseVersion
import glob
import helper
import project_tests as tests
#--------------------------
# USER-SPECIFIED DATA
#--------------------------
# Tune these parameters
NUMBER_OF_CLASSES = 2
IMAGE_SHAPE = (160, 576)
EPOCHS = 20
BATCH_SIZE = 1
LEARNING_RATE = 0.0001
DROPOUT = 0.75
# Specify these directory paths
DATA_DIRECTORY = './data'
RUNS_DIRECTORY = './runs'
TRAINING_DATA_DIRECTORY ='./data/data_road/training'
NUMBER_OF_IMAGES = len(glob.glob('./data/data_road/training/calib/*.*'))
VGG_PATH = './data/vgg'
all_training_losses = [] # Used for plotting to visualize if our training is going well given parameters
#--------------------------
# DEPENDENCY CHECK
#--------------------------
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
#--------------------------
# PLACEHOLDER TENSORS
#--------------------------
correct_label = tf.placeholder(tf.float32, [None, IMAGE_SHAPE[0], IMAGE_SHAPE[1], NUMBER_OF_CLASSES])
learning_rate = tf.placeholder(tf.float32)
keep_prob = tf.placeholder(tf.float32)
#--------------------------
# FUNCTIONS
#--------------------------
def load_vgg(sess, vgg_path):
"""
Load Pretrained VGG Model into TensorFlow.
sess: TensorFlow Session
vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3, layer4, layer7)
"""
# load the model and weights
model = tf.saved_model.loader.load(sess, ['vgg16'], vgg_path)
# Get Tensors to be returned from graph
graph = tf.get_default_graph()
image_input = graph.get_tensor_by_name('image_input:0')
keep_prob = graph.get_tensor_by_name('keep_prob:0')
layer3 = graph.get_tensor_by_name('layer3_out:0')
layer4 = graph.get_tensor_by_name('layer4_out:0')
layer7 = graph.get_tensor_by_name('layer7_out:0')
return image_input, keep_prob, layer3, layer4, layer7
def conv_1x1(layer, layer_name):
""" Return the output of a 1x1 convolution of a layer """
return tf.layers.conv2d(inputs = layer,
filters = NUMBER_OF_CLASSES,
kernel_size = (1, 1),
strides = (1, 1),
name = layer_name)
def upsample(layer, k, s, layer_name):
""" Return the output of transpose convolution given kernel_size k and strides s """
return tf.layers.conv2d_transpose(inputs = layer,
filters = NUMBER_OF_CLASSES,
kernel_size = (k, k),
strides = (s, s),
padding = 'same',
name = layer_name)
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes = NUMBER_OF_CLASSES):
"""
Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
vgg_layerX_out: TF Tensor for VGG Layer X output
num_classes: Number of classes to classify
return: The Tensor for the last layer of output
"""
# Use a shorter variable name for simplicity
layer3, layer4, layer7 = vgg_layer3_out, vgg_layer4_out, vgg_layer7_out
# Apply a 1x1 convolution to encoder layers
layer3x = conv_1x1(layer = layer3, layer_name = "layer3conv1x1")
layer4x = conv_1x1(layer = layer4, layer_name = "layer4conv1x1")
layer7x = conv_1x1(layer = layer7, layer_name = "layer7conv1x1")
# Add decoder layers to the network with skip connections and upsampling
# Note: the kernel size and strides are the same as the example in Udacity Lectures
# Semantic Segmentation Scene Understanding Lesson 10-9: FCN-8 - Decoder
decoderlayer1 = upsample(layer = layer7x, k = 4, s = 2, layer_name = "decoderlayer1")
decoderlayer2 = tf.add(decoderlayer1, layer4x, name = "decoderlayer2")
decoderlayer3 = upsample(layer = decoderlayer2, k = 4, s = 2, layer_name = "decoderlayer3")
decoderlayer4 = tf.add(decoderlayer3, layer3x, name = "decoderlayer4")
decoderlayer_output = upsample(layer = decoderlayer4, k = 16, s = 8, layer_name = "decoderlayer_output")
return decoderlayer_output
def optimize(nn_last_layer, correct_label, learning_rate, num_classes = NUMBER_OF_CLASSES):
"""
Build the TensorFLow loss and optimizer operations.
nn_last_layer: TF Tensor of the last layer in the neural network
correct_label: TF Placeholder for the correct label image
learning_rate: TF Placeholder for the learning rate
num_classes: Number of classes to classify
return: Tuple of (logits, train_op, cross_entropy_loss)
"""
# Reshape 4D tensors to 2D, each row represents a pixel, each column a class
logits = tf.reshape(nn_last_layer, (-1, num_classes))
class_labels = tf.reshape(correct_label, (-1, num_classes))
# The cross_entropy_loss is the cost which we are trying to minimize to yield higher accuracy
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = class_labels)
cross_entropy_loss = tf.reduce_mean(cross_entropy)
# The model implements this operation to find the weights/parameters that would yield correct pixel labels
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy_loss)
return logits, train_op, cross_entropy_loss
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
cross_entropy_loss, input_image,
correct_label, keep_prob, learning_rate):
"""
Train neural network and print out the loss during training.
sess: TF Session
epochs: Number of epochs
batch_size: Batch size
get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
train_op: TF Operation to train the neural network
cross_entropy_loss: TF Tensor for the amount of loss
input_image: TF Placeholder for input images
correct_label: TF Placeholder for label images
keep_prob: TF Placeholder for dropout keep probability
learning_rate: TF Placeholder for learning rate
"""
for epoch in range(EPOCHS):
losses, i = [], 0
for images, labels in get_batches_fn(BATCH_SIZE):
i += 1
feed = { input_image: images,
correct_label: labels,
keep_prob: DROPOUT,
learning_rate: LEARNING_RATE }
_, partial_loss = sess.run([train_op, cross_entropy_loss], feed_dict = feed)
print("---> iteration: ", i, " partial loss:", partial_loss)
losses.append(partial_loss)
training_loss = sum(losses) / len(losses)
all_training_losses.append(training_loss)
print("------------------")
print("epoch: ", epoch + 1, " of ", EPOCHS, "training loss: ", training_loss)
print("------------------")
def run_tests():
tests.test_layers(layers)
tests.test_optimize(optimize)
tests.test_for_kitti_dataset(DATA_DIRECTORY)
tests.test_train_nn(train_nn)
def run():
""" Run a train a model and save output images resulting from the test image fed on the trained model """
# Get vgg model if we can't find it where it should be
helper.maybe_download_pretrained_vgg(DATA_DIRECTORY)
# A function to get batches
get_batches_fn = helper.gen_batch_function(TRAINING_DATA_DIRECTORY, IMAGE_SHAPE)
with tf.Session() as session:
# Returns the three layers, keep probability and input layer from the vgg architecture
image_input, keep_prob, layer3, layer4, layer7 = load_vgg(session, VGG_PATH)
# The resulting network architecture from adding a decoder on top of the given vgg model
model_output = layers(layer3, layer4, layer7, NUMBER_OF_CLASSES)
# Returns the output logits, training operation and cost operation to be used
# - logits: each row represents a pixel, each column a class
# - train_op: function used to get the right parameters to the model to correctly label the pixels
# - cross_entropy_loss: function outputting the cost which we are minimizing, lower cost should yield higher accuracy
logits, train_op, cross_entropy_loss = optimize(model_output, correct_label, learning_rate, NUMBER_OF_CLASSES)
# Initialize all variables
session.run(tf.global_variables_initializer())
session.run(tf.local_variables_initializer())
# Train the neural network
train_nn(session, EPOCHS, BATCH_SIZE, get_batches_fn,
train_op, cross_entropy_loss, image_input,
correct_label, keep_prob, learning_rate)
# Run the model with the test images and save each painted output image (roads painted green)
helper.save_inference_samples(RUNS_DIRECTORY, DATA_DIRECTORY, session, IMAGE_SHAPE, logits, keep_prob, image_input)
#--------------------------
# MAIN
#--------------------------
if __name__ == "__main__":
run_tests()
    run() # Train a model and save the output images produced by feeding the test images to the trained model
print(all_training_losses)
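# Inference sketch (added for illustration; not part of the original script).
# It shows how the logits returned by optimize() could be turned into a binary
# road mask for one preprocessed image of shape IMAGE_SHAPE + (3,), assuming
# class index 1 corresponds to "road".
def predict_road_mask(session, logits, keep_prob, image_input, image):
    # Softmax over the two classes for every pixel, keep the road probability.
    softmax = session.run(
        tf.nn.softmax(logits),
        feed_dict={keep_prob: 1.0, image_input: [image]})
    road_prob = softmax[:, 1].reshape(IMAGE_SHAPE[0], IMAGE_SHAPE[1])
    return road_prob > 0.5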
| 37.8583
| 146
| 0.69276
| 1,265
| 9,351
| 4.904348
| 0.223715
| 0.027079
| 0.028369
| 0.018375
| 0.220986
| 0.157318
| 0.120406
| 0.106705
| 0.066731
| 0.042553
| 0
| 0.017317
| 0.197198
| 9,351
| 246
| 147
| 38.012195
| 0.809112
| 0.378141
| 0
| 0.055046
| 0
| 0
| 0.093315
| 0.010584
| 0
| 0
| 0
| 0
| 0.009174
| 1
| 0.073395
| false
| 0
| 0.06422
| 0
| 0.183486
| 0.06422
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9e38ca4d963e2aa4de106573e34682092b6337e
| 22,356
|
py
|
Python
|
tests/scanner/audit/log_sink_rules_engine_test.py
|
BrunoReboul/forseti-security
|
9d4a61b3e5a5d22a4330d15ddf61063fc9079071
|
[
"Apache-2.0"
] | null | null | null |
tests/scanner/audit/log_sink_rules_engine_test.py
|
BrunoReboul/forseti-security
|
9d4a61b3e5a5d22a4330d15ddf61063fc9079071
|
[
"Apache-2.0"
] | null | null | null |
tests/scanner/audit/log_sink_rules_engine_test.py
|
BrunoReboul/forseti-security
|
9d4a61b3e5a5d22a4330d15ddf61063fc9079071
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the LogSinkRulesEngine."""
import unittest
import mock
from tests.unittest_utils import ForsetiTestCase
from tests.unittest_utils import get_datafile_path
from google.cloud.forseti.common.gcp_type.billing_account import BillingAccount
from google.cloud.forseti.common.gcp_type.folder import Folder
from google.cloud.forseti.common.gcp_type.log_sink import LogSink
from google.cloud.forseti.common.gcp_type.organization import Organization
from google.cloud.forseti.common.gcp_type.project import Project
from google.cloud.forseti.scanner.audit import log_sink_rules_engine as lsre
from google.cloud.forseti.scanner.audit.errors import InvalidRulesSchemaError
class LogSinkRulesEngineTest(ForsetiTestCase):
"""Tests for the LogSinkRulesEngine."""
def setUp(self):
"""Set up GCP resources for tests."""
self.lsre = lsre
self.lsre.LOGGER = mock.MagicMock()
# Set up resources in the following hierarchy:
# +-----> billing_acct_abcd
# |
# |
# +-----------------------> proj-1
# |
# |
# org_234 +-----> folder_56 +-----> proj-2
# |
# |
# +-----------------------> proj-3
self.org_234 = Organization(
'234',
display_name='Organization 234',
full_name='organization/234/',
data='fake_org_data_234')
self.billing_acct_abcd = BillingAccount(
'ABCD-1234',
display_name='Billing Account ABCD',
full_name='organization/234/billingAccount/ABCD-1234/',
data='fake_billing_account_data_abcd')
self.folder_56 = Folder(
'56',
display_name='Folder 56',
full_name='organization/234/folder/56/',
data='fake_folder_data456456')
self.proj_1 = Project(
'proj-1',
project_number=11223344,
display_name='My project 1',
parent=self.org_234,
full_name='organization/234/project/proj-1/',
data='fake_project_data_2341')
self.proj_2 = Project(
'proj-2',
project_number=223344,
display_name='My project 2',
parent=self.folder_56,
full_name='organization/234/folder/56/project/proj-2/',
data='fake_project_data_4562')
self.proj_3 = Project(
'proj-3',
project_number=33445566,
display_name='My project 3',
parent=self.org_234,
full_name='organization/234/project/proj-3/',
data='fake_project_data_1233')
def get_engine_with_valid_rules(self):
"""Create a rule engine build with a valid rules file."""
rules_local_path = get_datafile_path(
__file__, 'log_sink_test_valid_rules.yaml')
rules_engine = self.lsre.LogSinkRulesEngine(
rules_file_path=rules_local_path)
rules_engine.build_rule_book()
return rules_engine
def test_build_rule_book_from_local_yaml_file_works(self):
"""Tests that a RuleBook is built correctly with a yaml file."""
rules_engine = self.get_engine_with_valid_rules()
        # Creates 'self' rules for 5 different resources and 'children' rules
        # for 2.
self.assertEqual(
6, len(rules_engine.rule_book.resource_rules_map['self']))
self.assertEqual(
2, len(rules_engine.rule_book.resource_rules_map['children']))
self_rule_resources = []
for resource in rules_engine.rule_book.resource_rules_map['self']:
self_rule_resources.append(resource.name)
expected_rule_resources = [
'billingAccounts/ABCD-1234', 'folders/56', 'organizations/234',
'projects/proj-1', 'projects/proj-2', 'projects/proj-3']
self.assertEqual(expected_rule_resources, sorted(self_rule_resources))
child_rule_resources = []
for resource in rules_engine.rule_book.resource_rules_map['children']:
child_rule_resources.append(resource.name)
expected_rule_resources = ['folders/56', 'organizations/234']
self.assertEqual(expected_rule_resources, sorted(child_rule_resources))
def test_build_rule_book_invalid_applies_to_fails(self):
"""Tests that a rule with invalid applies_to type cannot be created."""
rules_local_path = get_datafile_path(
__file__, 'log_sink_test_invalid_rules.yaml')
rules_engine = self.lsre.LogSinkRulesEngine(
rules_file_path=rules_local_path)
with self.assertRaises(InvalidRulesSchemaError):
rules_engine.build_rule_book()
def test_project_with_no_violations(self):
"""Tests that no violations are produced for a correct project."""
rules_engine = self.get_engine_with_valid_rules()
# proj-1 needs an Audit Log sink.
log_sinks = [
LogSink(
sink_id='audit_logs_to_bq',
destination=('bigquery.googleapis.com/projects/my-audit-logs/'
'datasets/proj_1_logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
include_children=False,
writer_identity='serviceAccount:logs@test.gserviceaccount.com',
parent=self.proj_1,
raw_json='_SINK_1_'
),
LogSink(
sink_id='compute_logs_saver',
destination=('bigquery.googleapis.com/projects/proj_1/'
'datasets/compute_logs'),
sink_filter='resource.type="gce_instance"',
include_children=False,
writer_identity=('serviceAccount:p12345-67890@'
'gcp-sa-logging.iam.gserviceaccount.com'),
parent=self.proj_1,
raw_json='_SINK_2_'
)
]
actual_violations = rules_engine.find_violations(
self.proj_1, log_sinks)
self.assertEqual(set(), actual_violations)
def test_folder_with_no_violations(self):
"""Tests that no violations are produced for a correct folder."""
rules_engine = self.get_engine_with_valid_rules()
# Rules disallow any folder-level LogSinks.
actual_violations = rules_engine.find_violations(self.folder_56, [])
self.assertEqual(set(), actual_violations)
def test_billing_account_with_no_violations(self):
"""Tests that no violations are produced for a correct billing acct."""
rules_engine = self.get_engine_with_valid_rules()
log_sinks = [
LogSink(
sink_id='billing_logs',
destination=('bigquery.googleapis.com/projects/my-audit-logs/'
'datasets/billing_logs'),
sink_filter='',
include_children=False,
writer_identity='serviceAccount:logs@test.gserviceaccount.com',
parent=self.billing_acct_abcd,
raw_json='__SINK_1__'
),
]
actual_violations = rules_engine.find_violations(
self.billing_acct_abcd, log_sinks)
self.assertEqual(set(), actual_violations)
def test_org_with_no_violations(self):
"""Tests that no violations are produced for a correct organization."""
rules_engine = self.get_engine_with_valid_rules()
# Org needs an Audit Log sink, but to any destination.
log_sinks = [
LogSink(
sink_id='audit_logs_to_pubsub',
destination=('pubsub.googleapis.com/projects/proj-3/topics/'
'org-audit-logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
include_children=True,
writer_identity='serviceAccount:logs@test.gserviceaccount.com',
parent=self.org_234,
raw_json='__SINK_1__'
)
]
actual_violations = rules_engine.find_violations(
self.org_234, log_sinks)
self.assertEqual(set(), actual_violations)
def test_project_missing_required_sinks(self):
"""Tests violations are produced for project missing required sinks."""
rules_engine = self.get_engine_with_valid_rules()
# proj-2 needs an Audit Log sink, by org-level rules, and a pubsub
# sink, by folder-level rules.
log_sinks = [
LogSink(
sink_id='non_audit_logs_to_bq',
destination=('bigquery.googleapis.com/projects/my-audit-logs/'
'datasets/proj_2_logs'),
sink_filter='logName:"logs/non-cloudaudit.googleapis.com"',
include_children=False,
writer_identity='serviceAccount:logs@test.gserviceaccount.com',
parent=self.proj_2,
raw_json='__SINK_1__'
),
LogSink(
sink_id='compute_logs_saver',
destination=('bigquery.googleapis.com/projects/proj_2/'
'datasets/compute_logs'),
sink_filter='resource.type="gce_instance"',
include_children=False,
writer_identity=('serviceAccount:p12345-67890@'
'gcp-sa-logging.iam.gserviceaccount.com'),
parent=self.proj_2,
raw_json='__SINK_2__'
)
]
actual_violations = rules_engine.find_violations(
self.proj_2, log_sinks)
expected_violations = set([
lsre.Rule.RuleViolation(
resource_name='proj-2',
resource_type='project',
resource_id='proj-2',
full_name='organization/234/folder/56/project/proj-2/',
rule_name='Require Audit Log sinks in all projects.',
rule_index=0,
violation_type='LOG_SINK_VIOLATION',
sink_destination=('^bigquery\\.googleapis\\.com\\/projects\\/'
'my\\-audit\\-logs\\/datasets\\/.+$'),
sink_filter=('^logName\\:\\"logs\\/'
'cloudaudit\\.googleapis\\.com\\"$'),
sink_include_children='*',
resource_data=''
),
lsre.Rule.RuleViolation(
resource_name='proj-2',
resource_type='project',
resource_id='proj-2',
full_name='organization/234/folder/56/project/proj-2/',
rule_name='Require a PubSub sink in folder-56 projects.',
rule_index=3,
violation_type='LOG_SINK_VIOLATION',
sink_destination='^pubsub\\.googleapis\\.com\\/.+$',
sink_filter='^$',
sink_include_children='*',
resource_data=''
)
])
self.assertEqual(expected_violations, actual_violations)
def test_project_whitelist_violation(self):
"""Tests violations are produced for non-whitelisted sinks."""
rules_engine = self.get_engine_with_valid_rules()
# proj-3 can only have BigQuery sinks.
log_sinks = [
LogSink(
sink_id='audit_logs_to_bq',
destination=('bigquery.googleapis.com/projects/my-audit-logs/'
'datasets/proj_1_logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
include_children=False,
writer_identity='serviceAccount:logs@test.gserviceaccount.com',
parent=self.proj_3,
raw_json='__SINK_1__'
),
LogSink(
sink_id='audit_logs_to_pubsub',
destination=('pubsub.googleapis.com/projects/proj-3/topics/'
'proj-audit-logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
include_children=True,
writer_identity='serviceAccount:logs@test.gserviceaccount.com',
parent=self.proj_3,
raw_json='__SINK_2__'
)
]
actual_violations = rules_engine.find_violations(
self.proj_3, log_sinks)
expected_violations = set([
lsre.Rule.RuleViolation(
resource_name='projects/proj-3/sinks/audit_logs_to_pubsub',
resource_type='sink',
resource_id='audit_logs_to_pubsub',
full_name='organization/234/project/proj-3/audit_logs_to_pubsub/',
rule_name='Only allow BigQuery sinks in Proj-1 and Proj-3.',
rule_index=4,
violation_type='LOG_SINK_VIOLATION',
sink_destination=('pubsub.googleapis.com/projects/proj-3/'
'topics/proj-audit-logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
sink_include_children=True,
resource_data='__SINK_2__'
)
])
self.assertEqual(expected_violations, actual_violations)
def test_folder_blacklist_violation(self):
"""Tests violations are produced for blacklisted sinks."""
rules_engine = self.get_engine_with_valid_rules()
# Rules disallow any folder-level LogSinks.
log_sinks = [
LogSink(
sink_id='audit_logs_to_bq',
destination=('bigquery.googleapis.com/projects/my-audit-logs/'
'datasets/folder_logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
include_children=False,
writer_identity='serviceAccount:logs@test.gserviceaccount.com',
parent=self.folder_56,
raw_json='__SINK_1__'
)
]
actual_violations = rules_engine.find_violations(
self.folder_56, log_sinks)
expected_violations = set([
lsre.Rule.RuleViolation(
resource_name='folders/56/sinks/audit_logs_to_bq',
resource_type='sink',
resource_id='audit_logs_to_bq',
full_name='organization/234/folder/56/audit_logs_to_bq/',
rule_name='Disallow folder sinks.',
rule_index=2,
violation_type='LOG_SINK_VIOLATION',
sink_destination=('bigquery.googleapis.com/projects/'
'my-audit-logs/datasets/folder_logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
sink_include_children=False,
resource_data='__SINK_1__')
])
self.assertEqual(expected_violations, actual_violations)
def test_billing_account_with_whitelist_violations(self):
"""Tests violations are produced for billing account sinks."""
rules_engine = self.get_engine_with_valid_rules()
log_sinks = [
LogSink(
sink_id='billing_logs',
destination=('bigquery.googleapis.com/projects/my-audit-logs/'
'datasets/wrong_dataset'),
sink_filter='',
include_children=False,
writer_identity='serviceAccount:logs@test.gserviceaccount.com',
parent=self.billing_acct_abcd,
raw_json='__SINK_1__'
),
]
actual_violations = rules_engine.find_violations(
self.billing_acct_abcd, log_sinks)
expected_violations = set([
lsre.Rule.RuleViolation(
resource_type='sink',
resource_id='billing_logs',
resource_name='billingAccounts/ABCD-1234/sinks/billing_logs',
full_name='organization/234/billingAccount/ABCD-1234/billing_logs/',
rule_name=('Only allow Billing Account sinks to audit logs '
'project.'),
rule_index=6,
violation_type='LOG_SINK_VIOLATION',
sink_destination=('bigquery.googleapis.com/projects/'
'my-audit-logs/datasets/wrong_dataset'),
sink_filter='',
sink_include_children=False,
resource_data='__SINK_1__')
])
self.assertEqual(expected_violations, actual_violations)
def test_org_missing_required_sinks(self):
"""Tests violations are produced for an org missing required sinks."""
rules_engine = self.get_engine_with_valid_rules()
# Org needs an Audit Log sink, including children.
log_sinks = [
LogSink(
sink_id='sink_not_including_children',
destination=('pubsub.googleapis.com/projects/proj-3/topics/'
'org-audit-logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
include_children=False,
writer_identity='serviceAccount:logs@test.gserviceaccount.com',
parent=self.org_234,
raw_json='__SINK_1__'
),
LogSink(
sink_id='sink_with_wrong_filter',
destination=('pubsub.googleapis.com/projects/proj-3/topics/'
'org-more-logs'),
sink_filter='logName:"logs/otherapi.googleapis.com"',
include_children=True,
writer_identity='serviceAccount:logs@test.gserviceaccount.com',
parent=self.org_234,
raw_json='__SINK_2__'
)
]
actual_violations = rules_engine.find_violations(
self.org_234, log_sinks)
expected_violations = set([
lsre.Rule.RuleViolation(
resource_name='234',
resource_type='organization',
resource_id='234',
full_name='organization/234/',
rule_name='Require an Org Level audit log sink.',
rule_index=1,
violation_type='LOG_SINK_VIOLATION',
sink_destination='^.*$',
sink_filter=('^logName\\:\\"logs\\/'
'cloudaudit\\.googleapis\\.com\\"$'),
sink_include_children=True,
resource_data=''
)
])
self.assertEqual(expected_violations, actual_violations)
def test_add_invalid_rules(self):
"""Tests that adding invalid rules raises exceptions."""
rule_book = self.lsre.LogSinkRuleBook(global_configs=None)
valid_resource = {
'type': 'organization',
'applies_to': 'children',
'resource_ids': ['1234']
}
valid_sink_spec = {
'destination': 'bigquery.*',
'filter': '',
'include_children': '*'
}
rule_book.add_rule(
{
'name': 'Valid rule',
'resource': [valid_resource],
'sink': valid_sink_spec,
'mode': 'whitelist'
}, 0)
bad_rules = [
{},
{
                'name': 'Missing Resource',
'mode': 'whitelist',
'sink': valid_sink_spec,
}, {
                'name': 'Missing sink',
'resource': [valid_resource],
'mode': 'whitelist',
}, {
'name': 'Bad mode',
'resource': [valid_resource],
'sink': valid_sink_spec,
'mode': 'other',
}, {
'name': 'Bad resource type',
'resource': [{
'type': 'bucket',
'applies_to': 'self',
'resource_ids': ['bucket-1']
}],
'sink': valid_sink_spec,
'mode': 'whitelist'
}, {
'name': 'Bad applies to type',
'resource': [{
'type': 'folder',
'applies_to': 'self_and_children',
'resource_ids': ['56']
}],
'sink': valid_sink_spec,
'mode': 'whitelist'
}, {
'name': 'Bad applies to type',
'resource': [{
'type': 'billing_account',
'applies_to': 'children',
'resource_ids': ['ABCD-1234']
}],
'sink': valid_sink_spec,
'mode': 'whitelist'
}, {
'name': 'Empty resource_ids',
'resource': [{
'type': 'project',
'applies_to': 'self',
'resource_ids': []
}],
'sink': valid_sink_spec,
'mode': 'whitelist'
}, {
'name': 'Missing filter',
'resource': [valid_resource],
'sink': {
'destination': 'bigquery.*',
'include_children': '*'
},
'mode': 'whitelist'
}, {
'name': 'Bad include_children',
'resource': [valid_resource],
'sink': {
'destination': 'bigquery.*',
'filter': '*',
'include_children': 'Yes'
},
'mode': 'whitelist'
}
]
for rule in bad_rules:
with self.assertRaises(InvalidRulesSchemaError):
rule_book.add_rule(rule, 1)
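# For reference (added for illustration): a rule shaped like the single valid
# rule the test above feeds to LogSinkRuleBook.add_rule. The resource id and
# patterns are placeholders; the full fixture lives in
# log_sink_test_valid_rules.yaml, which is not shown here.
EXAMPLE_LOG_SINK_RULE = {
    'name': 'Only allow BigQuery sinks.',
    'resource': [{
        'type': 'organization',
        'applies_to': 'children',
        'resource_ids': ['1234'],
    }],
    'sink': {
        'destination': 'bigquery.*',
        'filter': '',
        'include_children': '*',
    },
    'mode': 'whitelist',
}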
if __name__ == '__main__':
unittest.main()
| 40.79562
| 84
| 0.560834
| 2,214
| 22,356
| 5.358627
| 0.115176
| 0.026888
| 0.028321
| 0.023264
| 0.711986
| 0.662087
| 0.633513
| 0.584289
| 0.544842
| 0.498483
| 0
| 0.019762
| 0.334541
| 22,356
| 547
| 85
| 40.870201
| 0.777711
| 0.097916
| 0
| 0.579646
| 0
| 0
| 0.242757
| 0.142273
| 0
| 0
| 0
| 0
| 0.033186
| 1
| 0.030973
| false
| 0
| 0.024336
| 0
| 0.059735
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9e3fca3aec04c54b087304757154615d5a67e58
| 2,852
|
py
|
Python
|
backend/api/ulca-ums-service/user-management/utilities/orgUtils.py
|
agupta54/ulca
|
c1f570ac254ce2ac73f40c49716458f4f7cbaee2
|
[
"MIT"
] | 3
|
2022-01-12T06:51:51.000Z
|
2022-02-23T18:54:33.000Z
|
backend/api/ulca-ums-service/user-management/utilities/orgUtils.py
|
agupta54/ulca
|
c1f570ac254ce2ac73f40c49716458f4f7cbaee2
|
[
"MIT"
] | 6
|
2021-08-31T19:21:26.000Z
|
2022-01-03T05:53:42.000Z
|
backend/api/ulca-ums-service/user-management/utilities/orgUtils.py
|
agupta54/ulca
|
c1f570ac254ce2ac73f40c49716458f4f7cbaee2
|
[
"MIT"
] | 8
|
2021-08-12T08:07:49.000Z
|
2022-01-25T04:40:51.000Z
|
import uuid
from config import USR_ORG_MONGO_COLLECTION, USR_MONGO_COLLECTION
import db
from models.response import post_error
import logging
log = logging.getLogger('file')
class OrgUtils:
def __init__(self):
pass
#orgId generation
@staticmethod
def generate_org_id():
"""UUID generation for org registeration"""
return(uuid.uuid4().hex)
@staticmethod
def validate_org(org_code):
"""Validating Org
Org should be registered and active on Anuvaad system.
"""
try:
#connecting to mongo instance/collection
collections = db.get_db()[USR_ORG_MONGO_COLLECTION]
#searching for active org record
result = collections.find({"code": org_code}, {"_id": 0, "active": 1})
if result.count() == 0:
return post_error("Invalid Organization", "No such registered organization with the given Org Id", None)
for value in result:
if value["active"] == False:
return post_error("Invalid Organization", "Organization is currently inactive", None)
except Exception as e:
log.exception(f"Db connection exception : {e}")
return post_error("Database connection exception", "An error occurred while connecting to the database:{}".format(str(e)), None)
@staticmethod
def validate_org_upsert(i,org):
"""Org validation on upsert
deactivation of org allowed only once all the users in the corresponding org is inactive.
"""
if "code" not in org or not org["code"]:
return post_error("Data Missing", "code not found", None)
if "active" not in org:
return post_error("Data Missing", "active not found", None)
code = str(org["code"]).upper()
active = org["active"]
if not isinstance(active,bool):
return post_error("Invalid format", "active should be bool", None), 400
if active == False:
try:
#connecting to mongo instance/collection
collections = db.get_db()[USR_MONGO_COLLECTION]
#searching for active users in the org
result = collections.find({"orgID": code,"is_active":True})
if result.count()!=0:
log.info("Deactivation request for org failed, {} active users with the orgID".format(str(result.count())))
return post_error("Deactivation Failed","There exist active users in {} hence this action cannot be performed".format(code),None)
except Exception as e:
log.exception(f"Db connection exception : {e}")
return post_error("Database connection exception", "An error occurred while connecting to the database:{}".format(str(e)), None)
| 41.333333
| 149
| 0.619565
| 341
| 2,852
| 5.082111
| 0.337243
| 0.04674
| 0.069244
| 0.038084
| 0.355453
| 0.248125
| 0.248125
| 0.248125
| 0.248125
| 0.248125
| 0
| 0.003916
| 0.283661
| 2,852
| 69
| 150
| 41.333333
| 0.844347
| 0.135694
| 0
| 0.244444
| 0
| 0
| 0.271559
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0.022222
| 0.111111
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9e478ed385905aa26b48748e1fbf896e8ced766
| 4,299
|
py
|
Python
|
setup.py
|
AntonBiryukovUofC/diffvg
|
e081098f52b82bfd0b7e91114d289d65ef969a60
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
AntonBiryukovUofC/diffvg
|
e081098f52b82bfd0b7e91114d289d65ef969a60
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
AntonBiryukovUofC/diffvg
|
e081098f52b82bfd0b7e91114d289d65ef969a60
|
[
"Apache-2.0"
] | null | null | null |
# Adapted from https://github.com/pybind/cmake_example/blob/master/setup.py
import os
import re
import sys
import platform
import subprocess
import importlib
from sysconfig import get_paths
import importlib
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from setuptools.command.install import install
from distutils.sysconfig import get_config_var
from distutils.version import LooseVersion
class CMakeExtension(Extension):
def __init__(self, name, sourcedir, build_with_cuda):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
self.build_with_cuda = build_with_cuda
class Build(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
super().run()
def build_extension(self, ext):
if isinstance(ext, CMakeExtension):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
info = get_paths()
include_path = info['include']
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_INCLUDE_PATH=' + include_path,
]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir),
'-DCMAKE_RUNTIME_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
if sys.maxsize > 2 ** 32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j8']
if ext.build_with_cuda:
cmake_args += ['-DDIFFVG_CUDA=1']
else:
cmake_args += ['-DDIFFVG_CUDA=0']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
self.distribution.get_version())
env_build = env
env["CXX"] = "/usr/bin/g++-5"
env["CC"] = "/usr/bin/gcc-5"
env_build["CXX"] = "/usr/bin/g++-5"
env_build["CC"] = "/usr/bin/gcc-5"
env["PATH"] = "/usr/local/cuda-10.1/bin" + ":" + os.environ['PATH']
env_build["PATH"] = "/usr/local/cuda-10.1/bin" + ":" + os.environ['PATH']
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp, env=env_build)
else:
super().build_extension(ext)
torch_spec = importlib.util.find_spec("torch")
tf_spec = importlib.util.find_spec("tensorflow")
packages = []
build_with_cuda = False
if torch_spec is not None:
packages.append('pydiffvg')
import torch
if torch.cuda.is_available():
build_with_cuda = True
if tf_spec is not None and sys.platform != 'win32':
packages.append('pydiffvg_tensorflow')
if not build_with_cuda:
import tensorflow as tf
if tf.test.is_gpu_available(cuda_only=True, min_cuda_compute_capability=None):
build_with_cuda = True
if len(packages) == 0:
print('Error: PyTorch or Tensorflow must be installed. For Windows platform only PyTorch is supported.')
exit()
# Override build_with_cuda with environment variable
if 'DIFFVG_CUDA' in os.environ:
build_with_cuda = os.environ['DIFFVG_CUDA'] == '1'
setup(name='diffvg',
version='0.0.1',
install_requires=["svgpathtools"],
description='Differentiable Vector Graphics',
ext_modules=[CMakeExtension('diffvg', '', build_with_cuda)],
cmdclass=dict(build_ext=Build, install=install),
packages=packages,
zip_safe=False)
| 38.044248
| 109
| 0.601303
| 508
| 4,299
| 4.879921
| 0.322835
| 0.039935
| 0.057685
| 0.017749
| 0.160145
| 0.124647
| 0.049213
| 0.028237
| 0.028237
| 0.028237
| 0
| 0.007939
| 0.267504
| 4,299
| 112
| 110
| 38.383929
| 0.779295
| 0.028844
| 0
| 0.075269
| 0
| 0
| 0.172339
| 0.048418
| 0.021505
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.182796
| 0
| 0.236559
| 0.010753
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9e64ab7c515862e0dec6a8272d8a276b9bd86b9
| 14,587
|
py
|
Python
|
robotpy_ext/common_drivers/navx/registerio.py
|
twinters007/robotpy-wpilib-utilities
|
d2e18c16fc97a469e0621521e0fbed0093610d6e
|
[
"MIT",
"BSD-3-Clause"
] | 2
|
2017-01-16T03:10:57.000Z
|
2017-01-16T03:11:00.000Z
|
robotpy_ext/common_drivers/navx/registerio.py
|
twinters007/robotpy-wpilib-utilities
|
d2e18c16fc97a469e0621521e0fbed0093610d6e
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
robotpy_ext/common_drivers/navx/registerio.py
|
twinters007/robotpy-wpilib-utilities
|
d2e18c16fc97a469e0621521e0fbed0093610d6e
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
# validated: 2017-02-19 DS c5e3a8a9b642 roborio/java/navx_frc/src/com/kauailabs/navx/frc/RegisterIO.java
#----------------------------------------------------------------------------
# Copyright (c) Kauai Labs 2015. All Rights Reserved.
#
# Created in support of Team 2465 (Kauaibots). Go Purple Wave!
#
# Open Source Software - may be modified and shared by FRC teams. Any
# modifications to this code must be accompanied by the \License.txt file
# in the root directory of the project
#----------------------------------------------------------------------------
from ._impl import AHRSProtocol, IMUProtocol, IMURegisters
from wpilib.timer import Timer
import logging
logger = logging.getLogger('navx')
__all__ = ['RegisterIO']
IO_TIMEOUT_SECONDS = 1.0
DELAY_OVERHEAD_SECONDS = 0.004
class _BoardId:
type = 0
hw_rev = 0
fw_ver_major = 0
fw_ver_minor = 0
fw_revision = 0
unique_id = [0]*12
class _BoardState:
op_status = 0
sensor_status = 0
cal_status = 0
selftest_status = 0
capability_flags = 0
update_rate_hz = 0
accel_fsr_g = 0
gyro_fsr_dps = 0
class RegisterIO:
def __init__(self, io_provider, update_rate_hz, notify_sink, board_capabilities):
"""
:param board_capabilities: must have the following callable attributes:
_isOmniMountSupported, _isBoardYawResetSupported,
_isDisplacementSupported
:param notify_sink: must have the following callable attributes:
_setYawPitchRoll, _setAHRSData, _setAHRSPosData,
_setRawData, _setBoardID, _setBoardState, _yawResetComplete
"""
self.io_provider = io_provider
self.update_rate_hz = update_rate_hz
self.board_capabilities = board_capabilities
self.notify_sink = notify_sink
self.raw_data_update = IMUProtocol.GyroUpdate()
self.ahrspos_update = AHRSProtocol.AHRSPosUpdate()
self.board_state = _BoardState()
self.board_id = _BoardId()
self.last_update_time = 0
self.byte_count = 0
self.update_count = 0
self.last_sensor_timestamp = 0
self._stop = False
def stop(self):
self._stop = True
def shutdown(self):
self.io_provider.shutdown()
def run(self):
logger.info("NavX io thread starting")
try:
self.io_provider.init()
# initial device configuration
self.setUpdateRateHz(self.update_rate_hz)
if not self.getConfiguration():
logger.warning("-- Did not get configuration data")
else:
logger.info("-- Board is %s (rev %s)",
IMURegisters.model_type(self.board_id.type),
self.board_id.hw_rev)
logger.info("-- Firmware %s.%s", self.board_id.fw_ver_major,
self.board_id.fw_ver_minor)
log_error = True
# Calculate delay to match configured update rate
# Note: some additional time is removed from the
# 1/update_rate value to ensure samples are not
# dropped, esp. at higher update rates.
update_rate = 1.0/(self.update_rate_hz & 0xFF)
if update_rate > DELAY_OVERHEAD_SECONDS:
update_rate -= DELAY_OVERHEAD_SECONDS
logger.info("-- Update rate: %shz (%.4fs)",
self.update_rate_hz, update_rate)
# IO Loop
while not self._stop:
if self.board_state.update_rate_hz != self.update_rate_hz:
self.setUpdateRateHz(self.update_rate_hz)
try:
self.getCurrentData()
except IOError:
if log_error:
logger.exception("Error getting data")
log_error = False
else:
log_error = True
Timer.delay(update_rate)
except Exception:
logger.exception("Unhandled exception in NavX thread")
finally:
logger.info("NavX i/o thread exiting")
def getConfiguration(self):
success = False
retry_count = 0
while retry_count < 5 and not success:
try:
config = self.io_provider.read(IMURegisters.NAVX_REG_WHOAMI,
IMURegisters.NAVX_REG_SENSOR_STATUS_H+1)
except IOError as e:
logger.warning("Error reading configuration data, retrying (%s)", e)
success = False
Timer.delay(0.5)
else:
board_id = self.board_id
board_id.hw_rev = config[IMURegisters.NAVX_REG_HW_REV]
board_id.fw_ver_major = config[IMURegisters.NAVX_REG_FW_VER_MAJOR]
board_id.fw_ver_minor = config[IMURegisters.NAVX_REG_FW_VER_MINOR]
board_id.type = config[IMURegisters.NAVX_REG_WHOAMI]
self.notify_sink._setBoardID(board_id)
board_state = self.board_state
board_state.cal_status = config[IMURegisters.NAVX_REG_CAL_STATUS]
board_state.op_status = config[IMURegisters.NAVX_REG_OP_STATUS]
board_state.selftest_status = config[IMURegisters.NAVX_REG_SELFTEST_STATUS]
board_state.sensor_status = AHRSProtocol.decodeBinaryUint16(config,IMURegisters.NAVX_REG_SENSOR_STATUS_L)
board_state.gyro_fsr_dps = AHRSProtocol.decodeBinaryUint16(config,IMURegisters.NAVX_REG_GYRO_FSR_DPS_L)
board_state.accel_fsr_g = config[IMURegisters.NAVX_REG_ACCEL_FSR_G]
board_state.update_rate_hz = config[IMURegisters.NAVX_REG_UPDATE_RATE_HZ]
board_state.capability_flags = AHRSProtocol.decodeBinaryUint16(config,IMURegisters.NAVX_REG_CAPABILITY_FLAGS_L)
self.notify_sink._setBoardState(board_state)
success = True
retry_count += 1
return success
def getCurrentData(self):
first_address = IMURegisters.NAVX_REG_UPDATE_RATE_HZ
displacement_registers = self.board_capabilities._isDisplacementSupported()
# If firmware supports displacement data, acquire it - otherwise implement
# similar (but potentially less accurate) calculations on this processor.
if displacement_registers:
read_count = IMURegisters.NAVX_REG_LAST + 1 - first_address
else:
read_count = IMURegisters.NAVX_REG_QUAT_OFFSET_Z_H + 1 - first_address
curr_data = self.io_provider.read(first_address, read_count)
sensor_timestamp = AHRSProtocol.decodeBinaryUint32(curr_data, IMURegisters.NAVX_REG_TIMESTAMP_L_L-first_address)
if sensor_timestamp == self.last_sensor_timestamp:
return
self.last_sensor_timestamp = sensor_timestamp
ahrspos_update = self.ahrspos_update
ahrspos_update.op_status = curr_data[IMURegisters.NAVX_REG_OP_STATUS - first_address]
ahrspos_update.selftest_status = curr_data[IMURegisters.NAVX_REG_SELFTEST_STATUS - first_address]
ahrspos_update.cal_status = curr_data[IMURegisters.NAVX_REG_CAL_STATUS]
ahrspos_update.sensor_status = curr_data[IMURegisters.NAVX_REG_SENSOR_STATUS_L - first_address]
ahrspos_update.yaw = AHRSProtocol.decodeProtocolSignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_YAW_L-first_address)
ahrspos_update.pitch = AHRSProtocol.decodeProtocolSignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_PITCH_L-first_address)
ahrspos_update.roll = AHRSProtocol.decodeProtocolSignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_ROLL_L-first_address)
ahrspos_update.compass_heading = AHRSProtocol.decodeProtocolUnsignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_HEADING_L-first_address)
ahrspos_update.mpu_temp_c = AHRSProtocol.decodeProtocolSignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_MPU_TEMP_C_L - first_address)
ahrspos_update.world_linear_accel_x = AHRSProtocol.decodeProtocolSignedThousandthsFloat(curr_data, IMURegisters.NAVX_REG_LINEAR_ACC_X_L-first_address)
ahrspos_update.world_linear_accel_y = AHRSProtocol.decodeProtocolSignedThousandthsFloat(curr_data, IMURegisters.NAVX_REG_LINEAR_ACC_Y_L-first_address)
ahrspos_update.world_linear_accel_z = AHRSProtocol.decodeProtocolSignedThousandthsFloat(curr_data, IMURegisters.NAVX_REG_LINEAR_ACC_Z_L-first_address)
ahrspos_update.altitude = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_ALTITUDE_D_L - first_address)
ahrspos_update.baro_pressure = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_PRESSURE_DL - first_address)
ahrspos_update.fused_heading = AHRSProtocol.decodeProtocolUnsignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_FUSED_HEADING_L-first_address)
ahrspos_update.quaternionW = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_QUAT_W_L-first_address)/ 32768.0
ahrspos_update.quaternionX = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_QUAT_X_L-first_address)/ 32768.0
ahrspos_update.quaternionY = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_QUAT_Y_L-first_address)/ 32768.0
ahrspos_update.quaternionZ = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_QUAT_Z_L-first_address)/ 32768.0
if displacement_registers:
ahrspos_update.vel_x = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_VEL_X_I_L-first_address)
ahrspos_update.vel_y = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_VEL_Y_I_L-first_address)
ahrspos_update.vel_z = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_VEL_Z_I_L-first_address)
ahrspos_update.disp_x = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_DISP_X_I_L-first_address)
ahrspos_update.disp_y = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_DISP_Y_I_L-first_address)
ahrspos_update.disp_z = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_DISP_Z_I_L-first_address)
self.notify_sink._setAHRSPosData(ahrspos_update, sensor_timestamp)
else:
self.notify_sink._setAHRSData(ahrspos_update, sensor_timestamp)
board_state = self.board_state
board_state.cal_status = curr_data[IMURegisters.NAVX_REG_CAL_STATUS-first_address]
board_state.op_status = curr_data[IMURegisters.NAVX_REG_OP_STATUS-first_address]
board_state.selftest_status = curr_data[IMURegisters.NAVX_REG_SELFTEST_STATUS-first_address]
board_state.sensor_status = AHRSProtocol.decodeBinaryUint16(curr_data,IMURegisters.NAVX_REG_SENSOR_STATUS_L-first_address)
board_state.update_rate_hz = curr_data[IMURegisters.NAVX_REG_UPDATE_RATE_HZ-first_address]
board_state.gyro_fsr_dps = AHRSProtocol.decodeBinaryUint16(curr_data,IMURegisters.NAVX_REG_GYRO_FSR_DPS_L)
board_state.accel_fsr_g = curr_data[IMURegisters.NAVX_REG_ACCEL_FSR_G]
board_state.capability_flags= AHRSProtocol.decodeBinaryUint16(curr_data,IMURegisters.NAVX_REG_CAPABILITY_FLAGS_L-first_address)
self.notify_sink._setBoardState(board_state)
raw_data_update = self.raw_data_update
raw_data_update.raw_gyro_x = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_GYRO_X_L-first_address)
raw_data_update.raw_gyro_y = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_GYRO_Y_L-first_address)
raw_data_update.raw_gyro_z = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_GYRO_Z_L-first_address)
raw_data_update.raw_accel_x = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_ACC_X_L-first_address)
raw_data_update.raw_accel_y = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_ACC_Y_L-first_address)
raw_data_update.raw_accel_z = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_ACC_Z_L-first_address)
raw_data_update.cal_mag_x = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_MAG_X_L-first_address)
raw_data_update.cal_mag_y = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_MAG_Y_L-first_address)
raw_data_update.cal_mag_z = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_MAG_Z_L-first_address)
raw_data_update.mpu_temp_c = ahrspos_update.mpu_temp_c
self.notify_sink._setRawData(raw_data_update, sensor_timestamp)
self.last_update_time = Timer.getFPGATimestamp()
self.byte_count += len(curr_data)
self.update_count += 1
def isConnected(self):
time_since_last_update = Timer.getFPGATimestamp() - self.last_update_time
return time_since_last_update <= IO_TIMEOUT_SECONDS
def getByteCount(self):
return self.byte_count
def getUpdateCount(self):
return self.update_count
def setUpdateRateHz(self, update_rate_hz):
self.io_provider.write(IMURegisters.NAVX_REG_UPDATE_RATE_HZ, update_rate_hz)
def zeroYaw(self):
self.io_provider.write( IMURegisters.NAVX_REG_INTEGRATION_CTL,
AHRSProtocol.NAVX_INTEGRATION_CTL_RESET_YAW )
self.notify_sink._yawResetComplete()
def zeroDisplacement(self):
self.io_provider.write( IMURegisters.NAVX_REG_INTEGRATION_CTL,
(AHRSProtocol.NAVX_INTEGRATION_CTL_RESET_DISP_X |
AHRSProtocol.NAVX_INTEGRATION_CTL_RESET_DISP_Y |
AHRSProtocol.NAVX_INTEGRATION_CTL_RESET_DISP_Z ) )
| 54.632959
| 159
| 0.676973
| 1,642
| 14,587
| 5.598051
| 0.168088
| 0.109661
| 0.130222
| 0.112272
| 0.590078
| 0.510335
| 0.413621
| 0.311684
| 0.113903
| 0.06832
| 0
| 0.014578
| 0.252279
| 14,587
| 266
| 160
| 54.838346
| 0.828184
| 0.085487
| 0
| 0.11399
| 0
| 0
| 0.019657
| 0
| 0
| 0
| 0.000302
| 0
| 0
| 1
| 0.062176
| false
| 0.005181
| 0.015544
| 0.010363
| 0.19171
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9e6a0bf2a4d3e860c6eb607624b101a086157b4
| 12,517
|
py
|
Python
|
RigolWFM/channel.py
|
wvdv2002/RigolWFM
|
849a1130c9194f052eaf5582dfa67e7a5708a3a3
|
[
"BSD-3-Clause"
] | null | null | null |
RigolWFM/channel.py
|
wvdv2002/RigolWFM
|
849a1130c9194f052eaf5582dfa67e7a5708a3a3
|
[
"BSD-3-Clause"
] | null | null | null |
RigolWFM/channel.py
|
wvdv2002/RigolWFM
|
849a1130c9194f052eaf5582dfa67e7a5708a3a3
|
[
"BSD-3-Clause"
] | null | null | null |
#pylint: disable=invalid-name
#pylint: disable=too-many-instance-attributes
#pylint: disable=too-many-return-statements
#pylint: disable=too-many-statements
"""
Class structure and methods for an oscilloscope channel.
The idea is to collect all the relevant information from all the Rigol
scope waveforms into a single structure that can be handled in a uniform
and consistent manner.
Specifically this lets one just use
channel.times : numpy array of signal times
channel.volts : numpy array of signal voltages
or the stringification method to describe a channel
print(channel)
"""
from enum import Enum
import numpy as np
class UnitEnum(Enum):
"""Enumerated units for scopes without them."""
w = 0
a = 1
v = 2
u = 3
def best_scale(number):
"""Scale and units for a number with proper prefix."""
absnr = abs(number)
if absnr == 0:
return 1, ' '
if absnr < 0.99999999e-9:
return 1e12, 'p'
if absnr < 0.99999999e-6:
return 1e9, 'n'
if absnr < 0.99999999e-3:
return 1e6, 'µ'
if absnr < 0.99999999:
return 1e3, 'm'
if absnr < 0.99999999e3:
return 1, ' '
if absnr < 0.99999999e6:
return 1e-3, 'k'
if absnr < 0.99999999e9:
return 1e-6, 'M'
return 1e-9, 'G'
def engineering_string(number, n_digits):
"""Format number with proper prefix."""
scale, prefix = best_scale(number)
fformat = "%%.%df %%s" % n_digits
s = fformat % (number * scale, prefix)
return s
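# Rough example (values assumed for illustration): engineering_string(0.000123, 2)
# selects the 1e6 scale and 'µ' prefix via best_scale and returns "123.00 µ".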
def _channel_bytes(channel_number, w):
"""
Return the right series of bytes for a channel on 1000Z scopes.
Waveform points are stored interleaved in memory when two or more
channels are saved; this unweaves them.
Args:
channel_number: the number of enabled channels before this one
w: original waveform object
Returns
byte array for specified channel
"""
offset = 0
if w.header.stride == 2: # byte pattern CHx CHy
# use odd bytes when this is the second enabled channel
if any([w.header.ch[i].enabled for i in range(channel_number-1)]):
offset = 1
elif w.header.stride == 4: # byte pattern CH4 CH3 CH2 CH1
offset = 4 - channel_number
data = np.frombuffer(w.data.raw, dtype=np.uint8)
raw_bytes = data[offset::w.header.stride]
return raw_bytes
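# Example (assumed layout): with header.stride == 2 and both CH1 and CH2 enabled,
# CH1 samples occupy the even byte offsets and CH2 the odd ones, so
# _channel_bytes(2, w) effectively slices data[1::2].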
class Channel():
"""Base class for a single channel."""
def __init__(self, w, channel_number, scope, selected='1234'):
"""
Initialize a Channel Object.
Args:
w: Wfm object
channel_number: 1, 2, 3, or 4
scope: string describing scope
selected: string with channels chosen by user
Returns:
Channel object
"""
self.channel_number = channel_number
self.name = "CH %d" % channel_number
self.waveform = w
self.seconds_per_point = w.header.seconds_per_point
self.firmware = 'unknown'
self.unit = UnitEnum.v
self.points = 0
self.raw = None
self.volts = None
self.times = None
self.coupling = 'unknown'
self.roll_stop = 0
self.time_offset = 0
self.time_scale = 1
self.enabled = False
self.enabled_and_selected = False
self.volt_scale = 1
self.volt_offset = 0
self.y_scale = 1
self.y_offset = 0
self.volt_per_division = 1
self.probe_value = 1
self.inverted = False
# determine if this channel is one of those chosen by user
chosen = selected.find(str(channel_number)) != -1
if channel_number <= len(w.header.ch):
channel = w.header.ch[channel_number-1]
self.enabled = channel.enabled
self.enabled_and_selected = channel.enabled and chosen
self.volt_scale = channel.volt_scale
self.volt_offset = channel.volt_offset
self.y_scale = channel.volt_scale
self.y_offset = channel.volt_offset
self.volt_per_division = channel.volt_per_division
self.probe_value = channel.probe_value
self.unit = channel.unit
self.inverted = channel.inverted
if scope == 'wfm1000c':
self.ds1000c(w, channel_number)
elif scope == 'wfm1000d':
self.ds1000d(w, channel_number)
elif scope == 'wfm1000e':
self.ds1000e(w, channel_number)
elif scope == 'wfm1000z':
self.ds1000z(w, channel_number)
elif scope == 'wfm2000':
self.ds2000(w, channel_number)
elif scope == 'wfm4000':
self.ds4000(w, channel_number)
elif scope == 'wfm6000':
self.ds6000(w, channel_number)
def __str__(self):
"""Describe this channel."""
s = " Channel %d:\n" % self.channel_number
s += " Coupling = %8s\n" % self.coupling.rjust(7, ' ')
s += " Scale = %10sV/div\n" % engineering_string(self.volt_per_division, 2)
s += " Offset = %10sV\n" % engineering_string(self.volt_offset, 2)
s += " Probe = %7gX\n" % self.probe_value
s += " Inverted = %8s\n\n" % self.inverted
s += " Time Base = %10ss/div\n" % engineering_string(self.time_scale, 3)
s += " Offset = %10ss\n" % engineering_string(self.time_offset, 3)
s += " Delta = %10ss/point\n" % engineering_string(self.seconds_per_point, 3)
s += " Points = %8d\n\n" % self.points
if self.enabled_and_selected:
s += " Count = [%9d,%9d,%9d ... %9d,%9d]\n" % (
1, 2, 3, self.points-1, self.points)
s += " Raw = [%9d,%9d,%9d ... %9d,%9d]\n" % (
self.raw[0], self.raw[1], self.raw[2], self.raw[-2], self.raw[-1])
t = [engineering_string(self.times[i], 3) +
"s" for i in [0, 1, 2, -2, -1]]
s += " Times = [%9s,%9s,%9s ... %9s,%9s]\n" % (
t[0], t[1], t[2], t[-2], t[-1])
v = [engineering_string(self.volts[i], 2) +
"V" for i in [0, 1, 2, -2, -1]]
s += " Volts = [%9s,%9s,%9s ... %9s,%9s]\n" % (
v[0], v[1], v[2], v[-2], v[-1])
return s
def calc_times_and_volts(self):
"""Calculate the times and voltages for this channel."""
if self.enabled_and_selected:
self.volts = self.y_scale * (127.0 - self.raw) - self.y_offset
h = self.points * self.seconds_per_point / 2
self.times = np.linspace(-h, h, self.points) + self.time_offset
def ds1000c(self, w, channel_number):
"""Interpret waveform data for 1000CD series scopes."""
self.time_scale = 1.0e-12 * w.header.time_scale
self.time_offset = 1.0e-12 * w.header.time_offset
if channel_number == 1:
if self.enabled_and_selected:
self.points = len(w.data.ch1)
self.raw = np.frombuffer(w.data.ch1, dtype=np.uint8)
if channel_number == 2:
if self.enabled_and_selected:
self.points = len(w.data.ch2)
self.raw = np.frombuffer(w.data.ch2, dtype=np.uint8)
self.calc_times_and_volts()
def ds1000d(self, w, channel_number):
"""Interpret waveform data for 1000CD series scopes."""
self.time_scale = 1.0e-12 * w.header.time_scale
self.time_offset = 1.0e-12 * w.header.time_offset
if channel_number == 1:
if self.enabled_and_selected:
self.points = len(w.data.ch1)
self.raw = np.frombuffer(w.data.ch1, dtype=np.uint8)
if channel_number == 2:
if self.enabled_and_selected:
self.points = len(w.data.ch2)
self.raw = np.frombuffer(w.data.ch2, dtype=np.uint8)
self.calc_times_and_volts()
def ds1000e(self, w, channel_number):
"""Interpret waveform data for 1000D and 1000E series scopes."""
self.roll_stop = w.header.roll_stop
if channel_number == 1:
self.time_offset = w.header.ch1_time_offset
self.time_scale = w.header.ch1_time_scale
if self.enabled_and_selected:
self.points = len(w.data.ch1)
self.raw = np.frombuffer(w.data.ch1, dtype=np.uint8)
elif channel_number == 2:
self.time_offset = w.header.ch2_time_offset
self.time_scale = w.header.ch2_time_scale
if self.enabled_and_selected:
self.points = len(w.data.ch2)
self.raw = np.frombuffer(w.data.ch2, dtype=np.uint8)
self.calc_times_and_volts()
def ds1000z(self, w, channel_number):
"""Interpret waveform for the Rigol DS1000Z series."""
self.time_scale = w.header.time_scale
self.time_offset = w.header.time_offset
self.points = w.header.points
self.stride = w.header.stride
self.firmware = w.preheader.firmware_version
self.probe = w.header.ch[channel_number-1].probe_value
self.coupling = w.header.ch[channel_number-1].coupling.name.upper()
self.y_scale = w.header.ch[channel_number-1].y_scale
self.y_offset = w.header.ch[channel_number-1].y_offset
if self.enabled_and_selected:
self.raw = _channel_bytes(channel_number, w)
self.points = len(self.raw)
self.calc_times_and_volts()
def ds2000(self, w, channel_number):
"""Interpret waveform for the Rigol DS2000 series."""
self.time_offset = w.header.time_offset
self.time_scale = w.header.time_scale
self.points = w.header.storage_depth
self.firmware = w.header.firmware_version
self.unit = UnitEnum(w.header.ch[channel_number-1].unit_actual)
self.coupling = w.header.ch[channel_number-1].coupling.name.upper()
self.y_scale = -self.volt_scale
self.y_offset = self.volt_offset
if self.enabled_and_selected:
if channel_number == 1:
self.raw = np.frombuffer(w.header.raw_1, dtype=np.uint8)
if channel_number == 2:
self.raw = np.frombuffer(w.header.raw_2, dtype=np.uint8)
if channel_number == 3:
self.raw = np.frombuffer(w.header.raw_3, dtype=np.uint8)
if channel_number == 4:
self.raw = np.frombuffer(w.header.raw_4, dtype=np.uint8)
self.calc_times_and_volts()
def ds4000(self, w, channel_number):
"""Interpret waveform for the Rigol DS4000 series."""
self.time_offset = w.header.time_offset
self.time_scale = w.header.time_scale
self.points = w.header.points
self.firmware = w.header.firmware_version
self.coupling = w.header.ch[channel_number-1].coupling.name.upper()
self.y_scale = -self.volt_scale
self.y_offset = self.volt_offset
if self.enabled_and_selected:
if channel_number == 1:
self.raw = np.frombuffer(w.header.raw_1, dtype=np.uint8)
if channel_number == 2:
self.raw = np.frombuffer(w.header.raw_2, dtype=np.uint8)
if channel_number == 3:
self.raw = np.frombuffer(w.header.raw_3, dtype=np.uint8)
if channel_number == 4:
self.raw = np.frombuffer(w.header.raw_4, dtype=np.uint8)
self.calc_times_and_volts()
def ds6000(self, w, channel_number):
"""Interpret waveform for the Rigol DS6000 series."""
self.time_offset = w.header.time_offset
self.time_scale = w.header.time_scale
self.points = w.header.points
self.firmware = w.header.firmware_version
self.coupling = w.header.ch[channel_number-1].coupling.name.upper()
self.unit = w.header.ch[channel_number-1].unit
if self.enabled_and_selected:
if channel_number == 1:
self.raw = np.array(w.header.raw_1, dtype=np.uint8)
if channel_number == 2:
self.raw = np.array(w.header.raw_2, dtype=np.uint8)
if channel_number == 3:
self.raw = np.array(w.header.raw_3, dtype=np.uint8)
if channel_number == 4:
self.raw = np.array(w.header.raw_4, dtype=np.uint8)
self.calc_times_and_volts()
| 36.176301
| 96
| 0.589199
| 1,687
| 12,517
| 4.227623
| 0.140486
| 0.100252
| 0.037297
| 0.043186
| 0.509675
| 0.43396
| 0.406057
| 0.37521
| 0.360348
| 0.331183
| 0
| 0.046496
| 0.298953
| 12,517
| 345
| 97
| 36.281159
| 0.766268
| 0.149157
| 0
| 0.373913
| 0
| 0
| 0.053795
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056522
| false
| 0
| 0.008696
| 0
| 0.143478
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9e6a9be08cb7ae14c68608c944b95cbe6233b10
| 1,477
|
py
|
Python
|
configs/raubtierv2a/faster_rcnn_x101_64x4d_fpn_1x_raubtierv2a_nofreeze_4gpu.py
|
esf-bt2020/mmdetection
|
abc5fe060e0fcb716f845c85441be3741b22d3cf
|
[
"Apache-2.0"
] | null | null | null |
configs/raubtierv2a/faster_rcnn_x101_64x4d_fpn_1x_raubtierv2a_nofreeze_4gpu.py
|
esf-bt2020/mmdetection
|
abc5fe060e0fcb716f845c85441be3741b22d3cf
|
[
"Apache-2.0"
] | null | null | null |
configs/raubtierv2a/faster_rcnn_x101_64x4d_fpn_1x_raubtierv2a_nofreeze_4gpu.py
|
esf-bt2020/mmdetection
|
abc5fe060e0fcb716f845c85441be3741b22d3cf
|
[
"Apache-2.0"
] | null | null | null |
_base_ = '../faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py'
model = dict(
backbone=dict(
num_stages=4,
#frozen_stages=4
),
roi_head=dict(
bbox_head=dict(
num_classes=3
)
)
)
dataset_type = 'COCODataset'
classes = ('luchs', 'rotfuchs', 'wolf')
data = dict(
train=dict(
img_prefix='raubtierv2a/train/',
classes=classes,
ann_file='raubtierv2a/train/_annotations.coco.json'),
val=dict(
img_prefix='raubtierv2a/valid/',
classes=classes,
ann_file='raubtierv2a/valid/_annotations.coco.json'),
test=dict(
img_prefix='raubtierv2a/test/',
classes=classes,
ann_file='raubtierv2a/test/_annotations.coco.json'))
#optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) #original (8x2=16)
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) #(4x2=8) 4 GPUs
#optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001) #(1x2=2)
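# Assumed rationale for the values above: the learning rate follows the usual
# linear scaling rule, lr = 0.02 * total_batch_size / 16, with total batch = GPUs x 2.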
total_epochs=24
evaluation = dict(classwise=True, interval=1, metric='bbox')
work_dir = '/media/storage1/projects/WilLiCam/checkpoint_workdir/raubtierv2a/faster_rcnn_x101_64x4d_fpn_1x_raubtierv2a_nofreeze_4gpu'
#http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204-833ee192.pth
load_from = 'checkpoints/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204-833ee192.pth'
| 26.375
| 151
| 0.704807
| 203
| 1,477
| 4.832512
| 0.44335
| 0.071356
| 0.071356
| 0.09684
| 0.445464
| 0.347604
| 0.252803
| 0.173293
| 0.173293
| 0.095821
| 0
| 0.1
| 0.16046
| 1,477
| 55
| 152
| 26.854545
| 0.691129
| 0.237644
| 0
| 0.096774
| 0
| 0
| 0.399464
| 0.320822
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9ea32c16e86b4071267eb26a711d79f81eaea56
| 2,925
|
py
|
Python
|
xos/hpc_observer/steps/sync_originserver.py
|
wathsalav/xos
|
f6bcaa37a948ee41729236afe7fce0802e002404
|
[
"Apache-2.0"
] | null | null | null |
xos/hpc_observer/steps/sync_originserver.py
|
wathsalav/xos
|
f6bcaa37a948ee41729236afe7fce0802e002404
|
[
"Apache-2.0"
] | null | null | null |
xos/hpc_observer/steps/sync_originserver.py
|
wathsalav/xos
|
f6bcaa37a948ee41729236afe7fce0802e002404
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import base64
from django.db.models import F, Q
from xos.config import Config
from observer.syncstep import SyncStep
from core.models import Service
from hpc.models import ServiceProvider, ContentProvider, CDNPrefix, OriginServer
from util.logger import Logger, logging
# hpclibrary will be in steps/..
parentdir = os.path.join(os.path.dirname(__file__),"..")
sys.path.insert(0,parentdir)
from hpclib import HpcLibrary
logger = Logger(level=logging.INFO)
class SyncOriginServer(SyncStep, HpcLibrary):
provides=[OriginServer]
requested_interval=0
def __init__(self, **args):
SyncStep.__init__(self, **args)
HpcLibrary.__init__(self)
def fetch_pending(self, deleted):
#self.consistency_check()
return SyncStep.fetch_pending(self, deleted)
def consistency_check(self):
# set to true if something changed
result=False
# sanity check to make sure our PS objects have CMI objects behind them
all_ors_ids = [x["origin_server_id"] for x in self.client.onev.ListAll("OriginServer")]
for ors in OriginServer.objects.all():
if (ors.origin_server_id is not None) and (ors.origin_server_id not in all_ors_ids):
# we have an origin server ID, but it doesn't exist in the CMI
# something went wrong
# start over
logger.info("origin server %s was not found on CMI" % ors.origin_server_id)
ors.origin_server_id=None
ors.save()
result = True
return result
def sync_record(self, ors):
logger.info("sync'ing origin server %s" % str(ors))
if (not ors.contentProvider) or (not ors.contentProvider.content_provider_id):
return
cpid = ors.contentProvider.content_provider_id
# validation requires the URL to start with http://
url = ors.url
if not url.startswith("http://"):
url = "http://" + url
ors_dict = {"authenticated_content": ors.authenticated, "zone_redirects": ors.redirects, "content_provider_id": cpid, "url": url, "service_type": "HyperCache", "caching_type": "Optimistic", "description": ors.description}
#print os_dict
if not ors.origin_server_id:
id = self.client.onev.Create("OriginServer", ors_dict)
ors.origin_server_id = id
else:
self.client.onev.Update("OriginServer", ors.origin_server_id, ors_dict)
# ... something breaks (analytics) if the URL starts with http://, so we
# change it in cob after adding it via onev.
url = url[7:]
self.client.cob.UpdateContent(ors.origin_server_id, {"url": url})
ors.silent = True
ors.save()
def delete_record(self, m):
if m.origin_server_id is not None:
self.client.onev.Delete("OriginServer", m.origin_server_id)
| 34.411765
| 229
| 0.654701
| 380
| 2,925
| 4.873684
| 0.381579
| 0.090713
| 0.090713
| 0.073434
| 0.104752
| 0.024838
| 0
| 0
| 0
| 0
| 0
| 0.002275
| 0.248547
| 2,925
| 84
| 230
| 34.821429
| 0.840309
| 0.144274
| 0
| 0.038462
| 0
| 0
| 0.103171
| 0.00843
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096154
| false
| 0
| 0.192308
| 0.019231
| 0.403846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9ea437d66df34d28efcf808ad16c896dadcac76
| 400
|
py
|
Python
|
main.py
|
aroxby/pixel-processor
|
9cfe260a085ced0883ce8b0a35c28020f4aa8737
|
[
"MIT"
] | null | null | null |
main.py
|
aroxby/pixel-processor
|
9cfe260a085ced0883ce8b0a35c28020f4aa8737
|
[
"MIT"
] | null | null | null |
main.py
|
aroxby/pixel-processor
|
9cfe260a085ced0883ce8b0a35c28020f4aa8737
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from PIL import Image
def transform(r, g, b):
tmp = b
b = g // 2
g = tmp
r = r // 2
return r, g, b
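# Illustrative values: transform(10, 20, 30) returns (5, 30, 10): red is halved,
# the old blue becomes green, and the old green is halved into blue.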
def main():
im = Image.open('blue-flames.jpg')
input_pixels = im.getdata()
output_pixels = tuple(transform(*pixel) for pixel in input_pixels)
im.putdata(output_pixels)
im.save('green-flames.png')
if __name__ == '__main__':
main()
| 17.391304
| 69
| 0.6
| 62
| 400
| 3.677419
| 0.580645
| 0.105263
| 0.026316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010067
| 0.255
| 400
| 22
| 70
| 18.181818
| 0.755034
| 0.0525
| 0
| 0
| 0
| 0
| 0.103175
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.066667
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9eab80495274dd2446a7b029f17be91df29a452
| 1,539
|
py
|
Python
|
scipy/weave/examples/swig2_example.py
|
lesserwhirls/scipy-cwt
|
ee673656d879d9356892621e23ed0ced3d358621
|
[
"BSD-3-Clause"
] | 8
|
2015-10-07T00:37:32.000Z
|
2022-01-21T17:02:33.000Z
|
scipy/weave/examples/swig2_example.py
|
lesserwhirls/scipy-cwt
|
ee673656d879d9356892621e23ed0ced3d358621
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/weave/examples/swig2_example.py
|
lesserwhirls/scipy-cwt
|
ee673656d879d9356892621e23ed0ced3d358621
|
[
"BSD-3-Clause"
] | 8
|
2015-05-09T14:23:57.000Z
|
2018-11-15T05:56:00.000Z
|
"""Simple example to show how to use weave.inline on SWIG2 wrapped
objects. SWIG2 refers to SWIG versions >= 1.3.
To run this example you must build the trivial SWIG2 extension called
swig2_ext. To do this you need to do something like this::
$ swig -c++ -python -I. -o swig2_ext_wrap.cxx swig2_ext.i
$ g++ -Wall -O2 -I/usr/include/python2.3 -fPIC -I. -c \
-o swig2_ext_wrap.os swig2_ext_wrap.cxx
$ g++ -shared -o _swig2_ext.so swig2_ext_wrap.os \
-L/usr/lib/python2.3/config
The files swig2_ext.i and swig2_ext.h are included in the same
directory that contains this file.
Note that weave's SWIG2 support works fine whether SWIG_COBJECT_TYPES
are used or not.
Author: Prabhu Ramachandran
Copyright (c) 2004, Prabhu Ramachandran
License: BSD Style.
"""
# Import our SWIG2 wrapped library
import swig2_ext
import scipy.weave as weave
from scipy.weave import swig2_spec, converters
# SWIG2 support is not enabled by default. We enable it by adding the
# swig2 converter to the default list of converters.
converters.default.insert(0, swig2_spec.swig2_converter())
def test():
"""Instantiate the SWIG wrapped object and then call its method
from C++ using weave.inline
"""
a = swig2_ext.A()
b = swig2_ext.foo() # This will be an APtr instance.
b.thisown = 1 # Prevent memory leaks.
code = """a->f();
b->f();
"""
weave.inline(code, ['a', 'b'], include_dirs=['.'],
headers=['"swig2_ext.h"'], verbose=1)
if __name__ == "__main__":
test()
| 28.5
| 69
| 0.690058
| 246
| 1,539
| 4.186992
| 0.520325
| 0.100971
| 0.046602
| 0.025243
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030204
| 0.204029
| 1,539
| 53
| 70
| 29.037736
| 0.810612
| 0.684211
| 0
| 0
| 0
| 0
| 0.149451
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.2
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9eba9b75a6e45fee4cdfe3d81874f5e8476b939
| 1,951
|
py
|
Python
|
src/simplify.py
|
denghz/Probabilistic-Programming
|
fa505a75c4558e507fd3effd2737c63537bfe50d
|
[
"BSD-3-Clause"
] | null | null | null |
src/simplify.py
|
denghz/Probabilistic-Programming
|
fa505a75c4558e507fd3effd2737c63537bfe50d
|
[
"BSD-3-Clause"
] | null | null | null |
src/simplify.py
|
denghz/Probabilistic-Programming
|
fa505a75c4558e507fd3effd2737c63537bfe50d
|
[
"BSD-3-Clause"
] | null | null | null |
from wolframclient.language.expression import WLSymbol
from nnDiff import *
def parseGlobalSymbol(s):
if isinstance(s, numbers.Number):
return s
if isinstance(s, WLSymbol):
if s.name == 'E':
return 'E'
else:
return s.name[7:]
def parse(exp):
symbol = parseGlobalSymbol(exp)
if symbol is not None:
return [symbol]
else:
f = str(exp.head)
args = list(map(parse, exp.args))
res = []
if (f == "Power"):
res1 = []
p = args[1][0]
e = args[0]
if e == ['E']:
return ['Exp'] + args[1]
if p < 0:
res = ["Inv"]
p = -p
if p >= 2:
p = p - 2
res1 = ["Times"] + e + e
while p > 0:
p = p - 1
res1 = ["Times"] + res1 + e
return res + res1
else:
return res + e
else:
if len(args) == 1:
return [f] + args[0]
elif len(args) >= 2:
res = [f] + args[0] + args[1]
args = args[2:]
for arg in args:
res = [f] + res + arg
return res
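# Illustrative trace (hand-checked against the branches above): a Wolfram
# expression Power[x, -2] parses to ['Inv', 'Times', 'x', 'x'], i.e. 1/(x*x)
# in this prefix encoding.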
def simplify(exp):
with WolframLanguageSession() as session:
session.evaluate("Inv[zzz_] := 1/zzz")
f = wlexpr(str(Func(exp)))
getfreeVars = wlexpr("Reduce`FreeVariables")
freeVariables = session.evaluate(getfreeVars(f))
ass = wl.Element(wl.Alternatives(freeVariables), wl.Reals)
wmres = session.evaluate(wl.FullSimplify(f,ass))
print(wmres)
res = parse(wmres)
return res
if __name__ == "__main__":
exp = sys.argv[1:]
if exp == []:
exp = ["Sin", "x"]
res = map(str,simplify(exp))
print(' '.join(res), file=sys.stderr)
| 27.097222
| 67
| 0.438237
| 212
| 1,951
| 3.990566
| 0.334906
| 0.023641
| 0.030733
| 0.033097
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020814
| 0.433624
| 1,951
| 72
| 68
| 27.097222
| 0.744796
| 0
| 0
| 0.096774
| 0
| 0
| 0.038422
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048387
| false
| 0
| 0.032258
| 0
| 0.241935
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9eda494aa9f90de7b3474adbd78e46927f9990c
| 406
|
py
|
Python
|
src/cart/forms.py
|
cbsBiram/xarala__ssr
|
863e1362c786daa752b942b796f7a015211d2f1b
|
[
"FSFAP"
] | null | null | null |
src/cart/forms.py
|
cbsBiram/xarala__ssr
|
863e1362c786daa752b942b796f7a015211d2f1b
|
[
"FSFAP"
] | null | null | null |
src/cart/forms.py
|
cbsBiram/xarala__ssr
|
863e1362c786daa752b942b796f7a015211d2f1b
|
[
"FSFAP"
] | null | null | null |
from django import forms
from django.utils.translation import gettext_lazy as _
COURSE_QUANTITY_CHOICES = [(i, str(i)) for i in range(1, 21)]
class CartAddCourseForm(forms.Form):
quantity = forms.TypedChoiceField(
choices=COURSE_QUANTITY_CHOICES, coerce=int, label=_("Quantité")
)
override = forms.BooleanField(
required=False, initial=False, widget=forms.HiddenInput
)
| 27.066667
| 72
| 0.726601
| 49
| 406
| 5.877551
| 0.693878
| 0.069444
| 0.145833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008955
| 0.174877
| 406
| 14
| 73
| 29
| 0.850746
| 0
| 0
| 0
| 0
| 0
| 0.019704
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9edd7dbf25e820fdbc6faa76fd63ef5d9d3ec94
| 1,090
|
py
|
Python
|
appengine/components/tests/datastore_utils_properties_test.py
|
pombreda/swarming
|
c70f311f3db8f25752c793a0d7b36cf537d95580
|
[
"Apache-2.0"
] | null | null | null |
appengine/components/tests/datastore_utils_properties_test.py
|
pombreda/swarming
|
c70f311f3db8f25752c793a0d7b36cf537d95580
|
[
"Apache-2.0"
] | null | null | null |
appengine/components/tests/datastore_utils_properties_test.py
|
pombreda/swarming
|
c70f311f3db8f25752c793a0d7b36cf537d95580
|
[
"Apache-2.0"
] | 1
|
2021-12-06T03:37:36.000Z
|
2021-12-06T03:37:36.000Z
|
#!/usr/bin/env python
# Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
import sys
import unittest
import test_env
test_env.setup_test_env()
from google.appengine.ext import ndb
from components.datastore_utils import properties
from support import test_case
class BP(ndb.Model):
prop = properties.BytesComputedProperty(lambda _: '\x00')
class DJP(ndb.Model):
prop = properties.DeterministicJsonProperty(json_type=dict)
class PropertiesTest(test_case.TestCase):
def test_DeterministicJsonProperty(self):
self.assertEqual({'a': 1}, DJP(prop={'a': 1}).prop)
DJP(prop={'a': 1}).put()
self.assertEqual({'a': 1}, DJP.query().get().prop)
with self.assertRaises(TypeError):
DJP(prop=[])
def test_BytesComputedProperty(self):
self.assertEqual('\x00', BP().prop)
BP().put()
self.assertEqual('\x00', BP.query().get().prop)
if __name__ == '__main__':
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
unittest.main()
| 23.695652
| 76
| 0.713761
| 150
| 1,090
| 5.06
| 0.52
| 0.079051
| 0.031621
| 0.057971
| 0.052701
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017279
| 0.150459
| 1,090
| 45
| 77
| 24.222222
| 0.802376
| 0.165138
| 0
| 0
| 0
| 0
| 0.028698
| 0
| 0
| 0
| 0
| 0
| 0.192308
| 1
| 0.076923
| false
| 0
| 0.230769
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9ef252652f99c5c9feffaab6f06bdbb7fe7dd89
| 953
|
py
|
Python
|
covfefe/covfefe.py
|
fixator10/Trusty-cogs
|
3d47a63f562cb64eb44da6bb53cfe9f8324026e7
|
[
"MIT"
] | 148
|
2017-04-23T19:57:50.000Z
|
2022-03-12T06:59:58.000Z
|
covfefe/covfefe.py
|
mina9999/Trusty-cogs
|
a47de7c233f3c1802effd29f4a86f8a9b0e2b34a
|
[
"MIT"
] | 155
|
2018-01-01T13:27:45.000Z
|
2022-03-12T05:17:51.000Z
|
covfefe/covfefe.py
|
mina9999/Trusty-cogs
|
a47de7c233f3c1802effd29f4a86f8a9b0e2b34a
|
[
"MIT"
] | 221
|
2017-04-02T00:26:08.000Z
|
2022-03-26T15:06:54.000Z
|
import re
import discord
from redbot.core import commands
class Covfefe(commands.Cog):
"""
Convert almost any word into covfefe
"""
def __init__(self, bot):
self.bot = bot
async def covfefe(self, x, k="aeiouy])"):
"""
https://codegolf.stackexchange.com/a/123697
"""
try:
b, c, v = re.findall(f"(.*?[{k}([^{k}.*?([{k}", x)[0]
return b + c + (("bcdfgkpstvz" + c)["pgtvkgbzdfs".find(c)] + v) * 2
except IndexError:
return None
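# Hand-checked example: await self.covfefe("coverage") returns "covfefe"; inputs
# with no vowel/consonant/vowel pattern hit the IndexError branch and return None.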
async def red_delete_data_for_user(self, **kwargs):
"""
Nothing to delete
"""
return
@commands.command()
async def covefy(self, ctx, msg):
"""Convert almost any word into covfefe"""
newword = await self.covfefe(msg)
if newword is not None:
await ctx.send(newword)
else:
await ctx.send("I cannot covfefeify that word")
| 24.435897
| 79
| 0.541448
| 115
| 953
| 4.417391
| 0.582609
| 0.047244
| 0.062992
| 0.07874
| 0.122047
| 0.122047
| 0
| 0
| 0
| 0
| 0
| 0.012365
| 0.321091
| 953
| 38
| 80
| 25.078947
| 0.772798
| 0.037775
| 0
| 0
| 0
| 0
| 0.107856
| 0.029294
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.142857
| 0
| 0.380952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9ef4b5c2209cb05949e60eccf8cd9158602e350
| 4,784
|
py
|
Python
|
exp_gqa/test.py
|
ronghanghu/gqa_single_hop_baseline
|
332d342da60dfefd40f2364d60215ed2f191aa2d
|
[
"BSD-2-Clause"
] | 19
|
2019-08-19T18:09:26.000Z
|
2021-08-29T15:58:30.000Z
|
exp_gqa/test.py
|
ronghanghu/gqa_single_hop_baseline
|
332d342da60dfefd40f2364d60215ed2f191aa2d
|
[
"BSD-2-Clause"
] | 1
|
2019-11-24T14:36:29.000Z
|
2019-12-11T08:33:12.000Z
|
exp_gqa/test.py
|
ronghanghu/gqa_single_hop_baseline
|
332d342da60dfefd40f2364d60215ed2f191aa2d
|
[
"BSD-2-Clause"
] | 1
|
2019-10-30T05:55:52.000Z
|
2019-10-30T05:55:52.000Z
|
import os
import numpy as np
import tensorflow as tf
from models_gqa.model import Model
from models_gqa.config import build_cfg_from_argparse
from util.gqa_train.data_reader import DataReader
import json
# Load config
cfg = build_cfg_from_argparse()
# Start session
os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg.GPU_ID)
sess = tf.Session(config=tf.ConfigProto(
gpu_options=tf.GPUOptions(allow_growth=cfg.GPU_MEM_GROWTH)))
# Data files
imdb_file = cfg.IMDB_FILE % cfg.TEST.SPLIT_VQA
scene_graph_file = cfg.SCENE_GRAPH_FILE % \
cfg.TEST.SPLIT_VQA.replace('_balanced', '').replace('_all', '')
data_reader = DataReader(
imdb_file, shuffle=False, one_pass=True, batch_size=cfg.TEST.BATCH_SIZE,
T_encoder=cfg.T_ENCODER,
vocab_question_file=cfg.VOCAB_QUESTION_FILE,
vocab_answer_file=cfg.VOCAB_ANSWER_FILE,
feature_type=cfg.FEAT_TYPE,
spatial_feature_dir=cfg.SPATIAL_FEATURE_DIR,
objects_feature_dir=cfg.OBJECTS_FEATURE_DIR,
objects_max_num=cfg.W_FEAT,
scene_graph_file=scene_graph_file,
vocab_name_file=cfg.VOCAB_NAME_FILE,
vocab_attr_file=cfg.VOCAB_ATTR_FILE,
spatial_pos_enc_dim=cfg.SPATIAL_POS_ENC_DIM,
bbox_tile_num=cfg.BBOX_TILE_NUM)
num_vocab = data_reader.batch_loader.vocab_dict.num_vocab
num_choices = data_reader.batch_loader.answer_dict.num_vocab
# Inputs and model
input_seq_batch = tf.placeholder(tf.int32, [None, None])
seq_length_batch = tf.placeholder(tf.int32, [None])
image_feat_batch = tf.placeholder(
tf.float32, [None, cfg.H_FEAT, cfg.W_FEAT, cfg.D_FEAT])
image_valid_batch = tf.placeholder(
tf.float32, [None, cfg.H_FEAT, cfg.W_FEAT])
model = Model(
input_seq_batch, seq_length_batch, image_feat_batch, image_valid_batch,
num_vocab=num_vocab, num_choices=num_choices, is_training=False)
# Load snapshot
if cfg.TEST.USE_EMA:
ema = tf.train.ExponentialMovingAverage(decay=0.9) # decay doesn't matter
var_names = {
(ema.average_name(v) if v in model.params else v.op.name): v
for v in tf.global_variables()}
else:
var_names = {v.op.name: v for v in tf.global_variables()}
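# Descriptive note: with USE_EMA set, each model variable is restored from its
# exponential-moving-average shadow (named via ema.average_name), so evaluation
# runs with the averaged weights; otherwise the plain variable names are used.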
snapshot_file = cfg.TEST.SNAPSHOT_FILE % (cfg.EXP_NAME, cfg.TEST.ITER)
print('loading model snapshot from %s' % snapshot_file)
snapshot_saver = tf.train.Saver(var_names)
snapshot_saver.restore(sess, snapshot_file)
print('Done')
# Write results
result_dir = cfg.TEST.RESULT_DIR % (cfg.EXP_NAME, cfg.TEST.ITER)
os.makedirs(result_dir, exist_ok=True)
# Run test
answer_correct, num_questions = 0, 0
if cfg.TEST.OUTPUT_VQA_EVAL_PRED:
output_predictions = []
answer_word_list = data_reader.batch_loader.answer_dict.word_list
pred_file = os.path.join(
result_dir, 'gqa_eval_preds_%s_%s_%08d.json' % (
cfg.TEST.SPLIT_VQA, cfg.EXP_NAME, cfg.TEST.ITER))
for n_batch, batch in enumerate(data_reader.batches()):
if 'answer_label_batch' not in batch:
batch['answer_label_batch'] = -np.ones(
len(batch['qid_list']), np.int32)
if num_questions == 0:
print('imdb has no answer labels. Using dummy labels.\n\n'
'**The final accuracy will be zero (no labels provided)**\n')
vqa_scores_value = sess.run(model.vqa_scores, feed_dict={
input_seq_batch: batch['input_seq_batch'],
seq_length_batch: batch['seq_length_batch'],
image_feat_batch: batch['image_feat_batch'],
image_valid_batch: batch['image_valid_batch']})
# compute accuracy
vqa_labels = batch['answer_label_batch']
vqa_predictions = np.argmax(vqa_scores_value, axis=1)
answer_correct += np.sum(vqa_predictions == vqa_labels)
num_questions += len(vqa_labels)
accuracy = answer_correct / num_questions
if n_batch % 20 == 0:
print('exp: %s, iter = %d, accumulated accuracy on %s = %f (%d / %d)' %
(cfg.EXP_NAME, cfg.TEST.ITER, cfg.TEST.SPLIT_VQA,
accuracy, answer_correct, num_questions))
if cfg.TEST.OUTPUT_VQA_EVAL_PRED:
output_predictions.extend([
{"questionId": qId, "prediction": answer_word_list[p]}
for qId, p in zip(batch['qid_list'], vqa_predictions)])
with open(os.path.join(
result_dir, 'vqa_results_%s.txt' % cfg.TEST.SPLIT_VQA), 'w') as f:
print('\nexp: %s, iter = %d, final accuracy on %s = %f (%d / %d)' %
(cfg.EXP_NAME, cfg.TEST.ITER, cfg.TEST.SPLIT_VQA,
accuracy, answer_correct, num_questions))
print('exp: %s, iter = %d, final accuracy on %s = %f (%d / %d)' %
(cfg.EXP_NAME, cfg.TEST.ITER, cfg.TEST.SPLIT_VQA,
accuracy, answer_correct, num_questions), file=f)
if cfg.TEST.OUTPUT_VQA_EVAL_PRED:
with open(pred_file, 'w') as f:
json.dump(output_predictions, f, indent=2)
print('prediction file written to %s' % pred_file)
| 40.201681
| 79
| 0.713002
| 736
| 4,784
| 4.328804
| 0.256793
| 0.043942
| 0.026365
| 0.032957
| 0.315443
| 0.284683
| 0.205273
| 0.161017
| 0.161017
| 0.134024
| 0
| 0.005542
| 0.170151
| 4,784
| 118
| 80
| 40.542373
| 0.796977
| 0.026756
| 0
| 0.082474
| 0
| 0.030928
| 0.125054
| 0.006457
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.010309
| 0.072165
| 0
| 0.072165
| 0.072165
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9efb93e53325ce5948d495ecf3a99ce26893591
| 2,071
|
py
|
Python
|
extract_gear/armor_visitor.py
|
kamerons/dde-extract-gear
|
44464ae470bd5de6279d32e3587b469ce006ea42
|
[
"Apache-2.0"
] | null | null | null |
extract_gear/armor_visitor.py
|
kamerons/dde-extract-gear
|
44464ae470bd5de6279d32e3587b469ce006ea42
|
[
"Apache-2.0"
] | null | null | null |
extract_gear/armor_visitor.py
|
kamerons/dde-extract-gear
|
44464ae470bd5de6279d32e3587b469ce006ea42
|
[
"Apache-2.0"
] | null | null | null |
class ArmorVisitor:
def __init__(self, num_pages, first_page_col_start, first_page_row_start,
last_page_row_start, last_page_col_end, last_page_row_end, num_col_page=5, num_row_page=3):
self.num_pages = num_pages
self.first_page_col_start = first_page_col_start
self.first_page_row_start = first_page_row_start
self.last_page_row_start = last_page_row_start
self.last_page_col_end = last_page_col_end
self.last_page_row_end = last_page_row_end
self.num_col_page = num_col_page
self.num_row_page = num_row_page
def iterate(self, callback):
for page_num in range(1, self.num_pages + 1):
page = self.create_page(page_num)
i = 0
for coord in page:
callback(coord, page_num, i)
i += 1
def create_page(self, page_num):
if page_num == 1:
last_col = self.num_col_page if self.num_pages > 1 else self.last_page_col_end
last_row = self.num_row_page if self.num_pages > 1 else self.last_page_row_end
page = Page(self.first_page_col_start, self.first_page_row_start, last_col, last_row, self.num_col_page)
elif page_num == self.num_pages:
page = Page(1, self.last_page_row_start,
self.last_page_col_end, self.last_page_row_end, self.num_col_page)
else:
page = Page(1, 1, self.num_col_page, self.num_row_page, self.num_col_page)
return page
class Page:
def __init__(self, start_col, start_row, last_col, last_row, num_col_page=5):
self.start_col = start_col
self.start_row = start_row
self.last_col = last_col
self.last_row = last_row
self.num_col_page = num_col_page
def __iter__(self):
self.cur_row = self.start_row
self.cur_col = self.start_col
return self
def __next__(self):
position = (self.cur_row, self.cur_col)
if self.cur_row > self.last_row or (self.cur_col > self.last_col and self.cur_row == self.last_row):
raise StopIteration
elif self.cur_col == self.num_col_page:
self.cur_col = 1
self.cur_row += 1
else:
self.cur_col += 1
return position
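A minimal usage sketch of the classes above; the grid dimensions and the callback are hypothetical, chosen only to show how iterate() hands each (row, col) coordinate to the callback together with its page number and running index:
def log_coord(coord, page_num, index):
    # coord is the (row, col) tuple produced by Page.__next__
    print("page %d, item %d -> row=%d col=%d" % (page_num, index, coord[0], coord[1]))

visitor = ArmorVisitor(num_pages=2, first_page_col_start=2, first_page_row_start=1,
                       last_page_row_start=1, last_page_col_end=3, last_page_row_end=2)
visitor.iterate(log_coord)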
| 32.873016
| 110
| 0.718493
| 360
| 2,071
| 3.680556
| 0.105556
| 0.089811
| 0.090566
| 0.084528
| 0.490566
| 0.436981
| 0.315472
| 0.230943
| 0.199245
| 0.05283
| 0
| 0.00965
| 0.199421
| 2,071
| 62
| 111
| 33.403226
| 0.789505
| 0
| 0
| 0.08
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0
| 0
| 0
| 0.22
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9f401385afbe018601c2bef20e53c9b587fb7df
| 485
|
py
|
Python
|
examples/test_scalar_field.py
|
gemini3d/pv-gemini
|
99dff15b43a2c93cbcb63d2f8946d425d0555ef3
|
[
"Apache-2.0"
] | null | null | null |
examples/test_scalar_field.py
|
gemini3d/pv-gemini
|
99dff15b43a2c93cbcb63d2f8946d425d0555ef3
|
[
"Apache-2.0"
] | null | null | null |
examples/test_scalar_field.py
|
gemini3d/pv-gemini
|
99dff15b43a2c93cbcb63d2f8946d425d0555ef3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""
example of 3D scalar field
If you get this error, ParaView doesn't know your data file format:
TypeError: TestFileReadability argument %Id: %V
"""
from pathlib import Path
import argparse
import paraview.simple as pvs
p = argparse.ArgumentParser()
p.add_argument("fn", help="data file to load with paraview OpenDataFile()")
P = p.parse_args()
fn = Path(P.fn).expanduser()
if not fn.is_file():
raise FileNotFoundError(fn)
pvs.OpenDataFile(str(fn))
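An invocation sketch: the script is intended to run under ParaView's Python interpreter, and the data file path below is an assumption.
# pvpython examples/test_scalar_field.py ~/data/scalar_field.vtk
# OpenDataFile() picks a reader from the file extension; an unsupported format
# surfaces as the TypeError quoted in the module docstring.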
| 20.208333
| 75
| 0.740206
| 74
| 485
| 4.810811
| 0.702703
| 0.044944
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004819
| 0.14433
| 485
| 23
| 76
| 21.086957
| 0.853012
| 0.340206
| 0
| 0
| 0
| 0
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.3
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9f4182f4b0683cbf4f51c72cef042f5acb55553
| 341
|
py
|
Python
|
src/cms/forms/languages/language_form.py
|
S10MC2015/cms-django
|
b08f2be60a9db6c8079ee923de2cd8912f550b12
|
[
"Apache-2.0"
] | null | null | null |
src/cms/forms/languages/language_form.py
|
S10MC2015/cms-django
|
b08f2be60a9db6c8079ee923de2cd8912f550b12
|
[
"Apache-2.0"
] | null | null | null |
src/cms/forms/languages/language_form.py
|
S10MC2015/cms-django
|
b08f2be60a9db6c8079ee923de2cd8912f550b12
|
[
"Apache-2.0"
] | null | null | null |
from django import forms
from ...models import Language
class LanguageForm(forms.ModelForm):
"""
Form for creating and modifying language objects
"""
class Meta:
model = Language
fields = [
"code",
"english_name",
"native_name",
"text_direction",
]
| 17.947368
| 52
| 0.548387
| 31
| 341
| 5.935484
| 0.774194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.363636
| 341
| 18
| 53
| 18.944444
| 0.847926
| 0.140762
| 0
| 0
| 0
| 0
| 0.148014
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9f437d2e63f9838da4ffa0491804e95e149a773
| 1,482
|
py
|
Python
|
search/forms.py
|
gregneagle/sal
|
74c583fb1c1b33d3201b308b147376b3dcaca33f
|
[
"Apache-2.0"
] | 2
|
2019-11-01T20:50:35.000Z
|
2021-01-13T22:02:55.000Z
|
search/forms.py
|
gregneagle/sal
|
74c583fb1c1b33d3201b308b147376b3dcaca33f
|
[
"Apache-2.0"
] | null | null | null |
search/forms.py
|
gregneagle/sal
|
74c583fb1c1b33d3201b308b147376b3dcaca33f
|
[
"Apache-2.0"
] | null | null | null |
from django import forms
from .models import *
from server.models import *
class ChoiceFieldNoValidation(forms.ChoiceField):
def validate(self, value):
pass
class SaveSearchForm(forms.ModelForm):
class Meta:
model = SavedSearch
fields = ('name',)
class SearchRowForm(forms.ModelForm):
skip_fields = [
'id',
'machine_group',
'report',
'activity',
'errors',
'warnings',
'install_log',
'puppet_errors',
'install_log_hash'
]
search_fields = []
for f in Machine._meta.fields:
if f.name not in skip_fields:
add = (f.name,f.name,)
search_fields.append(add)
search_field = ChoiceFieldNoValidation(choices=sorted(search_fields))
and_or = ChoiceFieldNoValidation(choices=AND_OR_CHOICES)
def __init__(self, *args, **kwargs):
self.search_group = kwargs.pop('search_group', None)
super(SearchRowForm, self).__init__(*args, **kwargs)
try:
search_group_count = self.search_group.searchrow_set.count()
except:
search_group_count = 0
if search_group_count == 0 and self.search_group:
self.fields['and_or'] = ChoiceFieldNoValidation(
initial='AND',
widget=forms.HiddenInput()
)
class Meta:
model = SearchRow
fields = ('search_models', 'search_field', 'and_or', 'operator','search_term',)
| 27.962264
| 87
| 0.609312
| 155
| 1,482
| 5.574194
| 0.412903
| 0.08912
| 0.052083
| 0.078704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001881
| 0.282726
| 1,482
| 52
| 88
| 28.5
| 0.810913
| 0
| 0
| 0.045455
| 0
| 0
| 0.106613
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0.022727
| 0.068182
| 0
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9f67833672023bef782862284907976acb9371f
| 2,216
|
py
|
Python
|
newsparser.py
|
antoreep-jana/BBC-News-Analyzer
|
0a6e54ddf4baefa4532213c5e6f60e712ff3a1ca
|
[
"MIT"
] | 1
|
2021-12-27T12:57:07.000Z
|
2021-12-27T12:57:07.000Z
|
newsparser.py
|
antoreep-jana/BBC-News-Analyzer
|
0a6e54ddf4baefa4532213c5e6f60e712ff3a1ca
|
[
"MIT"
] | null | null | null |
newsparser.py
|
antoreep-jana/BBC-News-Analyzer
|
0a6e54ddf4baefa4532213c5e6f60e712ff3a1ca
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup as bs
import requests
class BBC:
def __init__(self, url:str):
article = requests.get(url)
self.soup = bs(article.content, "html.parser")
#print(dir(self.soup))
#print(self.soup.h1.text)
self.body = self.get_body()
self.link = url
self.title = self.get_title()
self.author = self.get_author()
self.images = self.get_images()
self.date = self.get_date()
#author = self.soup.find
#date = self.soup
#for img in imgs:
# print(img['src'])
paras = self.soup.find_all('div', {"class" : "ssrcss-17j9f6r-RichTextContainer e5tfeyi1"})
#for para in paras:
# print(para.text)
def get_body(self) -> list:
#body = self.soup.find(property="articleBody")
paras = self.soup.find_all('div', {"class" : "ssrcss-17j9f6r-RichTextContainer e5tfeyi1"})
#for para in paras:
# print(para.text)
return [p.text for p in paras]
#return [p.text for p in body.find_all("p")]
def get_title(self) -> str:
#return self.soup.find(class_="story-body__h1").text
return self.soup.h1.text
def get_author(self) -> str:
author = self.soup.find('p', {'class' : 'ssrcss-1rv0moy-Contributor e5xb54n2'})
return author.text.replace("BBC News", "")
def get_images(self) -> list:
imgs = self.soup.find_all('figure', {'class' : 'ssrcss-wpgbih-StyledFigure e34k3c23'})
imgs_lst = []
for img in imgs:
try:
if "blank_white_space" not in img.img['src']:
imgs_lst.append(img.img['src'])#['div']['span']['span']['img'])
except:
pass
return imgs_lst
def get_date(self) -> str:
date = self.soup.find_all('time')[0]
return date['datetime']
parsed = BBC("https://www.bbc.co.uk/news/world-europe-49345912")
#print(parsed.title)
#print(parsed.link)
#print(parsed.author)
#print(parsed.date)
#print(parsed.title)
#print(parsed.body)
#print(parsed.images)
#print(parsed.body)
| 28.410256
| 98
| 0.564982
| 278
| 2,216
| 4.406475
| 0.291367
| 0.084898
| 0.078367
| 0.04898
| 0.223673
| 0.179592
| 0.151837
| 0.151837
| 0.151837
| 0.151837
| 0
| 0.022872
| 0.289711
| 2,216
| 77
| 99
| 28.779221
| 0.7554
| 0.229242
| 0
| 0.057143
| 0
| 0
| 0.170326
| 0.068843
| 0
| 0
| 0
| 0
| 0
| 1
| 0.171429
| false
| 0.028571
| 0.057143
| 0.028571
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9f73f41171ea9b93f4f79bc336c9fe6927dba89
| 2,044
|
py
|
Python
|
SIR_model-Copy.Caroline.1.py
|
Caroline-Odevall/final-project-team-18
|
fbf00ae4ec554dee9245a9834ff4108b3d339842
|
[
"MIT"
] | null | null | null |
SIR_model-Copy.Caroline.1.py
|
Caroline-Odevall/final-project-team-18
|
fbf00ae4ec554dee9245a9834ff4108b3d339842
|
[
"MIT"
] | null | null | null |
SIR_model-Copy.Caroline.1.py
|
Caroline-Odevall/final-project-team-18
|
fbf00ae4ec554dee9245a9834ff4108b3d339842
|
[
"MIT"
] | null | null | null |
# In[42]:
from scipy.integrate import odeint
import numpy as np
import matplotlib.pyplot as plt
# In[43]:
# describe the model
def deriv(y, t, N, beta, gamma, delta):
S, E, I, R = y
dSdt = -beta * S * I / N # S(t) - susceptible (those who can still become infected).
dEdt = beta * S * I / N - gamma * E
dIdt = delta * E - gamma * I # I(t) - infected (those with an ongoing infection)
dRdt = gamma * I
return dSdt, dEdt, dIdt, dRdt
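# For reference, the system of ODEs exactly as coded above (note that gamma
# appears in both the E and I equations, while delta moves people from E to I):
#   dS/dt = -beta*S*I/N
#   dE/dt =  beta*S*I/N - gamma*E
#   dI/dt =  delta*E - gamma*I
#   dR/dt =  gamma*I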
# In[44]:
# describe the parameters
N = 2283 #Total population N = S(t)+I(t)+R(t)
D = 4.0 #infections last four days
gamma = 1.0 / D #Removal rate (how many recover per unit time)
delta = 1.0 / 5.0 #incubation period of five days
R_0 = 2.5 #Reproduction number
beta = R_0 * gamma #R_0 = beta/gamma. Number of people infected per infectious individual per unit time (depends on the properties of the virus and on how we behave).
S0, E0, I0, R0 = N-1, 1, 0, 0 # initial conditions: one infected, rest susceptible
#Rt = R0 * S(t)/Ntot * (1 - b). b = effect of policy and behavioural changes
# In[45]:
t = np.linspace(0, 99, 100) # Grid of time points (in days)
y0 = S0, E0, I0, R0 # Initial conditions vector
# Integrate the SIR equations over the time grid, t.
ret = odeint(deriv, y0, t, args=(N, beta, gamma, delta))
S, E, I, R = ret.T
# In[46]:
def plotsir(t, S, E, I, R):
f, ax = plt.subplots(1,1,figsize=(10,4))
ax.plot(t, S, 'b', alpha=0.7, linewidth=2, label='Susceptible')
ax.plot(t, E, 'y', alpha=0.7, linewidth=2, label='Exposed')
ax.plot(t, I, 'r', alpha=0.7, linewidth=2, label='Infected')
ax.plot(t, R, 'g', alpha=0.7, linewidth=2, label='Recovered')
ax.set_xlabel('Time (days)')
ax.yaxis.set_tick_params(length=0)
ax.xaxis.set_tick_params(length=0)
ax.grid(b=True, which='major', c='w', lw=2, ls='-')
legend = ax.legend()
legend.get_frame().set_alpha(0.5)
for spine in ('top', 'right', 'bottom', 'left'):
ax.spines[spine].set_visible(False)
plt.savefig('Plot.png')
plt.show();
# plot the graph
# In[47]:
plotsir(t, S, E, I, R)
# In[ ]:
| 24.333333
| 137
| 0.630137
| 364
| 2,044
| 3.516484
| 0.436813
| 0.007813
| 0.009375
| 0.0125
| 0.151563
| 0.151563
| 0.029688
| 0.029688
| 0
| 0
| 0
| 0.04473
| 0.201566
| 2,044
| 83
| 138
| 24.626506
| 0.737745
| 0.354207
| 0
| 0
| 0
| 0
| 0.064241
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.078947
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9f8215f5040fa71b2646d52a053545a92c3fd12
| 1,681
|
py
|
Python
|
app/middleware/cache_headers.py
|
Niclnx/service-stac
|
ad9129a7130d09b2bed387d8e82575eb86fdfa7b
|
[
"BSD-3-Clause"
] | 9
|
2020-08-17T11:01:48.000Z
|
2022-01-17T22:24:13.000Z
|
app/middleware/cache_headers.py
|
Niclnx/service-stac
|
ad9129a7130d09b2bed387d8e82575eb86fdfa7b
|
[
"BSD-3-Clause"
] | 100
|
2020-08-14T05:56:40.000Z
|
2022-03-01T22:39:58.000Z
|
app/middleware/cache_headers.py
|
Niclnx/service-stac
|
ad9129a7130d09b2bed387d8e82575eb86fdfa7b
|
[
"BSD-3-Clause"
] | 3
|
2020-09-02T14:01:07.000Z
|
2021-07-27T06:30:26.000Z
|
import logging
import re
from urllib.parse import urlparse
from django.conf import settings
from django.utils.cache import add_never_cache_headers
from django.utils.cache import patch_cache_control
from django.utils.cache import patch_response_headers
logger = logging.getLogger(__name__)
STAC_BASE = settings.STAC_BASE
STAC_BASE_V = settings.STAC_BASE_V
class CacheHeadersMiddleware:
'''Middleware that adds appropriate cache headers to GET and HEAD methods.
NOTE: /checker, /get-token, /metrics and /{healthcheck} endpoints are marked as never cache.
'''
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
# Code to be executed for each request before
# the view (and later middleware) are called.
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
# match /xxx or /api/stac/xxx
# f.ex. /metrics, /checker, /api/stac/{healthcheck}, /api/stac/get-token
if re.match(fr'^(/{STAC_BASE})?/\w+$', request.path):
add_never_cache_headers(response)
elif (
request.method in ('GET', 'HEAD') and
not request.path.startswith(urlparse(settings.STATIC_URL).path)
):
logger.debug(
"Patching cache headers for request %s %s",
request.method,
request.path,
extra={"request": request}
)
patch_response_headers(response, settings.CACHE_MIDDLEWARE_SECONDS)
patch_cache_control(response, public=True)
return response
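A minimal settings sketch showing where this middleware would typically be registered; the dotted path and the surrounding entries are assumptions, not taken from this repository:
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "middleware.cache_headers.CacheHeadersMiddleware",  # hypothetical dotted path
    "django.middleware.common.CommonMiddleware",
]
CACHE_MIDDLEWARE_SECONDS = 600  # consumed by patch_response_headers() above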
| 32.960784
| 96
| 0.662701
| 207
| 1,681
| 5.188406
| 0.415459
| 0.037244
| 0.041899
| 0.055866
| 0.150838
| 0.126629
| 0.068901
| 0.068901
| 0
| 0
| 0
| 0
| 0.250446
| 1,681
| 50
| 97
| 33.62
| 0.852381
| 0.252826
| 0
| 0
| 0
| 0
| 0.06068
| 0.01699
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.233333
| 0
| 0.366667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9f87264f50f9243a592053fcbe97aca0b8c2377
| 2,818
|
py
|
Python
|
mmdet/models/detectors/knowledge_distilling/kd_single_stage.py
|
anorthman/mmdetection
|
52e28154364f0e19d11c206bb357d88f29fc4a2d
|
[
"Apache-2.0"
] | 5
|
2019-06-11T11:08:54.000Z
|
2021-03-25T10:06:01.000Z
|
mmdet/models/detectors/knowledge_distilling/kd_single_stage.py
|
anorthman/mmdetection
|
52e28154364f0e19d11c206bb357d88f29fc4a2d
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/detectors/knowledge_distilling/kd_single_stage.py
|
anorthman/mmdetection
|
52e28154364f0e19d11c206bb357d88f29fc4a2d
|
[
"Apache-2.0"
] | 1
|
2019-06-11T11:08:55.000Z
|
2019-06-11T11:08:55.000Z
|
# author huangchuanhong
import torch
from mmcv.runner import load_checkpoint
from ..base import BaseDetector
from ..single_stage import SingleStageDetector
from ...registry import DETECTORS
from ...builder import build_detector
@DETECTORS.register_module
class KDSingleStageDetector(SingleStageDetector):
def __init__(self,
backbone,
teacher,
neck=None,
bbox_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(KDSingleStageDetector, self).__init__(backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained)
self.teacher_detector = build_detector(teacher.model, train_cfg=None, test_cfg=test_cfg)
load_checkpoint(self.teacher_detector, teacher.checkpoint)
self.teacher_detector.eval()
self.beta = train_cfg.teacher.beta
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
beta=1000.):
feats = ()
backbone_feats = self.backbone(img)
if self.train_cfg.teacher.backbone_at:
for i in self.train_cfg.teacher.backbone_at_idxes:
feats += (backbone_feats[i],)
if self.with_neck:
neck_feats = self.neck(backbone_feats)
if self.train_cfg.teacher.neck_at:
feats += neck_feats
outs = self.bbox_head(neck_feats)
else:
outs = self.bbox_head(backbone_feats)
with torch.no_grad():
t_feats = ()
t_backbone_feats = self.teacher_detector.backbone(img)
if self.train_cfg.teacher.backbone_at:
for i in self.train_cfg.teacher.backbone_at_idxes:
t_feats += (t_backbone_feats[i],)
if self.with_neck:
t_neck_feats = self.teacher_detector.neck(t_backbone_feats)
if self.train_cfg.teacher.neck_at:
t_feats += t_neck_feats
t_outs = self.teacher_detector.bbox_head(t_neck_feats)
else:
t_outs = self.teacher_detector.bbox_head(t_backbone_feats)
loss_inputs = (feats,) + outs + (t_feats,) + t_outs + (gt_bboxes, gt_labels, img_metas, self.train_cfg)
losses = self.bbox_head.loss(
*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
return losses
| 42.059701
| 112
| 0.551455
| 297
| 2,818
| 4.895623
| 0.218855
| 0.066025
| 0.091472
| 0.078404
| 0.296424
| 0.251719
| 0.251719
| 0.213205
| 0.167813
| 0.112792
| 0
| 0.002279
| 0.377218
| 2,818
| 66
| 113
| 42.69697
| 0.826211
| 0.007452
| 0
| 0.16129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.096774
| 0
| 0.16129
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9f8cb65181ebad752b9a810d28cc601137f1877
| 4,518
|
py
|
Python
|
metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_dial_turn_v2.py
|
yiwc/robotics-world
|
48efda3a8ea6741b35828b02860f45753252e376
|
[
"MIT"
] | 681
|
2019-09-09T19:34:37.000Z
|
2022-03-31T12:17:58.000Z
|
metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_dial_turn_v2.py
|
yiwc/robotics-world
|
48efda3a8ea6741b35828b02860f45753252e376
|
[
"MIT"
] | 212
|
2019-09-18T14:43:44.000Z
|
2022-03-27T22:21:00.000Z
|
metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_dial_turn_v2.py
|
yiwc/robotics-world
|
48efda3a8ea6741b35828b02860f45753252e376
|
[
"MIT"
] | 157
|
2019-09-12T05:06:05.000Z
|
2022-03-29T14:47:24.000Z
|
import numpy as np
from gym.spaces import Box
from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerDialTurnEnvV2(SawyerXYZEnv):
TARGET_RADIUS = 0.07
def __init__(self):
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.7, 0.0)
obj_high = (0.1, 0.8, 0.0)
goal_low = (-0.1, 0.73, 0.0299)
goal_high = (0.1, 0.83, 0.0301)
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
)
self.init_config = {
'obj_init_pos': np.array([0, 0.7, 0.0]),
'hand_init_pos': np.array([0, 0.6, 0.2], dtype=np.float32),
}
self.goal = np.array([0., 0.73, 0.08])
self.obj_init_pos = self.init_config['obj_init_pos']
self.hand_init_pos = self.init_config['hand_init_pos']
self._random_reset_space = Box(
np.array(obj_low),
np.array(obj_high),
)
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
@property
def model_name(self):
return full_v2_path_for('sawyer_xyz/sawyer_dial.xml')
@_assert_task_is_set
def evaluate_state(self, obs, action):
(reward,
tcp_to_obj,
_,
target_to_obj,
object_grasped,
in_place) = self.compute_reward(action, obs)
info = {
'success': float(target_to_obj <= self.TARGET_RADIUS),
'near_object': float(tcp_to_obj <= 0.01),
'grasp_success': 1.,
'grasp_reward': object_grasped,
'in_place_reward': in_place,
'obj_to_target': target_to_obj,
'unscaled_reward': reward,
}
return reward, info
def _get_pos_objects(self):
dial_center = self.get_body_com('dial').copy()
dial_angle_rad = self.data.get_joint_qpos('knob_Joint_1')
offset = np.array([
np.sin(dial_angle_rad),
-np.cos(dial_angle_rad),
0
])
dial_radius = 0.05
offset *= dial_radius
return dial_center + offset
def _get_quat_objects(self):
return self.sim.data.get_body_xquat('dial')
def reset_model(self):
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = self.init_config['obj_init_pos']
self.prev_obs = self._get_curr_obs_combined_no_goal()
if self.random_init:
goal_pos = self._get_state_rand_vec()
self.obj_init_pos = goal_pos[:3]
final_pos = goal_pos.copy() + np.array([0, 0.03, 0.03])
self._target_pos = final_pos
self.sim.model.body_pos[self.model.body_name2id('dial')] = self.obj_init_pos
self.dial_push_position = self._get_pos_objects() + np.array([0.05, 0.02, 0.09])
return self._get_obs()
def compute_reward(self, action, obs):
obj = self._get_pos_objects()
dial_push_position = self._get_pos_objects() + np.array([0.05, 0.02, 0.09])
tcp = self.tcp_center
target = self._target_pos.copy()
target_to_obj = (obj - target)
target_to_obj = np.linalg.norm(target_to_obj)
target_to_obj_init = (self.dial_push_position - target)
target_to_obj_init = np.linalg.norm(target_to_obj_init)
in_place = reward_utils.tolerance(
target_to_obj,
bounds=(0, self.TARGET_RADIUS),
margin=abs(target_to_obj_init - self.TARGET_RADIUS),
sigmoid='long_tail',
)
dial_reach_radius = 0.005
tcp_to_obj = np.linalg.norm(dial_push_position - tcp)
tcp_to_obj_init = np.linalg.norm(self.dial_push_position - self.init_tcp)
reach = reward_utils.tolerance(
tcp_to_obj,
bounds=(0, dial_reach_radius),
margin=abs(tcp_to_obj_init-dial_reach_radius),
sigmoid='gaussian',
)
gripper_closed = min(max(0, action[-1]), 1)
reach = reward_utils.hamacher_product(reach, gripper_closed)
tcp_opened = 0
object_grasped = reach
reward = 10 * reward_utils.hamacher_product(reach, in_place)
return (reward,
tcp_to_obj,
tcp_opened,
target_to_obj,
object_grasped,
in_place)
| 31.816901
| 93
| 0.599823
| 628
| 4,518
| 3.941083
| 0.221338
| 0.038384
| 0.053333
| 0.028283
| 0.225859
| 0.149091
| 0.098586
| 0.073535
| 0.073535
| 0.073535
| 0
| 0.036772
| 0.28973
| 4,518
| 141
| 94
| 32.042553
| 0.734497
| 0
| 0
| 0.088496
| 0
| 0
| 0.047587
| 0.005755
| 0
| 0
| 0
| 0
| 0.017699
| 1
| 0.061947
| false
| 0
| 0.044248
| 0.017699
| 0.176991
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9fa7c6bd7a253ee2a588381042c5dfd3d99cb96
| 2,560
|
py
|
Python
|
yezdi/parser/parser.py
|
ragsagar/yezdi
|
5b97bedc56d5af7f28b244a0d7c0c8259f643102
|
[
"MIT"
] | 1
|
2021-04-27T20:07:42.000Z
|
2021-04-27T20:07:42.000Z
|
yezdi/parser/parser.py
|
ragsagar/yezdi
|
5b97bedc56d5af7f28b244a0d7c0c8259f643102
|
[
"MIT"
] | null | null | null |
yezdi/parser/parser.py
|
ragsagar/yezdi
|
5b97bedc56d5af7f28b244a0d7c0c8259f643102
|
[
"MIT"
] | null | null | null |
from yezdi.lexer.token import TokenType
from yezdi.parser.ast import Program, Statement, Participant, Title, LineStatement
class Parser:
def __init__(self, lexer):
self.lexer = lexer
self.current_token = None
self.peek_token = None
self.next_token()
self.next_token()
self.participants = {}
def next_token(self):
self.current_token, self.peek_token = self.peek_token, self.lexer.next_token()
def parse_program(self):
program = Program()
while self.current_token.type != TokenType.EOF:
statement = self.parse_statement()
if statement:
program.statements.append(statement)
self.next_token()
return program
def parse_statement(self):
if self.current_token.type == TokenType.IDENTIFIER:
return self.parse_line_statement()
elif self.current_token.type == TokenType.TITLE:
return self.parse_title()
return None
def parse_line_statement(self):
participant_literal = self.current_token.literal
if not self.peek_token.type in [TokenType.SOLID_LINE, TokenType.DASHED_LINE]:
return None
self.next_token()
participant = Participant(participant_literal)
line = LineStatement(self.current_token.type)
line.set_source(participant)
if not self.expect_peek(TokenType.IDENTIFIER):
return None
target = Participant(self.current_token.literal)
line.set_target(target)
if not self.expect_peek(TokenType.COLON):
return None
if self.expect_peek(TokenType.IDENTIFIER):
line.set_info(self.current_token.literal)
if self.peek_token.type not in [TokenType.NEWLINE, TokenType.EOF]:
return None
statement = Statement(line)
return statement
def get_participant(self, value):
if value in self.participants:
return self.participants[value]
else:
participant = Participant(value)
self.participants[value] = participant
return participant
def expect_peek(self, token_type):
if self.peek_token.type == token_type:
self.next_token()
return True
else:
return False
def parse_title(self):
if not self.expect_peek(TokenType.IDENTIFIER):
return None
title = Title(self.current_token.literal)
return Statement(title)
class ParserError(Exception):
pass
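A usage sketch, assuming the package also ships a Lexer whose instances expose next_token(); the import path and the sample source text are assumptions about the sequence-diagram syntax:
from yezdi.lexer import Lexer  # hypothetical import path

source = "title Example\nAlice->Bob: hello"
program = Parser(Lexer(source)).parse_program()
print(len(program.statements))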
| 32
| 86
| 0.640625
| 288
| 2,560
| 5.517361
| 0.184028
| 0.069226
| 0.100692
| 0.050346
| 0.229704
| 0.078037
| 0.060415
| 0.060415
| 0.060415
| 0
| 0
| 0
| 0.280469
| 2,560
| 79
| 87
| 32.405063
| 0.862649
| 0
| 0
| 0.227273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121212
| false
| 0.015152
| 0.030303
| 0
| 0.409091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9fae34b418d8854a4b364f1044c114896456110
| 1,050
|
py
|
Python
|
scripts/check_categories.py
|
oberron/entolusis
|
209e1e245d8e501e5e6ea2f52dd5b0da7d886f5c
|
[
"MIT"
] | null | null | null |
scripts/check_categories.py
|
oberron/entolusis
|
209e1e245d8e501e5e6ea2f52dd5b0da7d886f5c
|
[
"MIT"
] | null | null | null |
scripts/check_categories.py
|
oberron/entolusis
|
209e1e245d8e501e5e6ea2f52dd5b0da7d886f5c
|
[
"MIT"
] | null | null | null |
# list categories in category folder
from os import walk
from os.path import abspath,join, pardir
categories_folder = abspath(join(__file__,pardir,pardir,"category"))
post_folder = abspath(join(__file__,pardir,pardir,"_posts"))
site_categories = []
for root,directories,files in walk(categories_folder):
for f in files:
site_categories.append(f.split(".md")[0])
site_categories = set(site_categories)
for root,directories,files in walk(post_folder):
for f in files:
with open(join(root,f),'r',encoding="utf-8") as fi:
lines = fi.readlines()
for l in lines:
if l.find("categories")==0:
categories = l.split(":")[1]
for c in [" ","[","]","\n"]:
categories = categories.replace(c,"")
categories=categories.split(",")
if len(set(categories)-site_categories)>0:
print(f,set(categories)-site_categories)
break
print("done")
| 36.206897
| 68
| 0.578095
| 124
| 1,050
| 4.741935
| 0.387097
| 0.142857
| 0.057823
| 0.071429
| 0.316327
| 0.258503
| 0.146259
| 0.146259
| 0
| 0
| 0
| 0.00672
| 0.291429
| 1,050
| 29
| 69
| 36.206897
| 0.783602
| 0.032381
| 0
| 0.086957
| 0
| 0
| 0.04335
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.086957
| 0
| 0.086957
| 0.086957
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9fb43e9d0e20574f25b444b461b284752a17b4c
| 5,311
|
py
|
Python
|
docsrc/makedoc.py
|
syoyo/soloud
|
cce88a2408a4b1e88ccbc75de9897b39bc3e7dda
|
[
"Libpng",
"Zlib"
] | 1
|
2019-11-25T11:32:09.000Z
|
2019-11-25T11:32:09.000Z
|
docsrc/makedoc.py
|
syoyo/soloud
|
cce88a2408a4b1e88ccbc75de9897b39bc3e7dda
|
[
"Libpng",
"Zlib"
] | null | null | null |
docsrc/makedoc.py
|
syoyo/soloud
|
cce88a2408a4b1e88ccbc75de9897b39bc3e7dda
|
[
"Libpng",
"Zlib"
] | null | null | null |
#!/usr/bin/env python3
""" builds documentation files from multimarkdown (mmd) source
to various formats, including the web site and pdf.
"""
import subprocess
import glob
import os
import sys
import time
import shutil
src = [
"intro.mmd",
"downloads.mmd",
"quickstart.mmd",
"faq.mmd",
"dirstruct.mmd",
"premake.mmd",
"legal.mmd",
"concepts.mmd",
"concepts3d.mmd",
"voicemanagement.mmd",
"examples.mmd",
"foreign_interface.mmd",
"c_api.mmd",
"python_api.mmd",
"ruby_api.mmd",
"rpgmaker_api.mmd",
"bmx_api.mmd",
"gamemaker_api.mmd",
"cs_api.mmd",
"d_api.mmd",
"codegen.mmd",
"basics.mmd",
"attributes.mmd",
"faders.mmd",
"voicegroups.mmd",
"coremisc.mmd",
"core3d.mmd",
"audiosource.mmd",
"newsoundsources.mmd",
"wav.mmd",
"wavstream.mmd",
"speech.mmd",
"sfxr.mmd",
"modplug.mmd",
"monotone.mmd",
"tedsid.mmd",
"vizsn.mmd",
"vic.mmd",
"filters.mmd",
"biquadfilter.mmd",
"echofilter.mmd",
"lofifilter.mmd",
"flangerfilter.mmd",
"dcremovalfilter.mmd",
"fftfilter.mmd",
"bassboostfilter.mmd",
"waveshaperfilter.mmd",
"mixbus.mmd",
"queue.mmd",
"collider.mmd",
"attenuator.mmd",
"file.mmd",
"backends.mmd"
]
website_only = [
"downloads.mmd"
]
unknown = 0
for file in glob.glob("*.mmd"):
if file not in src:
unknown = 1
print(file + " not included in docs!")
if unknown:
print("Add the new files to makedoc.py, soloud.tex and htmlpre.txt.")
sys.exit()
datestring = time.strftime("%Y%m%d")
if not os.path.exists(datestring + "/web"):
os.makedirs(datestring + "/web")
if not os.path.exists("temp/"):
os.makedirs("temp/")
print("- -- --- -- - Generating single-file HTML docs")
callp = ["pandoc", "-s", "-t", "html5", "-f", "markdown-smart", "--metadata", 'title="SoLoud ' + datestring + '"', "-H", "singlehtml_head.txt", "-B", "singlehtml_body.txt", "--toc", "--self-contained", "--default-image-extension=png", "-o", datestring + "/soloud_" + datestring + ".html"]
for x in src:
if x not in website_only:
callp.append(x)
subprocess.call(callp)
print("- -- --- -- - Generating web site")
for x in src:
subprocess.call(["pandoc", "--template=html.pandoc", "-f", "markdown-smart", "--metadata", 'title="SoLoud ' + datestring + ' ' + x[:len(x)-4] + '"', "-B", "htmlpre.txt", "-A", "htmlpost.txt", "--default-image-extension=png", x, "-o", datestring + "/web/" + x[:len(x)-3]+"html.bak"])
with open(datestring + "/web/" + x[:len(x)-3]+"html", "w") as file_out:
with open(datestring + "/web/" + x[:len(x)-3]+"html.bak", "r") as file_in:
for line in file_in:
file_out.write(line.replace('code>', 'code>\n').replace('::','::<wbr>').replace('\xc2','').replace('\xa0',''))
if x == "intro.mmd":
if os.path.isfile(datestring + "/web/index.html"):
os.remove(datestring + "/web/index.html")
os.rename(datestring + "/web/intro.html", datestring + "/web/index.html")
print("- -- --- -- - Generating epub")
callp = ["pandoc", "-N", "--toc", "--epub-cover-image=images/cover.png", "-t", "epub3", "--default-image-extension=png", "-f", "markdown-smart", "--css=epub.css", "--epub-metadata=metadata.xml", "-o", datestring + "/soloud_" + datestring + ".epub", "title.txt"]
for x in src:
if x not in website_only:
callp.append(x)
subprocess.call(callp)
print("- -- --- -- - Converting epub -> mobi (kindlegen_output.txt)")
with open('kindlegen_output.txt', 'w') as outfile:
subprocess.call(["kindlegen", datestring + "/soloud_" + datestring + ".epub", "-c2"], stdout=outfile)
print("- -- --- -- - Generating LaTex")
for x in src:
if x not in website_only:
subprocess.call(["pandoc", "-t", "latex", "--listings", "--default-image-extension=pdf", "--top-level-division=chapter", x, "-o", "temp/" + x[:len(x)-3]+"tex.orig"])
with open("temp/" + x[:len(x)-3]+"tex", "w") as file_out:
with open("temp/" + x[:len(x)-3]+"tex.orig", "r") as file_in:
for line in file_in:
file_out.write(line.replace('\\begin{longtable}[]{@{}ll@{}}', '\\begin{tabulary}{\\textwidth}{lJ}').replace('\\begin{longtable}[]{@{}lll@{}}', '\\begin{tabulary}{\\textwidth}{lJJ}').replace('\\begin{longtable}[]{@{}llll@{}}', '\\begin{tabulary}{\\textwidth}{lJJJ}').replace('\\endhead','').replace('\\end{longtable}','\\end{tabulary}'))
print("- -- --- -- - Generating pdf (xelatex_output.txt)")
with open('xelatex_output.txt', 'w') as outfile:
subprocess.call(["xelatex", "SoLoud.tex"], stdout=outfile)
print("- -- --- -- - Generating pdf pass 2..")
subprocess.call(["xelatex", "SoLoud.tex"], stdout=outfile)
shutil.move("SoLoud.pdf", datestring + "/soloud_" + datestring + ".pdf")
print("- -- --- -- - Cleanup..")
tempsuffix = ["aux", "toc", "out", "log", "lg", "4ct", "4tc", "idv", "tmp", "xdv", "xref", "bak"]
for suffix in tempsuffix:
for file in glob.glob("*."+suffix):
os.remove(file)
for file in glob.glob(datestring + "/web/*."+suffix):
os.remove(file)
for file in glob.glob("temp/*"):
os.remove(file)
os.rmdir("temp")
print("- -- --- -- - Done - " + datestring)
| 34.940789
| 356
| 0.583129
| 662
| 5,311
| 4.63142
| 0.326284
| 0.042401
| 0.011416
| 0.011742
| 0.272016
| 0.234181
| 0.227658
| 0.14775
| 0.124592
| 0.081539
| 0
| 0.004619
| 0.184711
| 5,311
| 151
| 357
| 35.172185
| 0.703464
| 0.024854
| 0
| 0.140625
| 0
| 0
| 0.433327
| 0.090962
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.007813
| 0.046875
| 0
| 0.046875
| 0.085938
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9ff46cab163507c14f9b26bf086ce4979f54a2c
| 4,972
|
py
|
Python
|
tools/unidatadownload.py
|
henryiii/backrefs
|
ec82844098bc3bdc7bcaa61b32f80271e6a73da6
|
[
"MIT"
] | null | null | null |
tools/unidatadownload.py
|
henryiii/backrefs
|
ec82844098bc3bdc7bcaa61b32f80271e6a73da6
|
[
"MIT"
] | null | null | null |
tools/unidatadownload.py
|
henryiii/backrefs
|
ec82844098bc3bdc7bcaa61b32f80271e6a73da6
|
[
"MIT"
] | null | null | null |
"""Download `Unicodedata` files."""
from __future__ import unicode_literals
import os
import zipfile
import codecs
from urllib.request import urlopen
__version__ = '2.2.0'
HOME = os.path.dirname(os.path.abspath(__file__))
def zip_unicode(output, version):
"""Zip the Unicode files."""
zipper = zipfile.ZipFile(os.path.join(output, 'unicodedata', '%s.zip' % version), 'w', zipfile.ZIP_DEFLATED)
target = os.path.join(output, 'unicodedata', version)
print('Zipping %s.zip...' % version)
for root, dirs, files in os.walk(target):
for file in files:
if file.endswith('.txt'):
zipper.write(os.path.join(root, file), arcname=file)
def unzip_unicode(output, version):
"""Unzip the Unicode files."""
unzipper = zipfile.ZipFile(os.path.join(output, 'unicodedata', '%s.zip' % version))
target = os.path.join(output, 'unicodedata', version)
print('Unzipping %s.zip...' % version)
os.makedirs(target)
for f in unzipper.namelist():
# Do I need backslash on windows? Or is it forward as well?
unzipper.extract(f, target)
def download_unicodedata(version, output=HOME, no_zip=False):
"""Download Unicode data scripts and blocks."""
ver = tuple([int(x) for x in version.split('.')])
files = [
'UnicodeData.txt',
'Scripts.txt',
'Blocks.txt',
'PropList.txt',
'DerivedCoreProperties.txt',
'DerivedNormalizationProps.txt',
'CompositionExclusions.txt',
'PropertyValueAliases.txt',
'PropertyAliases.txt',
'EastAsianWidth.txt',
'LineBreak.txt',
'HangulSyllableType.txt',
'DerivedAge.txt',
'auxiliary/WordBreakProperty.txt',
'auxiliary/SentenceBreakProperty.txt',
'auxiliary/GraphemeBreakProperty.txt',
'extracted/DerivedDecompositionType.txt',
'extracted/DerivedNumericType.txt',
'extracted/DerivedNumericValues.txt',
'extracted/DerivedJoiningType.txt',
'extracted/DerivedJoiningGroup.txt',
'extracted/DerivedCombiningClass.txt',
'emoji/emoji-data.txt'
]
files.append('ScriptExtensions.txt')
files.append('IndicPositionalCategory.txt')
files.append('IndicSyllabicCategory.txt')
files.append('BidiBrackets.txt')
if ver >= (11, 0, 0):
files.append('VerticalOrientation.txt')
http_url = 'http://www.unicode.org/Public/%s/ucd/' % version
ftp_url = 'ftp://ftp.unicode.org/Public/%s/ucd/' % version
destination = os.path.join(output, 'unicodedata', version)
if not os.path.exists(destination):
os.makedirs(destination)
zip_data = not no_zip
for f in files:
file_location = os.path.join(destination, os.path.basename(f))
retrieved = False
if not os.path.exists(file_location):
for url in (ftp_url, http_url):
furl = url + f
try:
print('Downloading: %s --> %s' % (furl, file_location))
response = urlopen(furl, timeout=30)
data = response.read()
except Exception:
print('Failed: %s' % url)
continue
with codecs.open(file_location, 'w', encoding='utf-8') as uf:
uf.write(data.decode('utf-8'))
retrieved = True
break
if not retrieved:
print('Failed to acquire all needed Unicode files!')
break
else:
retrieved = True
print('Skipping: found %s' % file_location)
if not retrieved:
zip_data = False
break
if zip_data and not os.path.exists(os.path.join(output, 'unicodedata', '%s.zip' % version)):
zip_unicode(output, version)
def get_unicodedata(version, output=HOME, no_zip=False):
"""Ensure we have Unicode data to generate Unicode tables."""
target = os.path.join(output, 'unicodedata', version)
zip_target = os.path.join(output, 'unicodedata', '%s.zip' % version)
if not os.path.exists(target) and os.path.exists(zip_target):
unzip_unicode(output, version)
# Download missing files if any. Zip if required.
download_unicodedata(version, output, no_zip)
if __name__ == '__main__':
import argparse
import unicodedata
parser = argparse.ArgumentParser(prog='unidatadownload', description='Generate a unicode property table.')
parser.add_argument('--version', action='version', version="%(prog)s " + __version__)
parser.add_argument('--output', default=HOME, help='Output file.')
parser.add_argument('--unicode-version', default=None, help='Force a specific Unicode version.')
args = parser.parse_args()
if args.unicode_version is None:
version = unicodedata.unidata_version
else:
version = args.unicode_version
get_unicodedata(version, output=args.output)
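Example invocations (the Unicode version and output directory are illustrative); with no arguments the script uses the Unicode version of the running interpreter and writes next to the script:
# python tools/unidatadownload.py
# python tools/unidatadownload.py --unicode-version 13.0.0 --output /tmp/ucd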
| 32.927152
| 112
| 0.627715
| 560
| 4,972
| 5.467857
| 0.310714
| 0.035271
| 0.032658
| 0.041803
| 0.173416
| 0.167864
| 0.126061
| 0.088178
| 0.033965
| 0.033965
| 0
| 0.002934
| 0.245977
| 4,972
| 150
| 113
| 33.146667
| 0.813817
| 0.056718
| 0
| 0.111111
| 0
| 0
| 0.248981
| 0.11602
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.064815
| 0
| 0.101852
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a00c6e63b457a75c0424a247757123821cb24fb
| 1,230
|
py
|
Python
|
aspx2url/aspx2url.py
|
marcocucinato/aspx2url
|
985a0e51865bb7be15618155ff9844730c2eaaf6
|
[
"MIT"
] | null | null | null |
aspx2url/aspx2url.py
|
marcocucinato/aspx2url
|
985a0e51865bb7be15618155ff9844730c2eaaf6
|
[
"MIT"
] | null | null | null |
aspx2url/aspx2url.py
|
marcocucinato/aspx2url
|
985a0e51865bb7be15618155ff9844730c2eaaf6
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import re, sys, glob, getopt, os
def usage():
print('aspx2url v1.0')
print('Usage:')
print(sys.argv[0]+' -d -h filename(s)')
print('-d : Delete original file')
print('-h : This help')
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hd")
except getopt.GetoptError as err:
print(str(err))
usage()
sys.exit(2)
deleteOriginal = False
for option,value in opts:
if option == '-h':
usage()
sys.exit()
elif option == '-d':
deleteOriginal = True
for origFilename in args:
with open(origFilename, "r") as f:
html_doc = f.read()
prog = re.compile(r'<mso:URL.*?>(.*?),.*?</mso:URL>', re.M)
result = prog.search(html_doc)
url = result.group(1)
filename = re.search(r'(.*?)\.aspx', origFilename).group(1)
fullFilename = filename+'.url'
with open(fullFilename, 'w') as out:
out.write('[InternetShortcut]\n')
out.write('URL='+url)
out.write('\n')
if deleteOriginal:
os.remove(origFilename)
if __name__ == '__main__':
main()
| 29.285714
| 77
| 0.530081
| 145
| 1,230
| 4.393103
| 0.496552
| 0.037677
| 0.037677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009357
| 0.304878
| 1,230
| 41
| 78
| 30
| 0.735673
| 0
| 0
| 0.052632
| 0
| 0
| 0.139024
| 0.030894
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.052632
| 0
| 0.105263
| 0.184211
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a00f65f8d9c6385beccc2cbd3c37ef660b0dc52
| 6,343
|
py
|
Python
|
tarentsocialwall/MongoDBClient.py
|
tarent/socialwall-backend
|
2f09b8ccdd62a15daaa281d6ff568cb6ef749ab6
|
[
"MIT"
] | null | null | null |
tarentsocialwall/MongoDBClient.py
|
tarent/socialwall-backend
|
2f09b8ccdd62a15daaa281d6ff568cb6ef749ab6
|
[
"MIT"
] | null | null | null |
tarentsocialwall/MongoDBClient.py
|
tarent/socialwall-backend
|
2f09b8ccdd62a15daaa281d6ff568cb6ef749ab6
|
[
"MIT"
] | 2
|
2019-08-06T14:14:44.000Z
|
2019-08-06T14:21:19.000Z
|
import random
from datetime import datetime
from passlib.handlers.sha2_crypt import sha256_crypt
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure
from tarentsocialwall.SocialPost import SocialPost
from tarentsocialwall.User import User
from tarentsocialwall.Util import Util
class MongoDBClient:
__instance = None
@staticmethod
def getInstance():
""" Static access method. """
if MongoDBClient.__instance == None:
MongoDBClient()
client = None
db = None
random_social_post_list = None
reset_counter = None
def __init__(self, uri):
# connect to MongoDB, change the << MONGODB URL >> to reflect your own connection string
self.client = MongoClient(uri)
self.db = self.client.socialPosts
try:
# The ismaster command is cheap and does not require auth.
self.client.admin.command('ismaster')
except ConnectionFailure:
print("Server not available")
if MongoDBClient.__instance != None:
raise Exception("This class is a singleton!")
else:
MongoDBClient.__instance = self
self.update_all_socialposts()
# write social_post into mongo
def write_social_post(self, social_post: SocialPost):
existing_dict = None
try:
existing_dict = self.db.socialPosts.find_one({'externalId': social_post.externalId})
except Exception as ex:
print(ex)
existing_dict = None
if existing_dict is None:
self.db.socialPosts.insert_one(social_post.__dict__)
else:
update_identifier = {'externalId': social_post.externalId, 'source': social_post.source}
self.db.socialPosts.replace_one(update_identifier, social_post.__dict__)
return 0
# read random social_post from list
def get_random_social_post(self) -> SocialPost:
if len(self.random_social_post_list) == 0:
return None
else:
if self.reset_counter >= len(self.random_social_post_list):
# when we went through all posts once we reset counter and shuffle list
# so we dont repeat the same circle of posts every time
self.reset_counter = 0
random.shuffle(self.random_social_post_list)
post = self.random_social_post_list[self.reset_counter]
self.reset_counter = self.reset_counter + 1
print(post)
if post is None:
return None
social_post = SocialPost()
social_post.set_dictionary(post)
return social_post
# read custom social_post from mongo
def get_custom_social_post(self):
doc = list(self.db.socialPosts.aggregate([{'$match': {'source': 'custom post'}}]))
print(list(doc))
if doc is None:
return None
social_post_list = []
for post in doc:
custom_post_item = SocialPost()
custom_post_item.set_dictionary(post)
social_post_list.append(custom_post_item)
return social_post_list
def delete_post(self, external_id):
removed = self.db.socialPosts.delete_one({'externalId': external_id})
print(removed)
def write_access_token(self, access_token, source):
existing_dict = self.db.storeAccessToken.find_one({'source': access_token})
if existing_dict is None:
identifier = {'access_token': access_token, 'source': source}
self.db.storeAccessToken.insert_one(identifier)
else:
update_identifier = {'access_token': access_token, 'source': source}
self.db.storeAccessToken.replace_one(update_identifier, access_token)
return 0
def read_access_token(self, source):
existing_dict = self.db.storeAccessToken.find_one({'source': source})
return existing_dict
def get_google_calendar_posts(self):
timestamp_var = datetime.utcnow().timestamp()
doc = list(self.db.socialPosts.aggregate([
{'$match': {'validFrom': {'$lte': timestamp_var},
'validTo': {'$gte': timestamp_var},
'source': 'Google calendar'}},
{'$sort': {'start': 1}}
]))
if doc is None:
return None
social_post_list = []
for post in doc:
custom_post_item = SocialPost()
custom_post_item.set_dictionary(post)
social_post_list.append(custom_post_item)
return social_post_list
def get_users(self):
users_db = list(self.db.socialwall_users.find())
if users_db is None:
return None
users = []
for item in users_db:
if item['username'] is not 'admin':
user = User()
user.set_dictionary(item)
users.append(user)
return users
def read_user(self, username):
return self.db.socialwall_users.find_one({'username': username})
def write_user(self, user: User):
username_dict = self.db.socialwall_users.find_one({'username': user.username})
if username_dict is None:
self.db.socialwall_users.insert_one(user.__dict__)
else:
update_identifier = {'username': user.username}
self.db.socialwall_users.replace_one(update_identifier, user.__dict__)
return 0
def delete_user(self, user: User):
self.db.socialwall_users.delete_one({'username': user['username']})
def init_admin(self):
random_string = Util.randomString()
user = User()
user.username = 'admin'
user.password = sha256_crypt.hash(random_string)
print("Admin password is '%s'" % random_string)
user.firstname = 'admin'
user.lastname = 'admin'
self.write_user(user)
#Get all valid social posts from db and shuffle them in random order
def update_all_socialposts(self):
timestamp = datetime.utcnow().timestamp()
self.random_social_post_list = list(self.db.socialPosts.aggregate(
[{'$match': {'validFrom': {'$lte': timestamp}, 'validTo': {'$gte': timestamp}}}]))
random.shuffle(self.random_social_post_list)
self.reset_counter = 0
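A minimal usage sketch; the connection URI is illustrative, and the class is expected to be constructed exactly once since it enforces a singleton:
client = MongoDBClient('mongodb://localhost:27017')  # hypothetical URI
post = client.get_random_social_post()
if post is not None:
    print(post.__dict__)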
| 33.036458
| 100
| 0.631405
| 731
| 6,343
| 5.235294
| 0.201094
| 0.073164
| 0.047557
| 0.036582
| 0.296316
| 0.267311
| 0.238045
| 0.195453
| 0.174549
| 0.117586
| 0
| 0.003274
| 0.277787
| 6,343
| 191
| 101
| 33.209424
| 0.832133
| 0.07189
| 0
| 0.282609
| 0
| 0
| 0.058233
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108696
| false
| 0.021739
| 0.057971
| 0.007246
| 0.311594
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a0102385be6299942545100e581de23300db9a4
| 76,697
|
py
|
Python
|
src/mount_efs/__init__.py
|
Sodki/efs-utils
|
493d9ea0dde93b560519b184219f6f71e32a8fcf
|
[
"MIT"
] | null | null | null |
src/mount_efs/__init__.py
|
Sodki/efs-utils
|
493d9ea0dde93b560519b184219f6f71e32a8fcf
|
[
"MIT"
] | null | null | null |
src/mount_efs/__init__.py
|
Sodki/efs-utils
|
493d9ea0dde93b560519b184219f6f71e32a8fcf
|
[
"MIT"
] | 12
|
2020-10-22T03:47:51.000Z
|
2022-03-19T18:09:59.000Z
|
#!/usr/bin/env python
#
# Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved.
#
# Licensed under the MIT License. See the LICENSE accompanying this file
# for the specific language governing permissions and limitations under
# the License.
#
#
# Copy this script to /sbin/mount.efs and make sure it is executable.
#
# You will be able to mount an EFS file system by its short name, by adding it
# to /etc/fstab. The syntax of an fstab entry is:
#
# [Device] [Mount Point] [File System Type] [Options] [Dump] [Pass]
#
# Add an entry like this:
#
# fs-deadbeef /mount_point efs _netdev 0 0
#
# Using the 'efs' type will cause '/sbin/mount.efs' to be called by 'mount -a'
# for this file system. The '_netdev' option tells the init system that the
# 'efs' type is a networked file system type. This has been tested with systemd
# (Amazon Linux 2, CentOS 7, RHEL 7, Debian 9, and Ubuntu 16.04), and upstart
# (Amazon Linux 2017.09).
#
# Once there is an entry in fstab, the file system can be mounted with:
#
# sudo mount /mount_point
#
# The script will add recommended mount options, if not provided in fstab.
import base64
import errno
import hashlib
import hmac
import json
import logging
import os
import pwd
import random
import re
import socket
import subprocess
import sys
import threading
import time
from contextlib import contextmanager
from datetime import datetime, timedelta
from logging.handlers import RotatingFileHandler
try:
import ConfigParser
from ConfigParser import NoOptionError, NoSectionError
except ImportError:
from configparser import ConfigParser, NoOptionError, NoSectionError
try:
from urllib.parse import quote_plus
except ImportError:
from urllib import quote_plus
try:
from urllib2 import URLError, HTTPError, build_opener, urlopen, Request, HTTPHandler
from urllib import urlencode
except ImportError:
from urllib.request import urlopen, Request
from urllib.error import URLError, HTTPError
from urllib.parse import urlencode
try:
import botocore.session
from botocore.exceptions import ClientError, NoCredentialsError, EndpointConnectionError
BOTOCORE_PRESENT = True
except ImportError:
BOTOCORE_PRESENT = False
VERSION = '1.28.2'
SERVICE = 'elasticfilesystem'
CONFIG_FILE = '/etc/amazon/efs/efs-utils.conf'
CONFIG_SECTION = 'mount'
CLIENT_INFO_SECTION = 'client-info'
CLIENT_SOURCE_STR_LEN_LIMIT = 100
CLOUDWATCH_LOG_SECTION = 'cloudwatch-log'
DEFAULT_CLOUDWATCH_LOG_GROUP = '/aws/efs/utils'
DEFAULT_RETENTION_DAYS = 14
# Cloudwatchlog agent dict includes cloudwatchlog botocore client, cloudwatchlog group name, cloudwatchlog stream name
CLOUDWATCHLOG_AGENT = None
LOG_DIR = '/var/log/amazon/efs'
LOG_FILE = 'mount.log'
STATE_FILE_DIR = '/var/run/efs'
PRIVATE_KEY_FILE = '/etc/amazon/efs/privateKey.pem'
DATE_ONLY_FORMAT = '%Y%m%d'
SIGV4_DATETIME_FORMAT = '%Y%m%dT%H%M%SZ'
CERT_DATETIME_FORMAT = '%y%m%d%H%M%SZ'
AWS_CREDENTIALS_FILE = os.path.expanduser(os.path.join('~' + pwd.getpwuid(os.getuid()).pw_name, '.aws', 'credentials'))
AWS_CONFIG_FILE = os.path.expanduser(os.path.join('~' + pwd.getpwuid(os.getuid()).pw_name, '.aws', 'config'))
CA_CONFIG_BODY = """dir = %s
RANDFILE = $dir/database/.rand
[ ca ]
default_ca = local_ca
[ local_ca ]
database = $dir/database/index.txt
serial = $dir/database/serial
private_key = %s
cert = $dir/certificate.pem
new_certs_dir = $dir/certs
default_md = sha256
preserve = no
policy = efsPolicy
x509_extensions = v3_ca
[ efsPolicy ]
CN = supplied
[ req ]
prompt = no
distinguished_name = req_distinguished_name
[ req_distinguished_name ]
CN = %s
%s
%s
%s
"""
# SigV4 Auth
ALGORITHM = 'AWS4-HMAC-SHA256'
AWS4_REQUEST = 'aws4_request'
HTTP_REQUEST_METHOD = 'GET'
CANONICAL_URI = '/'
CANONICAL_HEADERS_DICT = {
'host': '%s'
}
CANONICAL_HEADERS = '\n'.join(['%s:%s' % (k, v) for k, v in sorted(CANONICAL_HEADERS_DICT.items())])
SIGNED_HEADERS = ';'.join(CANONICAL_HEADERS_DICT.keys())
REQUEST_PAYLOAD = ''
FS_ID_RE = re.compile('^(?P<fs_id>fs-[0-9a-f]+)$')
EFS_FQDN_RE = re.compile(r'^(?P<fs_id>fs-[0-9a-f]+)\.efs\.(?P<region>[a-z0-9-]+)\.(?P<dns_name_suffix>[a-z0-9.]+)$')
AP_ID_RE = re.compile('^fsap-[0-9a-f]{17}$')
CREDENTIALS_KEYS = ['AccessKeyId', 'SecretAccessKey', 'Token']
ECS_URI_ENV = 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'
ECS_TASK_METADATA_API = 'http://169.254.170.2'
WEB_IDENTITY_ROLE_ARN_ENV = 'AWS_ROLE_ARN'
WEB_IDENTITY_TOKEN_FILE_ENV = 'AWS_WEB_IDENTITY_TOKEN_FILE'
STS_ENDPOINT_URL = 'https://sts.amazonaws.com/'
INSTANCE_METADATA_TOKEN_URL = 'http://169.254.169.254/latest/api/token'
INSTANCE_METADATA_SERVICE_URL = 'http://169.254.169.254/latest/dynamic/instance-identity/document/'
INSTANCE_IAM_URL = 'http://169.254.169.254/latest/meta-data/iam/security-credentials/'
SECURITY_CREDS_ECS_URI_HELP_URL = 'https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html'
SECURITY_CREDS_WEBIDENTITY_HELP_URL = 'https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html'
SECURITY_CREDS_IAM_ROLE_HELP_URL = 'https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html'
DEFAULT_STUNNEL_VERIFY_LEVEL = 2
DEFAULT_STUNNEL_CAFILE = '/etc/amazon/efs/efs-utils.crt'
NOT_BEFORE_MINS = 15
NOT_AFTER_HOURS = 3
EFS_ONLY_OPTIONS = [
'accesspoint',
'awscredsuri',
'awsprofile',
'cafile',
'iam',
'netns',
'noocsp',
'ocsp',
'tls',
'tlsport',
'verify'
]
UNSUPPORTED_OPTIONS = [
'capath'
]
STUNNEL_GLOBAL_CONFIG = {
'fips': 'no',
'foreground': 'yes',
'socket': [
'l:SO_REUSEADDR=yes',
'a:SO_BINDTODEVICE=lo',
],
}
STUNNEL_EFS_CONFIG = {
'client': 'yes',
'accept': '127.0.0.1:%s',
'connect': '%s:2049',
'sslVersion': 'TLSv1.2',
'renegotiation': 'no',
'TIMEOUTbusy': '20',
'TIMEOUTclose': '0',
'TIMEOUTidle': '70',
'delay': 'yes',
}
WATCHDOG_SERVICE = 'amazon-efs-mount-watchdog'
SYSTEM_RELEASE_PATH = '/etc/system-release'
OS_RELEASE_PATH = '/etc/os-release'
RHEL8_RELEASE_NAME = 'Red Hat Enterprise Linux release 8'
CENTOS8_RELEASE_NAME = 'CentOS Linux release 8'
FEDORA_RELEASE_NAME = 'Fedora release'
SUSE_RELEASE_NAME = 'openSUSE Leap'
SKIP_NO_LIBWRAP_RELEASES = [RHEL8_RELEASE_NAME, CENTOS8_RELEASE_NAME, FEDORA_RELEASE_NAME, SUSE_RELEASE_NAME]
def fatal_error(user_message, log_message=None, exit_code=1):
if log_message is None:
log_message = user_message
sys.stderr.write('%s\n' % user_message)
logging.error(log_message)
publish_cloudwatch_log(CLOUDWATCHLOG_AGENT, 'Mount failed, %s' % log_message)
sys.exit(exit_code)
def get_target_region(config):
def _fatal_error(message):
fatal_error('Error retrieving region. Please set the "region" parameter in the efs-utils configuration file.', message)
metadata_exception = 'Unknown error'
try:
return config.get(CONFIG_SECTION, 'region')
except NoOptionError:
pass
try:
return get_region_from_instance_metadata()
except Exception as e:
metadata_exception = e
logging.warning('Region not found in config file and metadata service call failed, falling back '
'to legacy "dns_name_format" check')
try:
region = get_region_from_legacy_dns_format(config)
sys.stdout.write('Warning: region obtained from "dns_name_format" field. Please set the "region" '
'parameter in the efs-utils configuration file.')
return region
except Exception:
logging.warning('Legacy check for region in "dns_name_format" failed')
_fatal_error(metadata_exception)
def get_region_from_instance_metadata():
instance_identity = get_instance_identity_info_from_instance_metadata('region')
if not instance_identity:
raise Exception("Cannot retrieve region from instance_metadata")
return instance_identity
def get_instance_identity_info_from_instance_metadata(property):
ec2_metadata_unsuccessful_resp = 'Unsuccessful retrieval of EC2 metadata at %s.' % INSTANCE_METADATA_SERVICE_URL
ec2_metadata_url_error_msg = 'Unable to reach %s to retrieve EC2 instance metadata.' % INSTANCE_METADATA_SERVICE_URL
instance_identity = url_request_helper(INSTANCE_METADATA_SERVICE_URL, ec2_metadata_unsuccessful_resp,
ec2_metadata_url_error_msg, retry_with_new_header_token=True)
if instance_identity:
try:
return instance_identity[property]
except KeyError as e:
logging.warning('%s not present in %s: %s' % (property, instance_identity, e))
except TypeError as e:
logging.warning('response %s is not a json object: %s' % (instance_identity, e))
return None
def get_region_from_legacy_dns_format(config):
"""
For backwards compatibility check dns_name_format to obtain the target region. This functionality
should only be used if region is not present in the config file and metadata calls fail.
"""
dns_name_format = config.get(CONFIG_SECTION, 'dns_name_format')
if '{region}' not in dns_name_format:
split_dns_name_format = dns_name_format.split('.')
if '{dns_name_suffix}' in dns_name_format:
return split_dns_name_format[-2]
elif 'amazonaws.com' in dns_name_format:
return split_dns_name_format[-3]
raise Exception('Region not found in dns_name_format')
def get_aws_ec2_metadata_token():
try:
opener = build_opener(HTTPHandler)
request = Request(INSTANCE_METADATA_TOKEN_URL)
request.add_header('X-aws-ec2-metadata-token-ttl-seconds', 21600)
request.get_method = lambda: 'PUT'
res = opener.open(request)
return res.read()
except NameError:
headers = {'X-aws-ec2-metadata-token-ttl-seconds': 21600}
req = Request(INSTANCE_METADATA_TOKEN_URL, headers=headers, method='PUT')
res = urlopen(req)
return res.read()
def get_aws_security_credentials(use_iam, awsprofile=None, aws_creds_uri=None):
"""
Lookup AWS security credentials (access key ID and secret access key). Adapted credentials provider chain from:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html and
https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html
"""
if not use_iam:
return None, None
# attempt to lookup AWS security credentials through the credentials URI the ECS agent generated
if aws_creds_uri:
return get_aws_security_credentials_from_ecs(aws_creds_uri, True)
# attempt to lookup AWS security credentials in AWS credentials file (~/.aws/credentials)
# and configs file (~/.aws/config) with given awsprofile
if awsprofile:
return get_aws_security_credentials_from_awsprofile(awsprofile, True)
# attempt to lookup AWS security credentials through AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variable
if ECS_URI_ENV in os.environ:
credentials, credentials_source = get_aws_security_credentials_from_ecs(os.environ[ECS_URI_ENV], False)
if credentials and credentials_source:
return credentials, credentials_source
# attempt to lookup AWS security credentials through AssumeRoleWithWebIdentity
# (e.g. for IAM Role for Service Accounts (IRSA) approach on EKS)
if WEB_IDENTITY_ROLE_ARN_ENV in os.environ and WEB_IDENTITY_TOKEN_FILE_ENV in os.environ:
credentials, credentials_source = get_aws_security_credentials_from_webidentity(
os.environ[WEB_IDENTITY_ROLE_ARN_ENV],
os.environ[WEB_IDENTITY_TOKEN_FILE_ENV],
False
)
if credentials and credentials_source:
return credentials, credentials_source
# attempt to lookup AWS security credentials with IAM role name attached to instance
# through IAM role name security credentials lookup uri
iam_role_name = get_iam_role_name()
if iam_role_name:
credentials, credentials_source = get_aws_security_credentials_from_instance_metadata(iam_role_name)
if credentials and credentials_source:
return credentials, credentials_source
error_msg = 'AWS Access Key ID and Secret Access Key are not found in AWS credentials file (%s), config file (%s), ' \
'from ECS credentials relative uri, or from the instance security credentials service' % \
(AWS_CREDENTIALS_FILE, AWS_CONFIG_FILE)
fatal_error(error_msg, error_msg)
def get_aws_security_credentials_from_awsprofile(awsprofile, is_fatal=False):
for file_path in [AWS_CREDENTIALS_FILE, AWS_CONFIG_FILE]:
if os.path.exists(file_path):
credentials = credentials_file_helper(file_path, awsprofile)
if credentials['AccessKeyId']:
return credentials, os.path.basename(file_path) + ':' + awsprofile
# Fail if credentials cannot be fetched from the given awsprofile
if is_fatal:
log_message = 'AWS security credentials not found in %s or %s under named profile [%s]' % \
(AWS_CREDENTIALS_FILE, AWS_CONFIG_FILE, awsprofile)
fatal_error(log_message)
else:
return None, None
def get_aws_security_credentials_from_ecs(aws_creds_uri, is_fatal=False):
ecs_uri = ECS_TASK_METADATA_API + aws_creds_uri
ecs_unsuccessful_resp = 'Unsuccessful retrieval of AWS security credentials at %s.' % ecs_uri
ecs_url_error_msg = 'Unable to reach %s to retrieve AWS security credentials. See %s for more info.' \
% (ecs_uri, SECURITY_CREDS_ECS_URI_HELP_URL)
ecs_security_dict = url_request_helper(ecs_uri, ecs_unsuccessful_resp, ecs_url_error_msg)
if ecs_security_dict and all(k in ecs_security_dict for k in CREDENTIALS_KEYS):
return ecs_security_dict, 'ecs:' + aws_creds_uri
# Fail if credentials cannot be fetched from the given aws_creds_uri
if is_fatal:
fatal_error(ecs_unsuccessful_resp, ecs_unsuccessful_resp)
else:
return None, None
def get_aws_security_credentials_from_webidentity(role_arn, token_file, is_fatal=False):
try:
with open(token_file, 'r') as f:
token = f.read()
except Exception as e:
if is_fatal:
unsuccessful_resp = 'Error reading token file %s: %s' % (token_file, e)
fatal_error(unsuccessful_resp, unsuccessful_resp)
else:
return None, None
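    # Call the STS AssumeRoleWithWebIdentity query API directly; the request is unsigned because
    # the web identity token itself authenticates the caller.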
webidentity_url = STS_ENDPOINT_URL + '?' + urlencode({
'Version': '2011-06-15',
'Action': 'AssumeRoleWithWebIdentity',
'RoleArn': role_arn,
'RoleSessionName': 'efs-mount-helper',
'WebIdentityToken': token
})
unsuccessful_resp = 'Unsuccessful retrieval of AWS security credentials at %s.' % STS_ENDPOINT_URL
url_error_msg = 'Unable to reach %s to retrieve AWS security credentials. See %s for more info.' % \
(STS_ENDPOINT_URL, SECURITY_CREDS_WEBIDENTITY_HELP_URL)
resp = url_request_helper(webidentity_url, unsuccessful_resp, url_error_msg, headers={'Accept': 'application/json'})
if resp:
creds = resp \
.get('AssumeRoleWithWebIdentityResponse', {}) \
.get('AssumeRoleWithWebIdentityResult', {}) \
.get('Credentials', {})
if all(k in creds for k in ['AccessKeyId', 'SecretAccessKey', 'SessionToken']):
return {
'AccessKeyId': creds['AccessKeyId'],
'SecretAccessKey': creds['SecretAccessKey'],
'Token': creds['SessionToken']
}, 'webidentity:' + ','.join([role_arn, token_file])
# Fail if credentials cannot be fetched from the given aws_creds_uri
if is_fatal:
fatal_error(unsuccessful_resp, unsuccessful_resp)
else:
return None, None
def get_aws_security_credentials_from_instance_metadata(iam_role_name):
security_creds_lookup_url = INSTANCE_IAM_URL + iam_role_name
unsuccessful_resp = 'Unsuccessful retrieval of AWS security credentials at %s.' % security_creds_lookup_url
url_error_msg = 'Unable to reach %s to retrieve AWS security credentials. See %s for more info.' % \
(security_creds_lookup_url, SECURITY_CREDS_IAM_ROLE_HELP_URL)
iam_security_dict = url_request_helper(security_creds_lookup_url, unsuccessful_resp,
url_error_msg, retry_with_new_header_token=True)
if iam_security_dict and all(k in iam_security_dict for k in CREDENTIALS_KEYS):
return iam_security_dict, 'metadata:'
else:
return None, None
def get_iam_role_name():
iam_role_unsuccessful_resp = 'Unsuccessful retrieval of IAM role name at %s.' % INSTANCE_IAM_URL
iam_role_url_error_msg = 'Unable to reach %s to retrieve IAM role name. See %s for more info.' % \
(INSTANCE_IAM_URL, SECURITY_CREDS_IAM_ROLE_HELP_URL)
iam_role_name = url_request_helper(INSTANCE_IAM_URL, iam_role_unsuccessful_resp,
iam_role_url_error_msg, retry_with_new_header_token=True)
return iam_role_name
def credentials_file_helper(file_path, awsprofile):
aws_credentials_configs = read_config(file_path)
credentials = {'AccessKeyId': None, 'SecretAccessKey': None, 'Token': None}
try:
access_key = aws_credentials_configs.get(awsprofile, 'aws_access_key_id')
secret_key = aws_credentials_configs.get(awsprofile, 'aws_secret_access_key')
session_token = aws_credentials_configs.get(awsprofile, 'aws_session_token')
credentials['AccessKeyId'] = access_key
credentials['SecretAccessKey'] = secret_key
credentials['Token'] = session_token
except NoOptionError as e:
if 'aws_access_key_id' in str(e) or 'aws_secret_access_key' in str(e):
logging.debug('aws_access_key_id or aws_secret_access_key not found in %s under named profile [%s]', file_path,
awsprofile)
if 'aws_session_token' in str(e):
logging.debug('aws_session_token not found in %s', file_path)
credentials['AccessKeyId'] = aws_credentials_configs.get(awsprofile, 'aws_access_key_id')
credentials['SecretAccessKey'] = aws_credentials_configs.get(awsprofile, 'aws_secret_access_key')
except NoSectionError:
logging.debug('No [%s] section found in config file %s', awsprofile, file_path)
return credentials
def get_aws_profile(options, use_iam):
awsprofile = options.get('awsprofile')
if not awsprofile and use_iam:
for file_path in [AWS_CREDENTIALS_FILE, AWS_CONFIG_FILE]:
aws_credentials_configs = read_config(file_path)
# check if aws access key id is found under [default] section in current file and return 'default' if so
try:
access_key = aws_credentials_configs.get('default', 'aws_access_key_id')
if access_key is not None:
return 'default'
except (NoSectionError, NoOptionError):
continue
return awsprofile
def url_request_helper(url, unsuccessful_resp, url_error_msg, headers={}, retry_with_new_header_token=False):
try:
req = Request(url)
for k, v in headers.items():
req.add_header(k, v)
request_resp = urlopen(req, timeout=1)
return get_resp_obj(request_resp, url, unsuccessful_resp)
except HTTPError as e:
        # For instances enabled with IMDSv2, an Unauthorized 401 error will be thrown.
        # To retrieve metadata, the request header must be embedded with a metadata token.
if e.code == 401 and retry_with_new_header_token:
token = get_aws_ec2_metadata_token()
req.add_header('X-aws-ec2-metadata-token', token)
request_resp = urlopen(req, timeout=1)
return get_resp_obj(request_resp, url, unsuccessful_resp)
err_msg = 'Unable to reach the url at %s: status=%d, reason is %s' % (url, e.code, e.reason)
except URLError as e:
err_msg = 'Unable to reach the url at %s, reason is %s' % (url, e.reason)
if err_msg:
logging.debug('%s %s', url_error_msg, err_msg)
return None
def get_resp_obj(request_resp, url, unsuccessful_resp):
if request_resp.getcode() != 200:
logging.debug(unsuccessful_resp + ' %s: ResponseCode=%d', url, request_resp.getcode())
return None
resp_body = request_resp.read()
resp_body_type = type(resp_body)
try:
if resp_body_type is str:
resp_dict = json.loads(resp_body)
else:
resp_dict = json.loads(resp_body.decode(request_resp.headers.get_content_charset() or 'us-ascii'))
return resp_dict
except ValueError as e:
logging.info('ValueError parsing "%s" into json: %s. Returning response body.' % (str(resp_body), e))
return resp_body if resp_body_type is str else resp_body.decode('utf-8')
def parse_options(options):
opts = {}
for o in options.split(','):
if '=' in o:
k, v = o.split('=')
opts[k] = v
else:
opts[o] = None
return opts
def get_tls_port_range(config):
lower_bound = config.getint(CONFIG_SECTION, 'port_range_lower_bound')
upper_bound = config.getint(CONFIG_SECTION, 'port_range_upper_bound')
if lower_bound >= upper_bound:
fatal_error('Configuration option "port_range_upper_bound" defined as %d '
'must be strictly greater than "port_range_lower_bound" defined as %d.'
% (upper_bound, lower_bound))
return lower_bound, upper_bound
def choose_tls_port(config, options):
if 'tlsport' in options:
ports_to_try = [int(options['tlsport'])]
else:
lower_bound, upper_bound = get_tls_port_range(config)
tls_ports = list(range(lower_bound, upper_bound))
# Choose a random midpoint, and then try ports in-order from there
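        # e.g. with a range of [20049, 21049) and mid == 300, try 20349..21048 and then 20049..20348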
mid = random.randrange(len(tls_ports))
ports_to_try = tls_ports[mid:] + tls_ports[:mid]
assert len(tls_ports) == len(ports_to_try)
sock = socket.socket()
for tls_port in ports_to_try:
try:
sock.bind(('localhost', tls_port))
sock.close()
return tls_port
except socket.error:
continue
sock.close()
if 'tlsport' in options:
fatal_error('Specified port [%s] is unavailable. Try selecting a different port.' % options['tlsport'])
else:
fatal_error('Failed to locate an available port in the range [%d, %d], try specifying a different port range in %s'
% (lower_bound, upper_bound, CONFIG_FILE))
def is_ocsp_enabled(config, options):
if 'ocsp' in options:
return True
elif 'noocsp' in options:
return False
else:
return config.getboolean(CONFIG_SECTION, 'stunnel_check_cert_validity')
def get_mount_specific_filename(fs_id, mountpoint, tls_port):
return '%s.%s.%d' % (fs_id, os.path.abspath(mountpoint).replace(os.sep, '.').lstrip('.'), tls_port)
def serialize_stunnel_config(config, header=None):
lines = []
if header:
lines.append('[%s]' % header)
for k, v in config.items():
if type(v) is list:
for item in v:
lines.append('%s = %s' % (k, item))
else:
lines.append('%s = %s' % (k, v))
return lines
def add_stunnel_ca_options(efs_config, config, options):
if 'cafile' in options:
stunnel_cafile = options['cafile']
else:
try:
stunnel_cafile = config.get(CONFIG_SECTION, 'stunnel_cafile')
except NoOptionError:
logging.debug('No CA file configured, using default CA file %s', DEFAULT_STUNNEL_CAFILE)
stunnel_cafile = DEFAULT_STUNNEL_CAFILE
if not os.path.exists(stunnel_cafile):
fatal_error('Failed to find certificate authority file for verification',
'Failed to find CAfile "%s"' % stunnel_cafile)
efs_config['CAfile'] = stunnel_cafile
def is_stunnel_option_supported(stunnel_output, stunnel_option_name):
supported = False
for line in stunnel_output:
if line.startswith(stunnel_option_name):
supported = True
break
if not supported:
logging.warning('stunnel does not support "%s"', stunnel_option_name)
return supported
def get_version_specific_stunnel_options():
stunnel_command = [_stunnel_bin(), '-help']
proc = subprocess.Popen(stunnel_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
proc.wait()
_, err = proc.communicate()
stunnel_output = err.splitlines()
check_host_supported = is_stunnel_option_supported(stunnel_output, b'checkHost')
ocsp_aia_supported = is_stunnel_option_supported(stunnel_output, b'OCSPaia')
return check_host_supported, ocsp_aia_supported
def _stunnel_bin():
return find_command_path('stunnel',
'Please install it following the instructions at '
'https://docs.aws.amazon.com/efs/latest/ug/using-amazon-efs-utils.html#upgrading-stunnel')
def find_command_path(command, install_method):
try:
env_path = '/sbin:/usr/sbin:/usr/local/sbin:/root/bin:/usr/local/bin:/usr/bin:/bin'
os.putenv('PATH', env_path)
path = subprocess.check_output(['which', command])
except subprocess.CalledProcessError as e:
fatal_error('Failed to locate %s in %s - %s' % (command, env_path, install_method), e)
return path.strip().decode()
def get_system_release_version():
try:
with open(SYSTEM_RELEASE_PATH) as f:
return f.read().strip()
except IOError:
logging.debug('Unable to read %s', SYSTEM_RELEASE_PATH)
try:
with open(OS_RELEASE_PATH) as f:
for line in f:
if 'PRETTY_NAME' in line:
return line.split('=')[1].strip()
except IOError:
logging.debug('Unable to read %s', OS_RELEASE_PATH)
return 'unknown'
def write_stunnel_config_file(config, state_file_dir, fs_id, mountpoint, tls_port, dns_name, verify_level, ocsp_enabled,
options, log_dir=LOG_DIR, cert_details=None):
"""
Serializes stunnel configuration to a file. Unfortunately this does not conform to Python's config file format, so we have to
hand-serialize it.
"""
mount_filename = get_mount_specific_filename(fs_id, mountpoint, tls_port)
global_config = dict(STUNNEL_GLOBAL_CONFIG)
if config.getboolean(CONFIG_SECTION, 'stunnel_debug_enabled'):
global_config['debug'] = 'debug'
if config.has_option(CONFIG_SECTION, 'stunnel_logs_file'):
global_config['output'] = config.get(CONFIG_SECTION, 'stunnel_logs_file').replace('{fs_id}', fs_id)
else:
global_config['output'] = os.path.join(log_dir, '%s.stunnel.log' % mount_filename)
efs_config = dict(STUNNEL_EFS_CONFIG)
efs_config['accept'] = efs_config['accept'] % tls_port
efs_config['connect'] = efs_config['connect'] % dns_name
efs_config['verify'] = verify_level
if verify_level > 0:
add_stunnel_ca_options(efs_config, config, options)
if cert_details:
efs_config['cert'] = cert_details['certificate']
efs_config['key'] = cert_details['privateKey']
check_host_supported, ocsp_aia_supported = get_version_specific_stunnel_options()
tls_controls_message = 'WARNING: Your client lacks sufficient controls to properly enforce TLS. Please upgrade stunnel, ' \
'or disable "%%s" in %s.\nSee %s for more detail.' % (CONFIG_FILE,
'https://docs.aws.amazon.com/console/efs/troubleshooting-tls')
if config.getboolean(CONFIG_SECTION, 'stunnel_check_cert_hostname'):
if check_host_supported:
efs_config['checkHost'] = dns_name
else:
fatal_error(tls_controls_message % 'stunnel_check_cert_hostname')
# Only use the config setting if the override is not set
if ocsp_enabled:
if ocsp_aia_supported:
efs_config['OCSPaia'] = 'yes'
else:
fatal_error(tls_controls_message % 'stunnel_check_cert_validity')
system_release_version = get_system_release_version()
if not any(release in system_release_version for release in SKIP_NO_LIBWRAP_RELEASES):
efs_config['libwrap'] = 'no'
stunnel_config = '\n'.join(serialize_stunnel_config(global_config) + serialize_stunnel_config(efs_config, 'efs'))
logging.debug('Writing stunnel configuration:\n%s', stunnel_config)
stunnel_config_file = os.path.join(state_file_dir, 'stunnel-config.%s' % mount_filename)
with open(stunnel_config_file, 'w') as f:
f.write(stunnel_config)
return stunnel_config_file
def write_tls_tunnel_state_file(fs_id, mountpoint, tls_port, tunnel_pid, command, files, state_file_dir, cert_details=None):
"""
Return the name of the temporary file containing TLS tunnel state, prefixed with a '~'. This file needs to be renamed to a
non-temporary version following a successful mount.
"""
state_file = '~' + get_mount_specific_filename(fs_id, mountpoint, tls_port)
state = {
'pid': tunnel_pid,
'cmd': command,
'files': files,
}
if cert_details:
state.update(cert_details)
with open(os.path.join(state_file_dir, state_file), 'w') as f:
json.dump(state, f)
return state_file
def test_tunnel_process(tunnel_proc, fs_id):
tunnel_proc.poll()
if tunnel_proc.returncode is not None:
out, err = tunnel_proc.communicate()
fatal_error('Failed to initialize TLS tunnel for %s' % fs_id,
'Failed to start TLS tunnel (errno=%d). stdout="%s" stderr="%s"'
% (tunnel_proc.returncode, out.strip(), err.strip()))
def poll_tunnel_process(tunnel_proc, fs_id, mount_completed):
"""
    Poll the tunnel process health every 0.5s during the mount attempt to fail fast if the tunnel dies. Since this is not
    called from the main thread, if the tunnel fails, exit uncleanly with os._exit.
"""
while not mount_completed.is_set():
try:
test_tunnel_process(tunnel_proc, fs_id)
except SystemExit as e:
os._exit(e.code)
mount_completed.wait(.5)
def get_init_system(comm_file='/proc/1/comm'):
init_system = 'unknown'
try:
with open(comm_file) as f:
init_system = f.read().strip()
except IOError:
logging.warning('Unable to read %s', comm_file)
logging.debug('Identified init system: %s', init_system)
return init_system
def check_network_target(fs_id):
with open(os.devnull, 'w') as devnull:
rc = subprocess.call(['systemctl', 'status', 'network.target'], stdout=devnull, stderr=devnull, close_fds=True)
if rc != 0:
fatal_error('Failed to mount %s because the network was not yet available, add "_netdev" to your mount options' % fs_id,
exit_code=0)
def check_network_status(fs_id, init_system):
if init_system != 'systemd':
logging.debug('Not testing network on non-systemd init systems')
return
check_network_target(fs_id)
def start_watchdog(init_system):
if init_system == 'init':
proc = subprocess.Popen(
['/sbin/status', WATCHDOG_SERVICE], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
        status, _ = proc.communicate()
        # communicate() returns bytes on Python 3; decode so the substring checks below work
        status = status.decode('utf-8') if isinstance(status, bytes) else status
if 'stop' in status:
with open(os.devnull, 'w') as devnull:
subprocess.Popen(['/sbin/start', WATCHDOG_SERVICE], stdout=devnull, stderr=devnull, close_fds=True)
elif 'start' in status:
logging.debug('%s is already running', WATCHDOG_SERVICE)
elif init_system == 'systemd':
rc = subprocess.call(['systemctl', 'is-active', '--quiet', WATCHDOG_SERVICE], close_fds=True)
if rc != 0:
with open(os.devnull, 'w') as devnull:
subprocess.Popen(['systemctl', 'start', WATCHDOG_SERVICE], stdout=devnull, stderr=devnull, close_fds=True)
else:
logging.debug('%s is already running', WATCHDOG_SERVICE)
else:
error_message = 'Could not start %s, unrecognized init system "%s"' % (WATCHDOG_SERVICE, init_system)
sys.stderr.write('%s\n' % error_message)
logging.warning(error_message)
def create_required_directory(config, directory):
mode = 0o750
try:
mode_str = config.get(CONFIG_SECTION, 'state_file_dir_mode')
try:
mode = int(mode_str, 8)
except ValueError:
logging.warning('Bad state_file_dir_mode "%s" in config file "%s"', mode_str, CONFIG_FILE)
except NoOptionError:
pass
try:
os.makedirs(directory, mode)
except OSError as e:
if errno.EEXIST != e.errno or not os.path.isdir(directory):
raise
@contextmanager
def bootstrap_tls(config, init_system, dns_name, fs_id, mountpoint, options, state_file_dir=STATE_FILE_DIR):
tls_port = choose_tls_port(config, options)
# override the tlsport option so that we can later override the port the NFS client uses to connect to stunnel.
# if the user has specified tlsport=X at the command line this will just re-set tlsport to X.
options['tlsport'] = tls_port
use_iam = 'iam' in options
ap_id = options.get('accesspoint')
cert_details = {}
security_credentials = None
client_info = get_client_info(config)
if use_iam:
aws_creds_uri = options.get('awscredsuri')
if aws_creds_uri:
kwargs = {'aws_creds_uri': aws_creds_uri}
else:
kwargs = {'awsprofile': get_aws_profile(options, use_iam)}
security_credentials, credentials_source = get_aws_security_credentials(use_iam, **kwargs)
if credentials_source:
cert_details['awsCredentialsMethod'] = credentials_source
if ap_id:
cert_details['accessPoint'] = ap_id
# additional symbol appended to avoid naming collisions
cert_details['mountStateDir'] = get_mount_specific_filename(fs_id, mountpoint, tls_port) + '+'
# common name for certificate signing request is max 64 characters
cert_details['commonName'] = socket.gethostname()[0:64]
cert_details['region'] = get_target_region(config)
cert_details['certificateCreationTime'] = create_certificate(config, cert_details['mountStateDir'],
cert_details['commonName'], cert_details['region'], fs_id,
security_credentials, ap_id, client_info,
base_path=state_file_dir)
cert_details['certificate'] = os.path.join(state_file_dir, cert_details['mountStateDir'], 'certificate.pem')
cert_details['privateKey'] = get_private_key_path()
cert_details['fsId'] = fs_id
start_watchdog(init_system)
if not os.path.exists(state_file_dir):
create_required_directory(config, state_file_dir)
verify_level = int(options.get('verify', DEFAULT_STUNNEL_VERIFY_LEVEL))
ocsp_enabled = is_ocsp_enabled(config, options)
stunnel_config_file = write_stunnel_config_file(config, state_file_dir, fs_id, mountpoint, tls_port, dns_name, verify_level,
ocsp_enabled, options, cert_details=cert_details)
tunnel_args = [_stunnel_bin(), stunnel_config_file]
if 'netns' in options:
tunnel_args = ['nsenter', '--net=' + options['netns']] + tunnel_args
# launch the tunnel in a process group so if it has any child processes, they can be killed easily by the mount watchdog
logging.info('Starting TLS tunnel: "%s"', ' '.join(tunnel_args))
tunnel_proc = subprocess.Popen(
tunnel_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setsid, close_fds=True)
logging.info('Started TLS tunnel, pid: %d', tunnel_proc.pid)
temp_tls_state_file = write_tls_tunnel_state_file(fs_id, mountpoint, tls_port, tunnel_proc.pid, tunnel_args,
[stunnel_config_file], state_file_dir, cert_details=cert_details)
try:
yield tunnel_proc
finally:
os.rename(os.path.join(state_file_dir, temp_tls_state_file), os.path.join(state_file_dir, temp_tls_state_file[1:]))
def get_nfs_mount_options(options):
# If you change these options, update the man page as well at man/mount.efs.8
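    # With no user-supplied overrides the resulting option string is (assuming dict insertion order is kept):
    #   'nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport'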
if 'nfsvers' not in options and 'vers' not in options:
options['nfsvers'] = '4.1'
if 'rsize' not in options:
options['rsize'] = '1048576'
if 'wsize' not in options:
options['wsize'] = '1048576'
if 'soft' not in options and 'hard' not in options:
options['hard'] = None
if 'timeo' not in options:
options['timeo'] = '600'
if 'retrans' not in options:
options['retrans'] = '2'
if 'noresvport' not in options:
options['noresvport'] = None
if 'tls' in options:
options['port'] = options['tlsport']
def to_nfs_option(k, v):
if v is None:
return k
return '%s=%s' % (str(k), str(v))
nfs_options = [to_nfs_option(k, v) for k, v in options.items() if k not in EFS_ONLY_OPTIONS]
return ','.join(nfs_options)
def mount_nfs(dns_name, path, mountpoint, options):
if 'tls' in options:
mount_path = '127.0.0.1:%s' % path
else:
mount_path = '%s:%s' % (dns_name, path)
command = ['/sbin/mount.nfs4', mount_path, mountpoint, '-o', get_nfs_mount_options(options)]
if 'netns' in options:
command = ['nsenter', '--net=' + options['netns']] + command
logging.info('Executing: "%s"', ' '.join(command))
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
out, err = proc.communicate()
if proc.returncode == 0:
message = 'Successfully mounted %s at %s' % (dns_name, mountpoint)
logging.info(message)
publish_cloudwatch_log(CLOUDWATCHLOG_AGENT, message)
else:
message = 'Failed to mount %s at %s: returncode=%d, stderr="%s"' % (dns_name, mountpoint, proc.returncode, err.strip())
fatal_error(err.strip(), message, proc.returncode)
def usage(out, exit_code=1):
out.write('Usage: mount.efs [--version] [-h|--help] <fsname> <mountpoint> [-o <options>]\n')
sys.exit(exit_code)
def parse_arguments_early_exit(args=None):
"""Parse arguments, checking for early exit conditions only"""
if args is None:
args = sys.argv
if '-h' in args[1:] or '--help' in args[1:]:
usage(out=sys.stdout, exit_code=0)
if '--version' in args[1:]:
sys.stdout.write('%s Version: %s\n' % (args[0], VERSION))
sys.exit(0)
def parse_arguments(config, args=None):
"""Parse arguments, return (fsid, path, mountpoint, options)"""
if args is None:
args = sys.argv
fsname = None
mountpoint = None
options = {}
if len(args) > 1:
fsname = args[1]
if len(args) > 2:
mountpoint = args[2]
if len(args) > 4 and '-o' in args[:-1]:
options_index = args.index('-o') + 1
options = parse_options(args[options_index])
if not fsname or not mountpoint:
usage(out=sys.stderr)
fs_id, path = match_device(config, fsname)
return fs_id, path, mountpoint, options
def get_client_info(config):
client_info = {}
# source key/value pair in config file
if config.has_option(CLIENT_INFO_SECTION, 'source'):
client_source = config.get(CLIENT_INFO_SECTION, 'source')
if 0 < len(client_source) <= CLIENT_SOURCE_STR_LEN_LIMIT:
client_info['source'] = client_source
return client_info
def create_certificate(config, mount_name, common_name, region, fs_id, security_credentials, ap_id, client_info,
base_path=STATE_FILE_DIR):
current_time = get_utc_now()
tls_paths = tls_paths_dictionary(mount_name, base_path)
certificate_config = os.path.join(tls_paths['mount_dir'], 'config.conf')
certificate_signing_request = os.path.join(tls_paths['mount_dir'], 'request.csr')
certificate = os.path.join(tls_paths['mount_dir'], 'certificate.pem')
ca_dirs_check(config, tls_paths['database_dir'], tls_paths['certs_dir'])
ca_supporting_files_check(tls_paths['index'], tls_paths['index_attr'], tls_paths['serial'], tls_paths['rand'])
private_key = check_and_create_private_key(base_path)
if security_credentials:
public_key = os.path.join(tls_paths['mount_dir'], 'publicKey.pem')
create_public_key(private_key, public_key)
create_ca_conf(certificate_config, common_name, tls_paths['mount_dir'], private_key, current_time, region, fs_id,
security_credentials, ap_id, client_info)
create_certificate_signing_request(certificate_config, private_key, certificate_signing_request)
not_before = get_certificate_timestamp(current_time, minutes=-NOT_BEFORE_MINS)
not_after = get_certificate_timestamp(current_time, hours=NOT_AFTER_HOURS)
cmd = 'openssl ca -startdate %s -enddate %s -selfsign -batch -notext -config %s -in %s -out %s' % \
(not_before, not_after, certificate_config, certificate_signing_request, certificate)
subprocess_call(cmd, 'Failed to create self-signed client-side certificate')
return current_time.strftime(CERT_DATETIME_FORMAT)
def get_private_key_path():
"""Wrapped for mocking purposes in unit tests"""
return PRIVATE_KEY_FILE
def check_and_create_private_key(base_path=STATE_FILE_DIR):
# Creating RSA private keys is slow, so we will create one private key and allow mounts to share it.
# This means, however, that we have to include a locking mechanism to ensure that the private key is
# atomically created, as mounts occurring in parallel may try to create the key simultaneously.
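    # The lock relies on os.O_CREAT | os.O_EXCL: creating the lock file is atomic, so a concurrent
    # mount gets EEXIST and retries every 50 ms (see do_with_lock below).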
key = get_private_key_path()
@contextmanager
def open_lock_file():
lock_file = os.path.join(base_path, 'efs-utils-lock')
f = os.open(lock_file, os.O_CREAT | os.O_DSYNC | os.O_EXCL | os.O_RDWR)
try:
lock_file_contents = 'PID: %s' % os.getpid()
os.write(f, lock_file_contents.encode('utf-8'))
yield f
finally:
os.close(f)
os.remove(lock_file)
def do_with_lock(function):
while True:
try:
with open_lock_file():
return function()
except OSError as e:
if e.errno == errno.EEXIST:
logging.info('Failed to take out private key creation lock, sleeping 50 ms')
time.sleep(0.05)
else:
raise
def generate_key():
if os.path.isfile(key):
return
cmd = 'openssl genpkey -algorithm RSA -out %s -pkeyopt rsa_keygen_bits:3072' % key
subprocess_call(cmd, 'Failed to create private key')
read_only_mode = 0o400
os.chmod(key, read_only_mode)
do_with_lock(generate_key)
return key
def create_certificate_signing_request(config_path, private_key, csr_path):
cmd = 'openssl req -new -config %s -key %s -out %s' % (config_path, private_key, csr_path)
subprocess_call(cmd, 'Failed to create certificate signing request (csr)')
def create_ca_conf(config_path, common_name, directory, private_key, date,
region, fs_id, security_credentials, ap_id, client_info):
"""Populate ca/req configuration file with fresh configurations at every mount since SigV4 signature can change"""
public_key_path = os.path.join(directory, 'publicKey.pem')
ca_extension_body = ca_extension_builder(ap_id, security_credentials, fs_id, client_info)
efs_client_auth_body = efs_client_auth_builder(public_key_path, security_credentials['AccessKeyId'],
security_credentials['SecretAccessKey'], date, region, fs_id,
security_credentials['Token']) if security_credentials else ''
efs_client_info_body = efs_client_info_builder(client_info) if client_info else ''
full_config_body = CA_CONFIG_BODY % (directory, private_key, common_name, ca_extension_body,
efs_client_auth_body, efs_client_info_body)
with open(config_path, 'w') as f:
f.write(full_config_body)
return full_config_body
def ca_extension_builder(ap_id, security_credentials, fs_id, client_info):
ca_extension_str = '[ v3_ca ]\nsubjectKeyIdentifier = hash'
if ap_id:
ca_extension_str += '\n1.3.6.1.4.1.4843.7.1 = ASN1:UTF8String:' + ap_id
if security_credentials:
ca_extension_str += '\n1.3.6.1.4.1.4843.7.2 = ASN1:SEQUENCE:efs_client_auth'
ca_extension_str += '\n1.3.6.1.4.1.4843.7.3 = ASN1:UTF8String:' + fs_id
if client_info:
ca_extension_str += '\n1.3.6.1.4.1.4843.7.4 = ASN1:SEQUENCE:efs_client_info'
return ca_extension_str
def efs_client_auth_builder(public_key_path, access_key_id, secret_access_key, date, region, fs_id, session_token=None):
public_key_hash = get_public_key_sha1(public_key_path)
canonical_request = create_canonical_request(public_key_hash, date, access_key_id, region, fs_id, session_token)
string_to_sign = create_string_to_sign(canonical_request, date, region)
signature = calculate_signature(string_to_sign, date, secret_access_key, region)
efs_client_auth_str = '[ efs_client_auth ]'
efs_client_auth_str += '\naccessKeyId = UTF8String:' + access_key_id
efs_client_auth_str += '\nsignature = OCTETSTRING:' + signature
efs_client_auth_str += '\nsigv4DateTime = UTCTIME:' + date.strftime(CERT_DATETIME_FORMAT)
if session_token:
efs_client_auth_str += '\nsessionToken = EXPLICIT:0,UTF8String:' + session_token
return efs_client_auth_str
def efs_client_info_builder(client_info):
efs_client_info_str = '[ efs_client_info ]'
for key, value in client_info.items():
efs_client_info_str += '\n%s = UTF8String:%s' % (key, value)
return efs_client_info_str
def create_public_key(private_key, public_key):
cmd = 'openssl rsa -in %s -outform PEM -pubout -out %s' % (private_key, public_key)
subprocess_call(cmd, 'Failed to create public key')
def subprocess_call(cmd, error_message):
"""Helper method to run shell openssl command and to handle response error messages"""
retry_times = 3
for retry in range(retry_times):
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
(output, err) = process.communicate()
rc = process.poll()
if rc != 0:
logging.error('Command %s failed, rc=%s, stdout="%s", stderr="%s"' % (cmd, rc, output, err), exc_info=True)
try:
process.kill()
except OSError:
# Silently fail if the subprocess has exited already
pass
else:
return output, err
error_message = '%s, error is: %s' % (error_message, err)
fatal_error(error_message, error_message)
def ca_dirs_check(config, database_dir, certs_dir):
"""Check if mount's database and certs directories exist and if not, create directories (also create all intermediate
directories if they don't exist)."""
if not os.path.exists(database_dir):
create_required_directory(config, database_dir)
if not os.path.exists(certs_dir):
create_required_directory(config, certs_dir)
def ca_supporting_files_check(index_path, index_attr_path, serial_path, rand_path):
"""Recreate all supporting openssl ca and req files if they're not present in their respective directories"""
if not os.path.isfile(index_path):
open(index_path, 'w').close()
if not os.path.isfile(index_attr_path):
with open(index_attr_path, 'w+') as f:
f.write('unique_subject = no')
if not os.path.isfile(serial_path):
with open(serial_path, 'w+') as f:
f.write('00')
if not os.path.isfile(rand_path):
open(rand_path, 'w').close()
def get_certificate_timestamp(current_time, **kwargs):
updated_time = current_time + timedelta(**kwargs)
return updated_time.strftime(CERT_DATETIME_FORMAT)
def get_utc_now():
"""
Wrapped for patching purposes in unit tests
"""
return datetime.utcnow()
def assert_root():
if os.geteuid() != 0:
sys.stderr.write('only root can run mount.efs\n')
sys.exit(1)
def read_config(config_file=CONFIG_FILE):
try:
p = ConfigParser.SafeConfigParser()
except AttributeError:
p = ConfigParser()
p.read(config_file)
return p
def bootstrap_logging(config, log_dir=LOG_DIR):
raw_level = config.get(CONFIG_SECTION, 'logging_level')
levels = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}
level = levels.get(raw_level.lower())
level_error = False
if not level:
# delay logging error about malformed log level until after logging is configured
level_error = True
level = logging.INFO
max_bytes = config.getint(CONFIG_SECTION, 'logging_max_bytes')
file_count = config.getint(CONFIG_SECTION, 'logging_file_count')
handler = RotatingFileHandler(os.path.join(log_dir, LOG_FILE), maxBytes=max_bytes, backupCount=file_count)
handler.setFormatter(logging.Formatter(fmt='%(asctime)s - %(levelname)s - %(message)s'))
logger = logging.getLogger()
logger.setLevel(level)
logger.addHandler(handler)
if level_error:
logging.error('Malformed logging level "%s", setting logging level to %s', raw_level, level)
def get_dns_name(config, fs_id):
def _validate_replacement_field_count(format_str, expected_ct):
if format_str.count('{') != expected_ct or format_str.count('}') != expected_ct:
raise ValueError('DNS name format has an incorrect number of replacement fields')
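    # e.g. a dns_name_format of '{fs_id}.efs.{region}.{dns_name_suffix}' renders to something like
    # 'fs-deadbeef.efs.us-east-1.amazonaws.com' (values here are hypothetical)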
dns_name_format = config.get(CONFIG_SECTION, 'dns_name_format')
if '{fs_id}' not in dns_name_format:
raise ValueError('DNS name format must include {fs_id}')
format_args = {'fs_id': fs_id}
expected_replacement_field_ct = 1
if '{region}' in dns_name_format:
expected_replacement_field_ct += 1
format_args['region'] = get_target_region(config)
if '{dns_name_suffix}' in dns_name_format:
expected_replacement_field_ct += 1
config_section = CONFIG_SECTION
region = format_args.get('region')
if region:
region_specific_config_section = '%s.%s' % (CONFIG_SECTION, region)
if config.has_section(region_specific_config_section):
config_section = region_specific_config_section
format_args['dns_name_suffix'] = config.get(config_section, 'dns_name_suffix')
logging.debug("Using dns_name_suffix %s in config section [%s]", format_args.get('dns_name_suffix'), config_section)
_validate_replacement_field_count(dns_name_format, expected_replacement_field_ct)
dns_name = dns_name_format.format(**format_args)
try:
socket.gethostbyname(dns_name)
except socket.gaierror:
fatal_error('Failed to resolve "%s" - check that your file system ID is correct.\nSee %s for more detail.'
% (dns_name, 'https://docs.aws.amazon.com/console/efs/mount-dns-name'),
'Failed to resolve "%s"' % dns_name)
return dns_name
def tls_paths_dictionary(mount_name, base_path=STATE_FILE_DIR):
tls_dict = {
'mount_dir': os.path.join(base_path, mount_name),
# every mount will have its own ca mode assets due to lack of multi-threading support in openssl
'database_dir': os.path.join(base_path, mount_name, 'database'),
'certs_dir': os.path.join(base_path, mount_name, 'certs'),
'index': os.path.join(base_path, mount_name, 'database/index.txt'),
'index_attr': os.path.join(base_path, mount_name, 'database/index.txt.attr'),
'serial': os.path.join(base_path, mount_name, 'database/serial'),
'rand': os.path.join(base_path, mount_name, 'database/.rand')
}
return tls_dict
def get_public_key_sha1(public_key):
# truncating public key to remove the header and footer '-----(BEGIN|END) PUBLIC KEY-----'
with open(public_key, 'r') as f:
lines = f.readlines()
lines = lines[1:-1]
key = ''.join(lines)
key = bytearray(base64.b64decode(key))
# Parse the public key to pull out the actual key material by looking for the key BIT STRING
# Example:
# 0:d=0 hl=4 l= 418 cons: SEQUENCE
# 4:d=1 hl=2 l= 13 cons: SEQUENCE
# 6:d=2 hl=2 l= 9 prim: OBJECT :rsaEncryption
# 17:d=2 hl=2 l= 0 prim: NULL
# 19:d=1 hl=4 l= 399 prim: BIT STRING
cmd = 'openssl asn1parse -inform PEM -in %s' % public_key
output, err = subprocess_call(cmd, 'Unable to ASN1 parse public key file, %s, correctly' % public_key)
key_line = ''
for line in output.splitlines():
if 'BIT STRING' in line.decode('utf-8'):
key_line = line.decode('utf-8')
if not key_line:
err_msg = 'Public key file, %s, is incorrectly formatted' % public_key
fatal_error(err_msg, err_msg)
key_line = key_line.replace(' ', '')
# DER encoding TLV (Tag, Length, Value)
# - the first octet (byte) is the tag (type)
# - the next octets are the length - "definite form"
# - the first octet always has the high order bit (8) set to 1
# - the remaining 127 bits are used to encode the number of octets that follow
# - the following octets encode, as big-endian, the length (which may be 0) as a number of octets
# - the remaining octets are the "value" aka content
#
# For a BIT STRING, the first octet of the value is used to signify the number of unused bits that exist in the last
# content byte. Note that this is explicitly excluded from the SubjectKeyIdentifier hash, per
# https://tools.ietf.org/html/rfc5280#section-4.2.1.2
#
# Example:
# 0382018f00...<subjectPublicKey>
# - 03 - BIT STRING tag
# - 82 - 2 length octets to follow (ignore high order bit)
# - 018f - length of 399
# - 00 - no unused bits in the last content byte
offset = int(key_line.split(':')[0])
key = key[offset:]
num_length_octets = key[1] & 0b01111111
# Exclude the tag (1), length (1 + num_length_octets), and number of unused bits (1)
offset = 1 + 1 + num_length_octets + 1
key = key[offset:]
sha1 = hashlib.sha1()
sha1.update(key)
return sha1.hexdigest()
def create_canonical_request(public_key_hash, date, access_key, region, fs_id, session_token=None):
"""
Create a Canonical Request - https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
"""
formatted_datetime = date.strftime(SIGV4_DATETIME_FORMAT)
credential = quote_plus(access_key + '/' + get_credential_scope(date, region))
request = HTTP_REQUEST_METHOD + '\n'
request += CANONICAL_URI + '\n'
request += create_canonical_query_string(public_key_hash, credential, formatted_datetime, session_token) + '\n'
request += CANONICAL_HEADERS % fs_id + '\n'
request += SIGNED_HEADERS + '\n'
sha256 = hashlib.sha256()
sha256.update(REQUEST_PAYLOAD.encode())
request += sha256.hexdigest()
return request
def create_canonical_query_string(public_key_hash, credential, formatted_datetime, session_token=None):
canonical_query_params = {
'Action': 'Connect',
# Public key hash is included in canonical request to tie the signature to a specific key pair to avoid replay attacks
'PublicKeyHash': quote_plus(public_key_hash),
'X-Amz-Algorithm': ALGORITHM,
'X-Amz-Credential': credential,
'X-Amz-Date': quote_plus(formatted_datetime),
'X-Amz-Expires': 86400,
'X-Amz-SignedHeaders': SIGNED_HEADERS,
}
if session_token:
canonical_query_params['X-Amz-Security-Token'] = quote_plus(session_token)
    # Cannot use urllib's urlencode here because it would re-encode the already-quoted values
return '&'.join(['%s=%s' % (k, v) for k, v in sorted(canonical_query_params.items())])
def create_string_to_sign(canonical_request, date, region):
"""
Create a String to Sign - https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html
"""
string_to_sign = ALGORITHM + '\n'
string_to_sign += date.strftime(SIGV4_DATETIME_FORMAT) + '\n'
string_to_sign += get_credential_scope(date, region) + '\n'
sha256 = hashlib.sha256()
sha256.update(canonical_request.encode())
string_to_sign += sha256.hexdigest()
return string_to_sign
def calculate_signature(string_to_sign, date, secret_access_key, region):
"""
Calculate the Signature - https://docs.aws.amazon.com/general/latest/gr/sigv4-calculate-signature.html
"""
def _sign(key, msg):
return hmac.new(key, msg.encode('utf-8'), hashlib.sha256)
key_date = _sign(('AWS4' + secret_access_key).encode('utf-8'), date.strftime(DATE_ONLY_FORMAT)).digest()
add_region = _sign(key_date, region).digest()
add_service = _sign(add_region, SERVICE).digest()
signing_key = _sign(add_service, 'aws4_request').digest()
return _sign(signing_key, string_to_sign).hexdigest()
def get_credential_scope(date, region):
return '/'.join([date.strftime(DATE_ONLY_FORMAT), region, SERVICE, AWS4_REQUEST])
def match_device(config, device):
"""Return the EFS id and the remote path to mount"""
try:
remote, path = device.split(':', 1)
except ValueError:
remote = device
path = '/'
if FS_ID_RE.match(remote):
return remote, path
try:
primary, secondaries, _ = socket.gethostbyname_ex(remote)
hostnames = list(filter(lambda e: e is not None, [primary] + secondaries))
except socket.gaierror:
create_default_cloudwatchlog_agent_if_not_exist(config)
fatal_error(
'Failed to resolve "%s" - check that the specified DNS name is a CNAME record resolving to a valid EFS DNS '
'name' % remote,
'Failed to resolve "%s"' % remote
)
if not hostnames:
create_default_cloudwatchlog_agent_if_not_exist(config)
fatal_error(
'The specified domain name "%s" did not resolve to an EFS mount target' % remote
)
for hostname in hostnames:
efs_fqdn_match = EFS_FQDN_RE.match(hostname)
if efs_fqdn_match:
fs_id = efs_fqdn_match.group('fs_id')
expected_dns_name = get_dns_name(config, fs_id)
# check that the DNS name of the mount target matches exactly the DNS name the CNAME resolves to
if hostname == expected_dns_name:
return fs_id, path
else:
create_default_cloudwatchlog_agent_if_not_exist(config)
fatal_error('The specified CNAME "%s" did not resolve to a valid DNS name for an EFS mount target. '
'Please refer to the EFS documentation for mounting with DNS names for examples: %s'
% (remote, 'https://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html'))
def is_nfs_mount(mountpoint):
cmd = ['stat', '-f', '-L', '-c', '%T', mountpoint]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
output, _ = p.communicate()
return output and 'nfs' in str(output)
def mount_tls(config, init_system, dns_name, path, fs_id, mountpoint, options):
if os.path.ismount(mountpoint) and is_nfs_mount(mountpoint):
sys.stdout.write("%s is already mounted, please run 'mount' command to verify\n" % mountpoint)
logging.warning("%s is already mounted, mount aborted" % mountpoint)
return
with bootstrap_tls(config, init_system, dns_name, fs_id, mountpoint, options) as tunnel_proc:
mount_completed = threading.Event()
t = threading.Thread(target=poll_tunnel_process, args=(tunnel_proc, fs_id, mount_completed))
t.daemon = True
t.start()
mount_nfs(dns_name, path, mountpoint, options)
mount_completed.set()
t.join()
def check_unsupported_options(options):
for unsupported_option in UNSUPPORTED_OPTIONS:
if unsupported_option in options:
warn_message = 'The "%s" option is not supported and has been ignored, as amazon-efs-utils relies on a built-in ' \
'trust store.' % unsupported_option
sys.stderr.write('WARN: %s\n' % warn_message)
logging.warning(warn_message)
del options[unsupported_option]
def check_options_validity(options):
if 'tls' in options:
if 'port' in options:
fatal_error('The "port" and "tls" options are mutually exclusive')
if 'tlsport' in options:
try:
int(options['tlsport'])
except ValueError:
fatal_error('tlsport option [%s] is not an integer' % options['tlsport'])
if 'ocsp' in options and 'noocsp' in options:
fatal_error('The "ocsp" and "noocsp" options are mutually exclusive')
if 'accesspoint' in options:
if 'tls' not in options:
fatal_error('The "tls" option is required when mounting via "accesspoint"')
if not AP_ID_RE.match(options['accesspoint']):
fatal_error('Access Point ID %s is malformed' % options['accesspoint'])
if 'iam' in options and 'tls' not in options:
fatal_error('The "tls" option is required when mounting via "iam"')
if 'awsprofile' in options and 'iam' not in options:
fatal_error('The "iam" option is required when mounting with named profile option, "awsprofile"')
if 'awscredsuri' in options and 'iam' not in options:
fatal_error('The "iam" option is required when mounting with "awscredsuri"')
if 'awscredsuri' in options and 'awsprofile' in options:
fatal_error('The "awscredsuri" and "awsprofile" options are mutually exclusive')
def bootstrap_cloudwatch_logging(config, fs_id=None):
if not check_if_cloudwatch_log_enabled(config):
return None
cloudwatchlog_client = get_botocore_client(config, 'logs')
if not cloudwatchlog_client:
return None
cloudwatchlog_config = get_cloudwatchlog_config(config, fs_id)
log_group_name = cloudwatchlog_config.get('log_group_name')
log_stream_name = cloudwatchlog_config.get('log_stream_name')
retention_days = cloudwatchlog_config.get('retention_days')
group_creation_completed = create_cloudwatch_log_group(cloudwatchlog_client, log_group_name)
if not group_creation_completed:
return None
put_retention_policy_completed = put_cloudwatch_log_retention_policy(cloudwatchlog_client, log_group_name, retention_days)
if not put_retention_policy_completed:
return None
stream_creation_completed = create_cloudwatch_log_stream(cloudwatchlog_client, log_group_name, log_stream_name)
if not stream_creation_completed:
return None
return {
'client': cloudwatchlog_client,
'log_group_name': log_group_name,
'log_stream_name': log_stream_name
}
def create_default_cloudwatchlog_agent_if_not_exist(config):
if not check_if_cloudwatch_log_enabled(config):
return None
global CLOUDWATCHLOG_AGENT
if not CLOUDWATCHLOG_AGENT:
CLOUDWATCHLOG_AGENT = bootstrap_cloudwatch_logging(config)
def get_botocore_client(config, service):
if not BOTOCORE_PRESENT:
logging.error('Failed to import botocore, please install botocore first.')
return None
session = botocore.session.get_session()
region = get_target_region(config)
iam_role_name = get_iam_role_name()
if iam_role_name:
credentials, _ = get_aws_security_credentials_from_instance_metadata(iam_role_name)
if credentials:
return session.create_client(service, aws_access_key_id=credentials['AccessKeyId'],
aws_secret_access_key=credentials['SecretAccessKey'],
aws_session_token=credentials['Token'], region_name=region)
return session.create_client(service, region_name=region)
def get_cloudwatchlog_config(config, fs_id=None):
log_group_name = DEFAULT_CLOUDWATCH_LOG_GROUP
if config.has_option(CLOUDWATCH_LOG_SECTION, 'log_group_name'):
log_group_name = config.get(CLOUDWATCH_LOG_SECTION, 'log_group_name')
retention_days = DEFAULT_RETENTION_DAYS
if config.has_option(CLOUDWATCH_LOG_SECTION, 'retention_in_days'):
retention_days = config.get(CLOUDWATCH_LOG_SECTION, 'retention_in_days')
log_stream_name = get_cloudwatch_log_stream_name(fs_id)
return {
'log_group_name': log_group_name,
'retention_days': int(retention_days),
'log_stream_name': log_stream_name
}
def get_cloudwatch_log_stream_name(fs_id=None):
instance_id = get_instance_identity_info_from_instance_metadata('instanceId')
if instance_id and fs_id:
log_stream_name = '%s - %s - mount.log' % (fs_id, instance_id)
elif instance_id:
log_stream_name = '%s - mount.log' % (instance_id)
elif fs_id:
log_stream_name = '%s - mount.log' % (fs_id)
else:
log_stream_name = 'default - mount.log'
return log_stream_name
def check_if_cloudwatch_log_enabled(config):
if config.has_option(CLOUDWATCH_LOG_SECTION, 'enabled'):
return config.getboolean(CLOUDWATCH_LOG_SECTION, 'enabled')
return False
def cloudwatch_create_log_group_helper(cloudwatchlog_client, log_group_name):
cloudwatchlog_client.create_log_group(
logGroupName=log_group_name
)
logging.info('Created cloudwatch log group %s' % log_group_name)
def create_cloudwatch_log_group(cloudwatchlog_client, log_group_name):
try:
cloudwatch_create_log_group_helper(cloudwatchlog_client, log_group_name)
except ClientError as e:
exception = e.response['Error']['Code']
if exception == 'ResourceAlreadyExistsException':
            logging.debug('Log group %s already exists, %s' % (log_group_name, e.response))
return True
elif exception == 'LimitExceededException':
logging.error('Reached the maximum number of log groups that can be created, %s' % e.response)
return False
elif exception == 'OperationAbortedException':
logging.debug('Multiple requests to update the same log group %s were in conflict, %s' % (log_group_name, e.response))
return False
elif exception == 'InvalidParameterException':
logging.error('Log group name %s is specified incorrectly, %s' % (log_group_name, e.response))
return False
else:
handle_general_botocore_exceptions(e)
return False
except NoCredentialsError as e:
logging.warning('Credentials are not properly configured, %s' % e)
return False
except EndpointConnectionError as e:
logging.warning('Could not connect to the endpoint, %s' % e)
return False
except Exception as e:
logging.warning('Unknown error, %s.' % e)
return False
return True
def cloudwatch_put_retention_policy_helper(cloudwatchlog_client, log_group_name, retention_days):
cloudwatchlog_client.put_retention_policy(
logGroupName=log_group_name,
retentionInDays=retention_days
)
logging.debug('Set cloudwatch log group retention days to %s' % retention_days)
def put_cloudwatch_log_retention_policy(cloudwatchlog_client, log_group_name, retention_days):
try:
cloudwatch_put_retention_policy_helper(cloudwatchlog_client, log_group_name, retention_days)
except ClientError as e:
exception = e.response['Error']['Code']
if exception == 'ResourceNotFoundException':
logging.error('Log group %s does not exist, %s' % (log_group_name, e.response))
return False
elif exception == 'OperationAbortedException':
logging.debug('Multiple requests to update the same log group %s were in conflict, %s' % (log_group_name, e.response))
return False
elif exception == 'InvalidParameterException':
logging.error('Either parameter log group name %s or retention in days %s is specified incorrectly, %s'
% (log_group_name, retention_days, e.response))
return False
else:
handle_general_botocore_exceptions(e)
return False
except NoCredentialsError as e:
logging.warning('Credentials are not properly configured, %s' % e)
return False
except EndpointConnectionError as e:
logging.warning('Could not connect to the endpoint, %s' % e)
return False
except Exception as e:
logging.warning('Unknown error, %s.' % e)
return False
return True
def cloudwatch_create_log_stream_helper(cloudwatchlog_client, log_group_name, log_stream_name):
cloudwatchlog_client.create_log_stream(
logGroupName=log_group_name,
logStreamName=log_stream_name
)
logging.info('Created cloudwatch log stream %s in log group %s' % (log_stream_name, log_group_name))
def create_cloudwatch_log_stream(cloudwatchlog_client, log_group_name, log_stream_name):
try:
cloudwatch_create_log_stream_helper(cloudwatchlog_client, log_group_name, log_stream_name)
except ClientError as e:
exception = e.response['Error']['Code']
if exception == 'ResourceAlreadyExistsException':
            logging.debug('Log stream %s already exists in log group %s, %s' % (log_stream_name, log_group_name, e.response))
return True
elif exception == 'InvalidParameterException':
logging.error('Either parameter log group name %s or log stream name %s is specified incorrectly, %s'
% (log_group_name, log_stream_name, e.response))
return False
elif exception == 'ResourceNotFoundException':
logging.error('Log group %s does not exist, %s' % (log_group_name, e.response))
return False
else:
handle_general_botocore_exceptions(e)
return False
except NoCredentialsError as e:
logging.warning('Credentials are not properly configured, %s' % e)
return False
except EndpointConnectionError as e:
logging.warning('Could not connect to the endpoint, %s' % e)
return False
except Exception as e:
logging.warning('Unknown error, %s.' % e)
return False
return True
def cloudwatch_put_log_events_helper(cloudwatchlog_agent, message, token=None):
kwargs = {
'logGroupName': cloudwatchlog_agent.get('log_group_name'),
'logStreamName': cloudwatchlog_agent.get('log_stream_name'),
'logEvents': [
{
'timestamp': int(round(time.time() * 1000)),
'message': message
}
]
}
if token:
kwargs['sequenceToken'] = token
cloudwatchlog_agent.get('client').put_log_events(**kwargs)
def publish_cloudwatch_log(cloudwatchlog_agent, message):
if not cloudwatchlog_agent or not cloudwatchlog_agent.get('client'):
return False
token = get_log_stream_next_token(cloudwatchlog_agent)
try:
cloudwatch_put_log_events_helper(cloudwatchlog_agent, message, token)
except ClientError as e:
exception = e.response['Error']['Code']
if exception == 'InvalidSequenceTokenException':
logging.debug('The sequence token is not valid, %s' % e.response)
return False
elif exception == 'InvalidParameterException':
logging.debug('One of the parameter to put log events is not valid, %s' % e.response)
return False
elif exception == 'DataAlreadyAcceptedException':
logging.debug('The event %s was already logged, %s' % (message, e.response))
return False
elif exception == 'UnrecognizedClientException':
            logging.debug('The most likely cause is an invalid AWS access key ID or secret key, %s' % e.response)
return False
elif exception == 'ResourceNotFoundException':
logging.error('Either log group %s or log stream %s does not exist, %s'
% (cloudwatchlog_agent.get('log_group_name'), cloudwatchlog_agent.get('log_stream_name'), e.response))
return False
else:
logging.debug('Unexpected error: %s' % e)
return False
except NoCredentialsError as e:
logging.warning('Credentials are not properly configured, %s' % e)
return False
except EndpointConnectionError as e:
logging.warning('Could not connect to the endpoint, %s' % e)
return False
except Exception as e:
logging.warning('Unknown error, %s.' % e)
return False
return True
def cloudwatch_describe_log_streams_helper(cloudwatchlog_agent):
return cloudwatchlog_agent.get('client').describe_log_streams(
logGroupName=cloudwatchlog_agent.get('log_group_name'),
logStreamNamePrefix=cloudwatchlog_agent.get('log_stream_name')
)
def get_log_stream_next_token(cloudwatchlog_agent):
try:
response = cloudwatch_describe_log_streams_helper(cloudwatchlog_agent)
except ClientError as e:
exception = e.response['Error']['Code']
if exception == 'InvalidParameterException':
logging.debug('Either parameter log group name %s or log stream name %s is specified incorrectly, %s'
% (cloudwatchlog_agent.get('log_group_name'), cloudwatchlog_agent.get('log_stream_name'), e.response))
elif exception == 'ResourceNotFoundException':
logging.debug('Either log group %s or log stream %s does not exist, %s'
% (cloudwatchlog_agent.get('log_group_name'), cloudwatchlog_agent.get('log_stream_name'), e.response))
else:
handle_general_botocore_exceptions(e)
return None
except NoCredentialsError as e:
logging.warning('Credentials are not properly configured, %s' % e)
return None
except EndpointConnectionError as e:
logging.warning('Could not connect to the endpoint, %s' % e)
return None
except Exception as e:
logging.warning('Unknown error, %s' % e)
return None
try:
log_stream = response['logStreams'][0]
return log_stream.get('uploadSequenceToken')
except (IndexError, TypeError, KeyError):
pass
return None
def handle_general_botocore_exceptions(error):
exception = error.response['Error']['Code']
if exception == 'ServiceUnavailableException':
logging.debug('The service cannot complete the request, %s' % error.response)
elif exception == 'AccessDeniedException':
logging.debug('User is not authorized to perform the action, %s' % error.response)
else:
logging.debug('Unexpected error: %s' % error)
def main():
parse_arguments_early_exit()
assert_root()
config = read_config()
bootstrap_logging(config)
fs_id, path, mountpoint, options = parse_arguments(config)
logging.info('version=%s options=%s', VERSION, options)
global CLOUDWATCHLOG_AGENT
CLOUDWATCHLOG_AGENT = bootstrap_cloudwatch_logging(config, fs_id)
check_unsupported_options(options)
check_options_validity(options)
init_system = get_init_system()
check_network_status(fs_id, init_system)
dns_name = get_dns_name(config, fs_id)
if 'tls' in options:
mount_tls(config, init_system, dns_name, path, fs_id, mountpoint, options)
else:
mount_nfs(dns_name, path, mountpoint, options)
if '__main__' == __name__:
main()
| 38.560583
| 130
| 0.680535
| 10,057
| 76,697
| 4.939147
| 0.107984
| 0.005234
| 0.010871
| 0.005818
| 0.40829
| 0.331186
| 0.284038
| 0.230407
| 0.202988
| 0.164556
| 0
| 0.007997
| 0.222316
| 76,697
| 1,988
| 131
| 38.57998
| 0.8248
| 0.092663
| 0
| 0.225897
| 0
| 0.014075
| 0.198127
| 0.023725
| 0
| 0
| 0
| 0
| 0.002111
| 1
| 0.069669
| false
| 0.002815
| 0.024631
| 0.003519
| 0.194229
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a01fe7f065ff8fbb40e8cf44137b52463e1417c
| 1,010
|
py
|
Python
|
upcfcardsearch/c8.py
|
ProfessorSean/Kasutamaiza
|
7a69a69258f67bbb88bebbac6da4e6e1434947e6
|
[
"MIT"
] | null | null | null |
upcfcardsearch/c8.py
|
ProfessorSean/Kasutamaiza
|
7a69a69258f67bbb88bebbac6da4e6e1434947e6
|
[
"MIT"
] | null | null | null |
upcfcardsearch/c8.py
|
ProfessorSean/Kasutamaiza
|
7a69a69258f67bbb88bebbac6da4e6e1434947e6
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
from discord.utils import get
class c8(commands.Cog, name="c8"):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name='Sacrosanct_Devouring_Pyre', aliases=['c8'])
async def example_embed(self, ctx):
embed = discord.Embed(title='Sacrosanct Devouring Pyre',
color=0xBC5A84)
embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2308475.jpg')
embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3', inline=True)
embed.add_field(name='Type', value='Trap/Normal', inline=False)
embed.add_field(name='Card Effect', value='Tribute 2 monsters, then target 2 monsters; destroy those targets. You can only activate 1 "Sacrosanct Devouring Pyre" per turn.', inline=False)
embed.set_footer(text='Set Code: ANCF')
await ctx.send(embed=embed)
def setup(bot: commands.Bot):
bot.add_cog(c8(bot))
| 43.913043
| 195
| 0.687129
| 138
| 1,010
| 4.934783
| 0.565217
| 0.048458
| 0.101322
| 0.07489
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032767
| 0.184158
| 1,010
| 23
| 196
| 43.913043
| 0.793689
| 0
| 0
| 0
| 0
| 0.055556
| 0.323442
| 0.0455
| 0
| 0
| 0.007913
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.166667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a023f8c8af70de4e0b8e937c5773e7da489fab5
| 2,627
|
py
|
Python
|
SVMmodel_withSKF.py
|
tameney22/DCI-Capstone
|
6f59541f16030bfa3f0a706fd9f0e4394e1ee974
|
[
"MIT"
] | null | null | null |
SVMmodel_withSKF.py
|
tameney22/DCI-Capstone
|
6f59541f16030bfa3f0a706fd9f0e4394e1ee974
|
[
"MIT"
] | null | null | null |
SVMmodel_withSKF.py
|
tameney22/DCI-Capstone
|
6f59541f16030bfa3f0a706fd9f0e4394e1ee974
|
[
"MIT"
] | null | null | null |
"""
This script is where the preprocessed data is used to train the SVM model to
perform the classification. I am using Stratified K-Fold Cross Validation to
prevent bias and/or any imbalance that could affect the model's accuracy.
REFERENCE: https://medium.com/@bedigunjit/simple-guide-to-text-classification-nlp-using-svm-and-naive-bayes-with-python-421db3a72d34
"""
import numpy as np
import pandas as pd
from sklearn import model_selection, svm
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import StratifiedKFold
# Open preprocessed csv
df = pd.read_csv("preprocessed.csv", index_col=0)
print(df.head())
print("SPLITTING TRAIN-TEST")
x = df["Text"]
y = df["PublicationTitle"]
train_x, test_x, train_y, test_y = model_selection.train_test_split(
df["Text"], df["PublicationTitle"], test_size=0.3)
# Label encode the target variable to transform categorical data of string
# type into numerical values the model can understand
encoder = LabelEncoder()
# train_y = encoder.fit_transform(train_y)
# test_y = encoder.fit_transform(test_y)
# Word vectorization
# turning a collection of text documents into numerical feature vectors
# We are using Term Frequency - Inverse Document
tfidf_vect = TfidfVectorizer(max_features=5000)
tfidf_vect.fit(df["Text"])
# train_x_tfidf = tfidf_vect.transform(train_x)
# test_x_tfidf = tfidf_vect.transform(test_x)
x_tfidf = tfidf_vect.transform(df["Text"])
y = encoder.fit_transform(y)
# print(tfidf_vect.vocabulary_)
# Fit the training dataset to the classifier
print("TRAINING THE MODEL")
SVM = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto')
skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=1)
accuracies = []
fold = 1
for train_idx, test_idx in skf.split(x, y):
print("Working on fold", fold)
x_train_fold, x_test_fold = x_tfidf[train_idx], x_tfidf[test_idx]
y_train_fold, y_test_fold = y[train_idx], y[test_idx]
SVM.fit(x_train_fold, y_train_fold)
acc = SVM.score(x_test_fold, y_test_fold)
print("Acc", fold, ":", acc)
accuracies.append(acc)
fold += 1
print("ACCURACIES:", accuracies)
print("Max Accuracy:", np.max(accuracies))
print("Min Accuracy:", np.min(accuracies))
print("Mean of Accuracies:", np.mean(accuracies))
print("STD of Accuracies:", np.std(accuracies))
# print("RUNNING TEST PREDICTIONS")
# predictions = SVM.predict(test_x_tfidf)
# # Calculate accuracy score
# accuracy = accuracy_score(test_y, predictions)
# print("Accuracy:", str(accuracy * 100) + "%")
| 31.650602
| 132
| 0.760183
| 396
| 2,627
| 4.876263
| 0.386364
| 0.027965
| 0.01709
| 0.031072
| 0.037286
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011329
| 0.12638
| 2,627
| 82
| 133
| 32.036585
| 0.830065
| 0.412257
| 0
| 0
| 0
| 0
| 0.134957
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.184211
| 0
| 0.184211
| 0.263158
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a048666edf3e5d75a0ded13639990b1d6bed2e8
| 33,554
|
py
|
Python
|
src/consensus.py
|
dschwoerer/samscripts
|
caee697e96a0639b7a4f9db02f70f4fd92b39ef9
|
[
"MIT"
] | null | null | null |
src/consensus.py
|
dschwoerer/samscripts
|
caee697e96a0639b7a4f9db02f70f4fd92b39ef9
|
[
"MIT"
] | null | null | null |
src/consensus.py
|
dschwoerer/samscripts
|
caee697e96a0639b7a4f9db02f70f4fd92b39ef9
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# Copyright Ivan Sovic, 2015. www.sovic.org
#
# Creates a pileup from a given SAM/BAM file, and calls consensus bases (or variants).
import os
import sys
import operator
import subprocess
def increase_in_dict(dict_counter, value):
try:
dict_counter[value] += 1
except KeyError:
dict_counter[value] = 1
def process_mpileup_line(
line,
line_number,
ret_variant_list,
ret_vcf_list,
ret_snp_count,
ret_insertion_count,
ret_deletion_count,
ret_num_undercovered_bases,
ret_num_called_bases,
ret_num_correct_bases,
ret_coverage_sum,
coverage_threshold,
verbose=False,
):
# Split the line, and perform a sanity check.
split_line = line.strip().split("\t")
if len(split_line) < 5 or len(split_line) > 6:
sys.stderr.write(line + "\n")
return 0
ref_name = split_line[0]
position = split_line[1]
ref_base = split_line[2]
coverage = split_line[3]
original_bases = split_line[4]
if len(split_line) == 6:
qualities = split_line[5]
bases = ""
# Replace the '.' and ',' signs with the actual reference base.
i = 0
while i < len(original_bases):
if original_bases[i] == "." or original_bases[i] == ",":
bases += ref_base
else:
bases += original_bases[i]
i += 1
base_counts = {}
insertion_count = 0
current_base_deletion_count = 0
deletion_count = 0
insertion_event_counts = {}
deletion_event_counts = {}
end_counts = 0
# print 'position: %s' % position;
# print 'bases: "%s"' % bases;
# print 'line_number: %d' % line_number;
# print line;
# print '';
# sys.stdout.flush();
i = 0
while i < len(bases):
base = bases[i]
if base == r"^":
# This is the starting position of a read. It encodes two
# symbols: '^' marking the read start and a char marking the
# mapping quality of the read.
# increase_in_dict(base_counts, bases[i + 1].upper());
i += 1
# Increase only by 1, because we have i += 1 down there.
elif base == r"$":
# This marks the end of a read.
end_counts += 1
elif base == r"*":
# This is a deletion, just count it.
current_base_deletion_count += 1
elif base == r"-":
# This marks the occurrence of deletions. It is a composite object
# consisting of: the special character '-', the number of the deleted bases
# and the actual bases that are deleted (these bases follow the current position).
# In our approach, we ignore this case, because we count deletions one by one
# through the '*' character.
# Get the number of bases that need to be skipped in the string.
j = i + 1
while bases[j] in "0123456789":
j += 1
num_bases = int(bases[(i + 1) : j])
skip_bases = (j - i) + num_bases - 1
deletion_count += 1
deletion = bases[j : (j + num_bases)].upper()
increase_in_dict(deletion_event_counts, deletion)
# Skip the length of the numeric entry plus the actual number of bases
# that need to be skipped.
i += skip_bases
elif base == r"+":
# This marks the occurrence of an insertion. It is a composite object
# consisting of: the special character '+', the number of the inserted bases
# and the actual bases that are inserted (these bases follow the current position).
# Similar to the deletion marking, but here we actually care about the bases,
# and we need to make an allele aware count.
# Get the number of bases that are inserted;
j = i + 1
while bases[j] in "0123456789":
j += 1
num_bases = int(bases[(i + 1) : j])
skip_bases = (j - i) + num_bases - 1
insertion_count += 1
insertion = bases[j : (j + num_bases)].upper()
increase_in_dict(insertion_event_counts, insertion)
i += skip_bases
else:
increase_in_dict(base_counts, bases[i].upper())
i += 1
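# Worked example (editor's illustration, not part of the original tool): with ref_base 'T'
# and original_bases ".,+2AG^F.$", the substitution above yields bases "TT+2AG^FT$", and the
# loop produces base_counts {'T': 3}, insertion_event_counts {'AG': 1} and end_counts == 1;
# the mapping-quality character following '^' is skipped.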
# TODO: An additional problematic case, discovered on 03.11.2014 while analyzing BWA-MEM's mpileup.
# There are pileup bases that do not have any actual bases, but only the '*' symbols. How should this be handled properly?
# Example line from the mpileup file:
# gi|48994873|gb|U00096.2|_Escherichia_coli_str._K-12_substr._MG1655,_complete_genome 1938202 T 20 ******************** 8,2*#-;)$B>2$1&D-
# I chose to handle them as undercovered bases.
non_indel_coverage_current_base = int(coverage) - current_base_deletion_count
if verbose == True:
sys.stdout.write("%s\nbase_counts: %s\n" % (line.strip(), str(base_counts)))
# EDIT: Previously I compared the total coverage of the current base with the coverage threshold.
# However, the total coverage also accounts for the deletions denoted with the '*' sign, which I think
# isn't relevant, as deletions are counted prior to occurring, and at that point it is already decided whether there is going
# to be a deletion event. If we wound up at this base (i.e. this base didn't get skipped because of a deletion
# consensus), then the deletions on this base are ignored.
# if (int(coverage) < coverage_threshold or int(coverage) == current_base_deletion_count):
# if (non_indel_coverage_current_base < coverage_threshold):
if int(coverage) < coverage_threshold:
ret_num_undercovered_bases[0] += 1
# ret_coverage_sum[0] += 0;
ret_coverage_sum[0] += int(coverage)
# TODO: Should I count total coverage of this base, or the non_indel_coverage_current_base?
sorted_base_counts = [["A", 0], ["C", 0], ["T", 0], ["G", 0]]
sorted_base_counts = sorted(
list(base_counts.items()), key=operator.itemgetter(1)
)
try:
most_common_base_count = sorted_base_counts[-1][1]
except Exception as e:
most_common_base_count = 0
pass
# variant_line = 'undercovered1\tpos = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s' % (position, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
# ret_variant_list.append(variant_line);
variant_line = (
"undercovered1\tpos = %s\tref = %s\tcoverage = %d\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s"
% (
position,
ref_name,
int(coverage),
str(sorted_base_counts),
str(insertion_event_counts),
str(deletion_event_counts),
)
)
ret_variant_list.append(variant_line)
### VCF output ###
qual = 1000
info = "DP=%s;TYPE=snp" % (coverage)
ref_field = ref_base
alt_field = "N"
vcf_line = "%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s" % (
ref_name,
position,
ref_field,
alt_field,
qual,
info,
)
ret_vcf_list.append(vcf_line)
##################
else:
ret_num_called_bases[0] += 1
ret_coverage_sum[0] += int(coverage)
# TODO: Should I count total coverage of this base, or the non_indel_coverage_current_base?
most_common_base_count = 0
### Handling base consensus.
sorted_base_counts = sorted(
list(base_counts.items()), key=operator.itemgetter(1)
)
try:
most_common_base_count = sorted_base_counts[-1][1]
except Exception as e:
pass
# sys.stderr.write(str(e) + '\n');
# sys.stderr.write('sorted_base_counts:\n');
# sys.stderr.write(str(sorted_base_counts) + '\n');
# sys.stderr.write('base_counts:\n');
# sys.stderr.write(str(base_counts) + '\n');
# sys.stderr.write('original_bases:\n');
# sys.stderr.write(str(original_bases) + '\n');
# sys.stderr.write('line:\n');
# sys.stderr.write(line.strip() + '\n');
# most_common_base_count = 0;
# Allow for the case where there are multiple equally good choices.
# In this case, we prefer the choice which is equal to the reference.
is_good = False
for base_count in sorted_base_counts:
if base_count[1] == most_common_base_count:
if base_count[0] == ref_base:
is_good = True
break
if is_good == False:
if len(sorted_base_counts) > 0:
ret_snp_count[0] += 1
# ret_variant_list.append(line_number);
variant_line = (
"SNP\tpos = %s\tref = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s"
% (
position,
ref_name,
int(coverage),
non_indel_coverage_current_base,
most_common_base_count,
ref_base,
("{}")
if (len(sorted_base_counts) == 0)
else (str(sorted_base_counts[-1][0])),
str(sorted_base_counts),
str(insertion_event_counts),
str(deletion_event_counts),
line.strip(),
)
)
ret_variant_list.append(variant_line)
### VCF output ###
alt_base = (
("{}")
if (len(sorted_base_counts) == 0)
else (str(sorted_base_counts[-1][0]))
)
qual = 1000
info = "DP=%s;TYPE=snp" % (coverage)
ref_field = ref_base
alt_field = alt_base
vcf_line = "%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s" % (
ref_name,
position,
ref_field,
alt_field,
qual,
info,
)
ret_vcf_list.append(vcf_line)
##################
else:
sys.stderr.write(
"\nWarning: a SNP was detected, but there were no bases in the sorted_base_counts!"
)
variant_line = (
"SNP\tpos = %s\tref = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s"
% (
position,
ref_name,
int(coverage),
non_indel_coverage_current_base,
most_common_base_count,
ref_base,
("{}")
if (len(sorted_base_counts) == 0)
else (str(sorted_base_counts[-1][0])),
str(sorted_base_counts),
str(insertion_event_counts),
str(deletion_event_counts),
line.strip(),
)
)
sys.stderr.write("\n")
else:
ret_num_correct_bases[0] += 1
if verbose == True:
sys.stdout.write("Reference base: %s\n" % (ref_base))
sys.stdout.write("Consensus base: %s\n\n" % (base_count[0]))
# if (int(position) == 100000 or int(position) == 1000000 or int(position) == 2000000 or int(position) == 3000000 or int(position) == 4000000):
# print '\nTEST\tpos = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s\n' % (position, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
### Handling indel consensus.
### Use a different coverage threshold here. We are interested even in the reads
### which had a '*' at the current position (because we don't know where it ends).
non_indel_coverage_next_base = (
int(coverage) - end_counts - deletion_count - insertion_count
)
if (
non_indel_coverage_next_base + deletion_count + insertion_count
) > coverage_threshold:
# Sanity check, just to see if there actually were any insertions (to avoid index out of bounds error).
# If there are insertions, get the most common one.
if len(list(insertion_event_counts.keys())) > 0:
sorted_insertion_counts = sorted(
list(insertion_event_counts.items()), key=operator.itemgetter(1)
)
most_common_insertion_count = sorted_insertion_counts[-1][1]
most_common_insertion_length = len(sorted_insertion_counts[-1][0])
insertion_unique = (
True
if (
sum(
[
int(insertion_count[1] == most_common_insertion_count)
for insertion_count in sorted_insertion_counts
]
)
== 1
)
else False
)
else:
most_common_insertion_count = 0
most_common_insertion_length = 0
insertion_unique = False
# Sanity check, just to see if there actually were any deletions (to avoid index out of bounds error).
# If there are deletions, get the most common one.
if len(list(deletion_event_counts.keys())) > 0:
sorted_deletion_counts = sorted(
list(deletion_event_counts.items()), key=operator.itemgetter(1)
)
most_common_deletion_count = sorted_deletion_counts[-1][1]
most_common_deletion_length = len(sorted_deletion_counts[-1][0])
deletion_unique = (
True
if (
sum(
[
int(deletion_count[1] == most_common_deletion_count)
for deletion_count in sorted_deletion_counts
]
)
== 1
)
else False
)
else:
most_common_deletion_count = 0
most_common_deletion_length = 0
deletion_unique = False
if (
most_common_insertion_count > most_common_deletion_count
and most_common_insertion_count > non_indel_coverage_next_base
):
# In this case, insertions are a clear winner.
if insertion_unique == True:
# ret_insertion_count[0] += most_common_insertion_length;
ret_insertion_count[0] += 1
ret_num_called_bases[0] += most_common_insertion_length
# variant_line = 'insertion\t%d\t%s\t%s\t%s\t%s' % (most_common_insertion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
# ret_variant_list.append(variant_line);
try:
temp_sorted_bc = sorted_base_counts[-1][0]
except IndexError:
temp_sorted_bc = 0
indel_length = most_common_insertion_length
variant_line = (
"ins\tpos = %s\tref = %s\tnon_indel_cov_next = %d\tnon_indel_cov_curr = %d\tmost_common_insertion_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s"
% (
position,
ref_name,
non_indel_coverage_next_base,
non_indel_coverage_current_base,
most_common_insertion_count,
ref_base,
temp_sorted_bc,
str(sorted_base_counts),
str(insertion_event_counts),
str(deletion_event_counts),
line.strip(),
)
)
ret_variant_list.append(variant_line)
### Insertions in the VCF format specify the position where an insertion occurs. The REF field holds the reference base, while the ALT field holds the reference base followed by the inserted bases.
### VCF output ###
alt_base = (
("{}")
if (len(sorted_base_counts) == 0)
else (str(sorted_base_counts[-1][0]))
)
qual = 1000
info = "DP=%s;TYPE=ins" % (coverage)
ref_field = ref_base
alt_field = "%s%s" % (ref_base, sorted_insertion_counts[-1][0])
vcf_line = "%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s" % (
ref_name,
position,
ref_field,
alt_field,
qual,
info,
)
ret_vcf_list.append(vcf_line)
##################
elif (
most_common_deletion_count > most_common_insertion_count
and most_common_deletion_count > non_indel_coverage_next_base
):
# In this case, deletions are a clear winner.
if deletion_unique == True:
# ret_deletion_count[0] += most_common_deletion_length;
ret_deletion_count[0] += 1
# variant_line = 'deletion\t%d\t%s\t%s\t%s\t%s' % (most_common_deletion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
# ret_variant_list.append(variant_line);
# return most_common_deletion_length;
variant_line = (
"del\tpos = %s\tref = %s\tnon_indel_cov_next = %d\tnon_indel_cov_curr = %d\tmost_common_deletion_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s"
% (
position,
ref_name,
non_indel_coverage_next_base,
non_indel_coverage_current_base,
most_common_deletion_count,
ref_base,
sorted_base_counts[-1][0],
str(sorted_base_counts),
str(insertion_event_counts),
str(deletion_event_counts),
line.strip(),
)
)
ret_variant_list.append(variant_line)
### Deletions in the VCF format specify the position where a deletion occurs: the first base is the non-deleted reference base and the following bases are the deleted event.
### VCF output ###
alt_base = (
("{}")
if (len(sorted_base_counts) == 0)
else (str(sorted_base_counts[-1][0]))
)
qual = 1000
info = "DP=%s;TYPE=del" % (coverage)
ref_field = "%s%s" % (ref_base, sorted_deletion_counts[-1][0])
alt_field = ref_base
vcf_line = "%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s" % (
ref_name,
position,
ref_field,
alt_field,
qual,
info,
)
ret_vcf_list.append(vcf_line)
##################
return most_common_deletion_length
else:
# In this case, either the base count consensus wins, or the
# insertion/deletion count is ambiguous.
pass
return 0
def process_mpileup(
alignments_path,
reference_path,
mpileup_path,
coverage_threshold,
output_prefix,
thread_id=0,
bed_position="",
):
fp = None
try:
fp = open(mpileup_path, "r")
except IOError:
sys.stderr.write(
'ERROR: Could not open file "%s" for reading!\n' % mpileup_path
)
return None
ret_variant_list = []
ret_vcf_list = []
ret_snp_count = [0]
ret_insertion_count = [0]
ret_deletion_count = [0]
ret_num_undercovered_bases = [0]
ret_num_called_bases = [0]
ret_num_correct_bases = [0]
ret_coverage_sum = [0]
# lines = fp.readlines();
fp_variant = None
fp_vcf = None
if output_prefix != "":
if not os.path.exists(os.path.dirname(output_prefix)):
os.makedirs(os.path.dirname(output_prefix))
variant_file = "%s-cov_%d.variant.csv" % (output_prefix, coverage_threshold)
fp_variant = open(variant_file, "w")
vcf_file = "%s-cov_%d.variant.vcf" % (output_prefix, coverage_threshold)
fp_vcf = open(vcf_file, "w")
fp_vcf.write("##fileformat=VCFv4.0\n")
fp_vcf.write("##fileDate=20150409\n")
fp_vcf.write("##source=%s\n" % (" ".join(sys.argv)))
fp_vcf.write("##reference=%s\n" % reference_path)
fp_vcf.write('##INFO=<ID=DP,Number=1,Type=Integer,Description="Raw Depth">\n')
fp_vcf.write(
'##INFO=<ID=TYPE,Number=A,Type=String,Description="Type of each allele (snp, ins, del, mnp, complex)">\n'
)
fp_vcf.write(
'##INFO=<ID=AF,Number=1,Type=Float,Description="Allele Frequency">\n'
)
fp_vcf.write(
'##INFO=<ID=SB,Number=1,Type=Integer,Description="Phred-scaled strand bias at this position">\n'
)
fp_vcf.write(
'##INFO=<ID=DP4,Number=4,Type=Integer,Description="Counts for ref-forward bases, ref-reverse, alt-forward and alt-reverse bases">\n'
)
fp_vcf.write(
'##INFO=<ID=INDEL,Number=0,Type=Flag,Description="Indicates that the variant is an INDEL.">\n'
)
fp_vcf.write(
'##INFO=<ID=CONSVAR,Number=0,Type=Flag,Description="Indicates that the variant is a consensus variant (as opposed to a low frequency variant).">\n'
)
fp_vcf.write(
'##INFO=<ID=HRUN,Number=1,Type=Integer,Description="Homopolymer length to the right of report indel position">\n'
)
fp_vcf.write("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n")
fp_vcf.flush()
use_bed = False
bed_chromosome = ""
bed_pos_start = 0
# bed_pos_end = len(lines);
bed_pos_end = -1
if bed_position != "":
bed_split = bed_position.split(":")
if len(bed_split) != 2:
use_bed = False
else:
bed_chromosome = bed_split[0]
bed_pos_split = bed_split[1].split("-")
if len(bed_pos_split) != 2:
use_bed = False
else:
bed_pos_start = int(bed_pos_split[0])
bed_pos_end = int(bed_pos_split[1])
use_bed = True
sys.stderr.write("Using location specified through commandline:\n")
sys.stderr.write('\tChromosome: "%s"\n' % bed_chromosome)
sys.stderr.write("\tStart: %d\n" % bed_pos_start)
sys.stderr.write("\tEnd: %d\n\n" % bed_pos_end)
# i = 0;
i = 0 if (use_bed == False) else max((bed_pos_start - 10), 0)
j = 0
# while (i < bed_pos_end): # len(lines)):
num_bases_to_skip = 0
for line in fp:
# line = lines[i];
if num_bases_to_skip > 0:
num_bases_to_skip -= 1
continue
if use_bed == True:
line_split = line.strip().split("\t")
if len(line_split) > 2 and line_split[0] == bed_chromosome:
current_pos = int(line_split[1])
if current_pos < bed_pos_start or current_pos >= bed_pos_end:
i += 1
j += 1
continue
else:
# print line_split[0];
# print bed_chromosome;
i += 1
j += 1
continue
if thread_id == 0:
if (j % 1000) == 0:
sys.stderr.write(
"\r[%d] snps = %d, insertions = %d, deletions = %d, undercovered = %d, coverage = %.2f"
% (
i,
ret_snp_count[0],
ret_insertion_count[0],
ret_deletion_count[0],
ret_num_undercovered_bases[0],
(float(ret_coverage_sum[0]) / float((i + 1))),
)
)
sys.stderr.flush()
variant_list_length = len(ret_variant_list)
vcf_list_length = len(ret_vcf_list)
num_bases_to_skip = process_mpileup_line(
line,
i,
ret_variant_list,
ret_vcf_list,
ret_snp_count,
ret_insertion_count,
ret_deletion_count,
ret_num_undercovered_bases,
ret_num_called_bases,
ret_num_correct_bases,
ret_coverage_sum,
coverage_threshold,
verbose=use_bed,
)
if len(ret_variant_list) > variant_list_length and fp_variant != None:
fp_variant.write("\n".join(ret_variant_list[variant_list_length:]) + "\n")
fp_variant.flush()
if len(ret_vcf_list) > vcf_list_length and fp_vcf != None:
fp_vcf.write("\n".join(ret_vcf_list[vcf_list_length:]) + "\n")
fp_vcf.flush()
i += num_bases_to_skip
i += 1
j += 1
# if (i > 10000):
# break;
fp.close()
sys.stderr.write("\n")
if fp_variant != None:
fp_variant.close()
if fp_vcf != None:
fp_vcf.close()
summary_lines = ""
summary_lines += "alignments_file: %s\n" % alignments_path
summary_lines += "mpileup_file: %s\n" % mpileup_path
summary_lines += "coverage_threshold: %d\n" % coverage_threshold
summary_lines += "snp_count: %d\n" % ret_snp_count[0]
summary_lines += "insertion_count: %d\n" % ret_insertion_count[0]
summary_lines += "deletion_count: %d\n" % ret_deletion_count[0]
summary_lines += "num_undercovered_bases: %d\n" % ret_num_undercovered_bases[0]
summary_lines += "num_called_bases: %d\n" % ret_num_called_bases[0]
summary_lines += "num_correct_bases: %d\n" % ret_num_correct_bases[0]
summary_lines += "average_coverage: %.2f\n" % (
(float(ret_coverage_sum[0]) / float((i + 1)))
)
sys.stderr.write(summary_lines + "\n")
sys.stderr.write("\n")
if output_prefix != "":
# summary_file = output_prefix + '.conssum';
summary_file = "%s-cov_%d.variant.sum" % (output_prefix, coverage_threshold)
try:
fp_sum = open(summary_file, "w")
fp_sum.write(summary_lines)
fp_sum.close()
return summary_file
except IOError:
sys.stderr.write(
'ERROR: Could not open file "%s" for writing!\n' % (summary_file)
)
return None
return None
def main(
alignments_path,
reference_path,
coverage_threshold,
output_prefix,
thread_id=0,
bed_position="",
):
# Sanity checking the existence of the file, and the correctness of its extension.
# Also, if input file is a SAM file, then convert it to a sorted BAM.
alignments_path_bam = alignments_path
if os.path.exists(alignments_path) == False:
sys.stderr.write('ERROR: File "%s" does not exist!\n' % alignments_path)
return
if alignments_path.endswith("sam"):
# Determine the path where the new BAM file will be generated.
dir_name = os.path.dirname(alignments_path)
if dir_name == "":
dir_name = "."
alignments_path_bam = (
dir_name
+ "/"
+ os.path.splitext(os.path.basename(alignments_path))[0]
+ ".bam"
)
alignments_path_bam_exists = os.path.exists(alignments_path_bam)
# Check if a BAM file with the given name already exists.
if alignments_path_bam_exists == False or (
alignments_path_bam_exists == True
and os.path.getmtime(alignments_path)
> os.path.getmtime(alignments_path_bam)
):
# Convert the SAM file to a sorted BAM file.
command = "samtools view -bS %s | samtools sort - %s" % (
alignments_path,
os.path.splitext(alignments_path_bam)[0],
)
sys.stderr.write(command + "\n")
subprocess.call(command, shell=True)
# Create the BAM index file.
command = "samtools index %s %s.bai" % (
alignments_path_bam,
alignments_path_bam,
)
subprocess.call(command, shell=True)
elif alignments_path.endswith("bam") == False:
sys.stderr.write(
'ERROR: File extension needs to be either .sam or .bam! Input file path: "%s".\n'
% alignments_path
)
return
# Convert the sorted BAM file to a mpileup file if it doesn't exist yet.
mpileup_path = "%s.mpileup" % alignments_path_bam
mpileup_exists = os.path.exists(mpileup_path)
if mpileup_exists == False or (
mpileup_exists == True
and os.path.getmtime(alignments_path) > os.path.getmtime(mpileup_path)
):
command = "samtools mpileup -B -d 1000000 -Q 0 -A -f %s %s > %s.mpileup" % (
reference_path,
alignments_path_bam,
alignments_path_bam,
)
subprocess.call(command, shell=True)
sys.stderr.write('Processing file "%s"...\n' % alignments_path)
sys.stderr.write('Reference file "%s"...\n' % reference_path)
sys.stderr.write("Coverage threshold: %d\n" % coverage_threshold)
summary_file = process_mpileup(
alignments_path,
reference_path,
("%s.mpileup" % alignments_path_bam),
coverage_threshold,
output_prefix,
thread_id,
bed_position,
)
def CollectSummaries(
sam_files, prefix_for_intermediate_results, collective_output_file
):
fp_collect = None
try:
fp_collect = open(collective_output_file, "w")
except IOError:
sys.stderr.write(
'ERROR: Could not open file "%s" for writing!\n' % collective_output_file
)
return
for sam_file in sam_files:
summary_file = prefix_for_intermediate_results + ".sum"
try:
fp_sum = open(summary_file, "r")
lines = fp_sum.readlines()
fp_sum.close()
except IOError:
sys.stderr.write(
'ERROR: Could not open file "%s" for reading!\n' % summary_file
)
continue
fp_collect.write("".join(lines) + "\n")
fp_collect.close()
if __name__ == "__main__":
# if (len(sys.argv) < 5):
# sys.stderr.write('Usage:\n');
# sys.stderr.write('\t%s <reference_file_path> coverage_threshold <collective_output_file> <{sb}am_file_1> [<{sb}am_file_2> <{sb}am_file_3> ...]\n' % sys.argv[0]);
# sys.stderr.write('\t(If <collective_output_file> is equal to "-", no files will be written to disk.)\n');
# exit(1);
if len(sys.argv) < 5:
sys.stderr.write("Usage:\n")
sys.stderr.write(
"\t%s <reference_file_path> coverage_threshold <output_prefix> <{sb}am_file_> [position]\n"
% sys.argv[0]
)
sys.stderr.write(
'\t(If <collective_output_file> is equal to "-", no files will be written to disk.)\n'
)
sys.stderr.write(
'\tPosition parameter is a string specifying "chromosome:start-end"\n\n'
)
exit(1)
reference_file = sys.argv[1]
coverage_threshold = int(sys.argv[2])
output_prefix = sys.argv[3]
sam_file = sys.argv[4]
bed_position = ""
if len(sys.argv) > 5:
bed_position = sys.argv[5]
# sys.stderr.write('bed_position: "%s"\n\n' % bed_position);
processes = []
if output_prefix == "-":
output_prefix = os.path.splitext(sam_file)[0]
main(sam_file, reference_file, coverage_threshold, output_prefix, 0, bed_position)
# if (output_prefix != '-'):
# CollectSummaries([sam_file], output_prefix, output_prefix + '.variant.sum');
| 39.755924
| 436
| 0.550933
| 4,042
| 33,554
| 4.313953
| 0.114547
| 0.023513
| 0.03051
| 0.016345
| 0.535413
| 0.445375
| 0.385158
| 0.3453
| 0.332454
| 0.308941
| 0
| 0.015717
| 0.343893
| 33,554
| 843
| 437
| 39.803084
| 0.776334
| 0.216695
| 0
| 0.456656
| 0
| 0.03096
| 0.133988
| 0.041133
| 0
| 0
| 0
| 0.001186
| 0
| 1
| 0.00774
| false
| 0.010836
| 0.006192
| 0
| 0.029412
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a049ff78a91de998072b637d1639d25a433a194
| 5,867
|
py
|
Python
|
web/addons/account_payment/wizard/account_payment_populate_statement.py
|
diogocs1/comps
|
63df07f6cf21c41e4527c06e2d0499f23f4322e7
|
[
"Apache-2.0"
] | null | null | null |
web/addons/account_payment/wizard/account_payment_populate_statement.py
|
diogocs1/comps
|
63df07f6cf21c41e4527c06e2d0499f23f4322e7
|
[
"Apache-2.0"
] | null | null | null |
web/addons/account_payment/wizard/account_payment_populate_statement.py
|
diogocs1/comps
|
63df07f6cf21c41e4527c06e2d0499f23f4322e7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
class account_payment_populate_statement(osv.osv_memory):
_name = "account.payment.populate.statement"
_description = "Account Payment Populate Statement"
_columns = {
'lines': fields.many2many('payment.line', 'payment_line_rel_', 'payment_id', 'line_id', 'Payment Lines')
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
line_obj = self.pool.get('payment.line')
res = super(account_payment_populate_statement, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
line_ids = line_obj.search(cr, uid, [
('move_line_id.reconcile_id', '=', False),
('bank_statement_line_id', '=', False),
('move_line_id.state','=','valid')])
line_ids.extend(line_obj.search(cr, uid, [
('move_line_id.reconcile_id', '=', False),
('order_id.mode', '=', False),
('move_line_id.state','=','valid')]))
domain = '[("id", "in", '+ str(line_ids)+')]'
doc = etree.XML(res['arch'])
nodes = doc.xpath("//field[@name='lines']")
for node in nodes:
node.set('domain', domain)
res['arch'] = etree.tostring(doc)
return res
def populate_statement(self, cr, uid, ids, context=None):
line_obj = self.pool.get('payment.line')
statement_obj = self.pool.get('account.bank.statement')
statement_line_obj = self.pool.get('account.bank.statement.line')
currency_obj = self.pool.get('res.currency')
voucher_obj = self.pool.get('account.voucher')
voucher_line_obj = self.pool.get('account.voucher.line')
move_line_obj = self.pool.get('account.move.line')
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
line_ids = data['lines']
if not line_ids:
return {'type': 'ir.actions.act_window_close'}
statement = statement_obj.browse(cr, uid, context['active_id'], context=context)
for line in line_obj.browse(cr, uid, line_ids, context=context):
ctx = context.copy()
ctx['date'] = line.ml_maturity_date  # was value_date earlier, but this field no longer exists
amount = currency_obj.compute(cr, uid, line.currency.id,
statement.currency.id, line.amount_currency, context=ctx)
if not line.move_line_id.id:
continue
context = dict(context, move_line_ids=[line.move_line_id.id])
result = voucher_obj.onchange_partner_id(cr, uid, [], partner_id=line.partner_id.id, journal_id=statement.journal_id.id, amount=abs(amount), currency_id= statement.currency.id, ttype='payment', date=line.ml_maturity_date, context=context)
if line.move_line_id:
voucher_res = {
'type': 'payment',
'name': line.name,
'partner_id': line.partner_id.id,
'journal_id': statement.journal_id.id,
'account_id': result['value'].get('account_id', statement.journal_id.default_credit_account_id.id),
'company_id': statement.company_id.id,
'currency_id': statement.currency.id,
'date': line.date or time.strftime('%Y-%m-%d'),
'amount': abs(amount),
'period_id': statement.period_id.id,
}
voucher_id = voucher_obj.create(cr, uid, voucher_res, context=context)
voucher_line_dict = {}
for line_dict in result['value']['line_cr_ids'] + result['value']['line_dr_ids']:
move_line = move_line_obj.browse(cr, uid, line_dict['move_line_id'], context)
if line.move_line_id.move_id.id == move_line.move_id.id:
voucher_line_dict = line_dict
if voucher_line_dict:
voucher_line_dict.update({'voucher_id': voucher_id})
voucher_line_obj.create(cr, uid, voucher_line_dict, context=context)
st_line_id = statement_line_obj.create(cr, uid, {
'name': line.order_id.reference or '?',
'amount': - amount,
'partner_id': line.partner_id.id,
'statement_id': statement.id,
'ref': line.communication,
}, context=context)
line_obj.write(cr, uid, [line.id], {'bank_statement_line_id': st_line_id})
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 48.891667
| 250
| 0.592466
| 723
| 5,867
| 4.596127
| 0.26971
| 0.027084
| 0.027084
| 0.033704
| 0.290701
| 0.222389
| 0.136624
| 0.058381
| 0.058381
| 0.058381
| 0
| 0.003465
| 0.262144
| 5,867
| 119
| 251
| 49.302521
| 0.764149
| 0.154082
| 0
| 0.097561
| 0
| 0
| 0.160677
| 0.052863
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0
| 0.036585
| 0
| 0.146341
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a051324d6c23235da009880d6bcb0d30ed4d8dc
| 315
|
py
|
Python
|
2-Python-Fundamentals (Jan 2021)/Course-Exercises-and-Exams/08-Text-Processing/01_Lab/02-Repeat-Strings.py
|
karolinanikolova/SoftUni-Software-Engineering
|
7891924956598b11a1e30e2c220457c85c40f064
|
[
"MIT"
] | null | null | null |
2-Python-Fundamentals (Jan 2021)/Course-Exercises-and-Exams/08-Text-Processing/01_Lab/02-Repeat-Strings.py
|
karolinanikolova/SoftUni-Software-Engineering
|
7891924956598b11a1e30e2c220457c85c40f064
|
[
"MIT"
] | null | null | null |
2-Python-Fundamentals (Jan 2021)/Course-Exercises-and-Exams/08-Text-Processing/01_Lab/02-Repeat-Strings.py
|
karolinanikolova/SoftUni-Software-Engineering
|
7891924956598b11a1e30e2c220457c85c40f064
|
[
"MIT"
] | null | null | null |
# 2. Repeat Strings
# Write a program that reads a list of strings. Each string is repeated N times, where N is the length of the string. Print the concatenated string.
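# Example (editor's illustration): input "ab cd" -> "abab" + "cdcd" -> output "ababcdcd".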
strings = input().split()
output_string = ""
for string in strings:
N = len(string)
output_string += string * N
print(output_string)
| 22.5
| 148
| 0.71746
| 49
| 315
| 4.55102
| 0.55102
| 0.161435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003984
| 0.203175
| 315
| 13
| 149
| 24.230769
| 0.884462
| 0.520635
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a0724ca0ed93e378a29473e0b6b5911cc4be4e6
| 944
|
py
|
Python
|
algorithm/dfs/boj_1260.py
|
ruslanlvivsky/python-algorithm
|
2b49bed33cd0e95b8a1e758008191f4392b3f667
|
[
"MIT"
] | 3
|
2021-07-18T14:40:24.000Z
|
2021-08-14T18:08:13.000Z
|
algorithm/dfs/boj_1260.py
|
jinsuSang/python-algorithm
|
524849a0a7e71034d329fef63c4f384930334177
|
[
"MIT"
] | null | null | null |
algorithm/dfs/boj_1260.py
|
jinsuSang/python-algorithm
|
524849a0a7e71034d329fef63c4f384930334177
|
[
"MIT"
] | null | null | null |
def dfs(V):
print(V, end=' ')
visited[V] = True
for n in graph[V]:
if not visited[n]:
dfs(n)
def dfs_s(V):
stack = [V]
visited[V] = True
while stack:
now = stack.pop()
print(now, end=' ')
for n in graph[now]:
if not visited[n]:
stack.append(n)
visited[n] = True
def bfs(V):
visited[V] = True
queue = [V]
while queue:
now = queue.pop(0)
print(now, end=' ')
for n in graph[now]:
if not visited[n]:
queue.append(n)
visited[n] = True
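# Input format assumed by the code below (BOJ 1260 style): the first line holds N (number of
# vertices), M (number of edges) and V (start vertex); the next M lines each hold one
# undirected edge "a b". Vertices are visited in ascending order because the adjacency lists are sorted.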
N, M, V = map(int, input().strip().split())
visited = [False] * (N + 1)
graph = [[] for _ in range(N + 1)]
for i in range(M):
a, b = map(int, input().strip().split())
graph[a].append(b)
graph[b].append(a)
for i in range(1, N + 1):
graph[i].sort()
dfs(V)
visited = [False] * (N + 1)
print()
bfs(V)
| 19.265306
| 44
| 0.470339
| 141
| 944
| 3.134752
| 0.248227
| 0.090498
| 0.081448
| 0.074661
| 0.352941
| 0.171946
| 0.171946
| 0.171946
| 0.171946
| 0.171946
| 0
| 0.009901
| 0.358051
| 944
| 48
| 45
| 19.666667
| 0.719472
| 0
| 0
| 0.358974
| 0
| 0
| 0.003178
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0
| 0
| 0.076923
| 0.102564
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a07aa532405a92d53e9ed5f46dcbcbd7a845cfa
| 634
|
py
|
Python
|
redirector.py
|
UKPLab/DiGAT
|
b044648a6c79428872a778908d3a8a689f0ac3e6
|
[
"Apache-2.0"
] | 8
|
2016-06-22T17:02:45.000Z
|
2020-11-16T23:46:13.000Z
|
redirector.py
|
UKPLab/DiGAT
|
b044648a6c79428872a778908d3a8a689f0ac3e6
|
[
"Apache-2.0"
] | null | null | null |
redirector.py
|
UKPLab/DiGAT
|
b044648a6c79428872a778908d3a8a689f0ac3e6
|
[
"Apache-2.0"
] | 1
|
2019-02-25T04:40:04.000Z
|
2019-02-25T04:40:04.000Z
|
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
__author__ = "Artem Vovk, Roland Kluge, and Christian Kirschner"
__copyright__ = "Copyright 2013-2015 UKP TU Darmstadt"
__credits__ = ["Artem Vovk", "Roland Kluge", "Christian Kirschner"]
__license__ = "ASL"
class Redirector(webapp.RequestHandler):
def get(self):
self.redirect("/argunit/home")
def post(self):
self.redirect("/argunit/home")
application = webapp.WSGIApplication(
[('/.*', Redirector)],
debug=True)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
| 22.642857
| 67
| 0.705047
| 74
| 634
| 5.662162
| 0.594595
| 0.047733
| 0.090692
| 0.105012
| 0.128878
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015152
| 0.167192
| 634
| 27
| 68
| 23.481481
| 0.778409
| 0
| 0
| 0.111111
| 0
| 0
| 0.26183
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a0b98cc37e3d3bfecf8eba880eba829290a251c
| 1,862
|
py
|
Python
|
deepgp_dsvi/demos/step_function.py
|
dks28/Deep-Gaussian-Process
|
a7aace43e78aae81468849aee7d172742e6ecf86
|
[
"MIT"
] | 21
|
2020-03-07T15:40:13.000Z
|
2021-11-05T07:49:24.000Z
|
deepgp_dsvi/demos/step_function.py
|
dks28/Deep-Gaussian-Process
|
a7aace43e78aae81468849aee7d172742e6ecf86
|
[
"MIT"
] | 3
|
2021-02-03T13:32:45.000Z
|
2021-07-17T16:07:06.000Z
|
src/demos/step_function.py
|
FelixOpolka/Deep-Gaussian-Process
|
40181f210d7b09863c321d1a90335be77233df80
|
[
"MIT"
] | 2
|
2020-08-10T14:02:28.000Z
|
2020-12-28T16:03:09.000Z
|
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from gpflow.kernels import White, RBF
from gpflow.likelihoods import Gaussian
from deep_gp import DeepGP
np.random.seed(0)
tf.random.set_seed(0)
def get_data():
Ns = 300
Xs = np.linspace(-0.5, 1.5, Ns)[:, None]
N, M = 50, 25
X = np.random.uniform(0, 1, N)[:, None]
Z = np.random.uniform(0, 1, M)[:, None]
f_step = lambda x: 0. if x < 0.5 else 1.
Y = np.reshape([f_step(x) for x in X], X.shape) + np.random.randn(
*X.shape) * 1e-2
return Xs, X, Y, Z
def make_deep_GP(num_layers, X, Y, Z):
kernels = []
layer_sizes = []
for l in range(num_layers):
kernel = RBF(lengthscales=0.2, variance=1.0) + White(variance=1e-5)
kernels.append(kernel)
layer_sizes.append(1)
dgp = DeepGP(X, Y, Z, kernels, layer_sizes, Gaussian(), num_samples=100)
# init hidden layers to be near deterministic
for layer in dgp.layers[:-1]:
layer.q_sqrt.assign(layer.q_sqrt * 1e-5)
return dgp
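# Editor's note (assumption about the modelling choice): scaling q_sqrt by 1e-5 makes the
# variational posteriors of the hidden layers almost deterministic at the start of training,
# the initialisation commonly used for doubly stochastic variational inference in deep GPs.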
if __name__ == '__main__':
Xs, X_train, Y_train, Z = get_data()
dgp = make_deep_GP(3, X_train, Y_train, Z)
optimizer = tf.optimizers.Adam(learning_rate=0.01, epsilon=1e-08)
for _ in range(1500):
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(dgp.trainable_variables)
objective = -dgp.elbo((X_train, Y_train))
gradients = tape.gradient(objective, dgp.trainable_variables)
optimizer.apply_gradients(zip(gradients, dgp.trainable_variables))
print(f"ELBO: {-objective.numpy()}")
samples, _, _ = dgp.predict_all_layers(Xs, num_samples=50, full_cov=True)
plt.plot(Xs, samples[-1].numpy()[:, :, 0].T, color='r', alpha=0.3)
plt.title('Deep Gaussian Process')
plt.scatter(X_train, Y_train)
plt.show()
| 31.033333
| 77
| 0.645005
| 295
| 1,862
| 3.908475
| 0.40339
| 0.027754
| 0.024284
| 0.041631
| 0.08673
| 0.034692
| 0
| 0
| 0
| 0
| 0
| 0.036301
| 0.215897
| 1,862
| 60
| 78
| 31.033333
| 0.753425
| 0.023093
| 0
| 0
| 0
| 0
| 0.030803
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0
| 0.133333
| 0
| 0.222222
| 0.022222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a0bd26d528523a33d941c1d0799a814a2b95dcf
| 5,343
|
py
|
Python
|
metaspace/engine/sm/engine/annotation_lithops/moldb_pipeline.py
|
METASPACE2020/METASPACE
|
e1acd9a409f84a78eed7ca9713258c09b0e137ca
|
[
"Apache-2.0"
] | 32
|
2018-08-13T15:49:42.000Z
|
2022-01-17T18:32:19.000Z
|
metaspace/engine/sm/engine/annotation_lithops/moldb_pipeline.py
|
METASPACE2020/METASPACE
|
e1acd9a409f84a78eed7ca9713258c09b0e137ca
|
[
"Apache-2.0"
] | 624
|
2018-07-02T15:18:22.000Z
|
2022-03-30T08:10:35.000Z
|
metaspace/engine/sm/engine/annotation_lithops/moldb_pipeline.py
|
METASPACE2020/METASPACE
|
e1acd9a409f84a78eed7ca9713258c09b0e137ca
|
[
"Apache-2.0"
] | 6
|
2021-01-10T22:24:30.000Z
|
2022-03-16T19:14:37.000Z
|
from __future__ import annotations
import json
import logging
from contextlib import contextmanager, ExitStack
from typing import List, Dict
import pandas as pd
from lithops.storage import Storage
from lithops.storage.utils import CloudObject, StorageNoSuchKeyError
from sm.engine.annotation_lithops.build_moldb import (
build_moldb,
InputMolDb,
DbFDRData,
)
from sm.engine.annotation_lithops.calculate_centroids import (
calculate_centroids,
validate_centroids,
)
from sm.engine.annotation_lithops.executor import Executor
from sm.engine.annotation_lithops.io import (
CObj,
save_cobj,
iter_cobjects_with_prefetch,
deserialize,
)
from sm.engine.annotation_lithops.utils import jsonhash
from sm.engine.utils.db_mutex import DBMutex
from sm.engine.ds_config import DSConfig
from sm.engine.annotation.isocalc_wrapper import IsocalcWrapper
logger = logging.getLogger('annotation-pipeline')
class CentroidsCacheEntry:
def __init__(
self, executor: Executor, sm_storage: Dict, ds_config: DSConfig, moldbs: List[InputMolDb]
):
ds_hash_params = ds_config.copy()
self.ds_config = {
**ds_hash_params, # type: ignore # https://github.com/python/mypy/issues/4122
# Include the `targeted` value of databases so that a new cache entry is made if
# someone manually changes that field
'databases': [(moldb['id'], moldb['targeted']) for moldb in moldbs],
}
# Remove database_ids as it may be in a different order to moldbs
del self.ds_config['database_ids']
self.ds_hash = jsonhash(self.ds_config)
self.executor = executor
self.storage = executor.storage
self.bucket, raw_prefix = sm_storage['centroids']
self.prefix = f"{raw_prefix}/{self.ds_hash}"
self.config_key = f'{self.prefix}/ds_config.json'
self.meta_key = f'{self.prefix}/meta'
@contextmanager
def lock(self):
with DBMutex().lock(self.ds_hash, timeout=3600):
yield
def load(self):
try:
db_data_cobjs, peaks_cobjs = deserialize(
self.storage.get_object(self.bucket, self.meta_key)
)
return db_data_cobjs, peaks_cobjs
except StorageNoSuchKeyError:
return None
def save(self, db_data_cobjs: List[CObj[DbFDRData]], peaks_cobjs: List[CObj[pd.DataFrame]]):
def batch_copy(src_cobjs: List[CloudObject], dest_prefix: str, *, storage: Storage):
# If Lithops' storage supported Copy Object operations, this could be easily optimized.
# Not sure if it's worth the effort yet
result_cobjs = []
for i, data in enumerate(iter_cobjects_with_prefetch(storage, src_cobjs)):
dest_key = f'{dest_prefix}/{i:06}'
result_cobjs.append(storage.put_cloudobject(data, dest_bucket, dest_key))
return result_cobjs
dest_bucket = self.bucket
# Copy cobjs to the cache dir
new_db_data_cobjs, new_peaks_cobjs = self.executor.map(
batch_copy,
[(db_data_cobjs, f'{self.prefix}/db_data'), (peaks_cobjs, f'{self.prefix}/peaks')],
runtime_memory=1024,
)
# Save config in case it's needed for debugging
self.storage.put_cloudobject(
json.dumps(self.ds_config, indent=4), self.bucket, self.config_key
)
# Save list of cobjects. This list would be easy to reconstruct by listing keys, but
# saving a separate object as the last step of the process is helpful to confirm that
# the cache item is complete, and didn't partially fail to copy.
save_cobj(self.storage, (new_db_data_cobjs, new_peaks_cobjs), self.bucket, self.meta_key)
return new_db_data_cobjs, new_peaks_cobjs
def clear(self):
keys = self.storage.list_keys(self.bucket, self.prefix)
if keys:
logger.info(f'Clearing centroids cache {self.prefix}')
self.storage.delete_objects(self.bucket, keys)
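# Cache layout under f"{bucket}/{prefix}", as used by the methods above:
#   ds_config.json   - the hashed dataset config, stored only to aid debugging
#   meta             - serialized lists of the db_data and peaks cloud objects
#   db_data/, peaks/ - the copied cloud objects themselves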
def get_moldb_centroids(
executor: Executor,
sm_storage: Dict,
ds_config: DSConfig,
moldbs: List[InputMolDb],
debug_validate=False,
use_cache=True,
use_db_mutex=True,
):
moldb_cache = CentroidsCacheEntry(executor, sm_storage, ds_config, moldbs)
with ExitStack() as stack:
if use_db_mutex:
stack.enter_context(moldb_cache.lock())
if use_cache:
cached_val = moldb_cache.load()
else:
cached_val = None
moldb_cache.clear()
if cached_val:
db_data_cobjs, peaks_cobjs = cached_val
logger.info(
f'Loaded {len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak segms from cache'
)
else:
formula_cobjs, db_data_cobjs = build_moldb(executor, ds_config, moldbs)
isocalc_wrapper = IsocalcWrapper(ds_config)
peaks_cobjs = calculate_centroids(executor, formula_cobjs, isocalc_wrapper)
if debug_validate:
validate_centroids(executor, peaks_cobjs)
moldb_cache.save(db_data_cobjs, peaks_cobjs)
logger.info(f'Saved {len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak segms to cache')
return db_data_cobjs, peaks_cobjs
| 36.59589
| 99
| 0.668351
| 685
| 5,343
| 4.989781
| 0.290511
| 0.024576
| 0.041837
| 0.038619
| 0.176419
| 0.115565
| 0.086893
| 0.078994
| 0.060854
| 0.060854
| 0
| 0.003735
| 0.248362
| 5,343
| 145
| 100
| 36.848276
| 0.847361
| 0.124088
| 0
| 0.053097
| 0
| 0
| 0.079923
| 0.016285
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061947
| false
| 0
| 0.141593
| 0
| 0.256637
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a0dc9555ac01260e856ab868bd3c294497c065f
| 2,830
|
py
|
Python
|
gui/main_window/node_editor/items/connector_top_item.py
|
anglebinbin/Barista-tool
|
2d51507fb3566881923f0b273127f59d23ed317f
|
[
"MIT"
] | 1
|
2020-02-11T19:05:17.000Z
|
2020-02-11T19:05:17.000Z
|
gui/main_window/node_editor/items/connector_top_item.py
|
anglebinbin/Barista-tool
|
2d51507fb3566881923f0b273127f59d23ed317f
|
[
"MIT"
] | null | null | null |
gui/main_window/node_editor/items/connector_top_item.py
|
anglebinbin/Barista-tool
|
2d51507fb3566881923f0b273127f59d23ed317f
|
[
"MIT"
] | null | null | null |
from PyQt5.QtWidgets import QMenu
from gui.main_window.node_editor.items.connector_item import ConnectorItem
class ConnectorTopItem(ConnectorItem):
""" Class to provide top connector functionality """
def __init__(self, index, nodeItem, nodeEditor, parent=None):
super(ConnectorTopItem, self).__init__(index, nodeItem, nodeEditor, parent)
def isTopConnector(self):
""" Returns whether the connector is a top connector (implementation for parent class) """
return True
def isInPlace(self):
""" Returns whether the connector is connected to a in-place working layer
A top connector is in place if any connected bottom connector is in place.
(implementation for parent class) """
for connection in self._connections:
if connection.getIsInPlace():
return True
return False
def getConnectedNodes(self):
""" Returns a list of node items, connected to this connector (implementation for parent class) """
nodes = list()
# for each connection get the node connected to the bottom of the connection
for connection in self._connections:
connectionsBottomConnector = connection.getBottomConnector()
if connectionsBottomConnector is not None:
nodes.append(connectionsBottomConnector.getNodeItem())
return nodes
def addConnection(self, connection):
""" Adds a connection to the connector and sets the start of the connection to this connectors position
(implementation for parent class) """
self._connections.append(connection)
connection.setStart(self.scenePos())
def updateConnectionPositions(self):
""" Updates the connected connections, sets the start of all connected connections to this connectors position
(implementation for parent class) """
for connection in self._connections:
connection.setStart(self.scenePos())
def contextMenuEvent(self, event):
""" Context menu for the top connector """
contextMenu = QMenu()
renameTop = contextMenu.addAction("Change name")
disconnectTop = contextMenu.addAction("Disconnect")
if self.getConnectionCount() == 0:
disconnectTop.setEnabled(False)
removeTop = contextMenu.addAction("Remove")
action = contextMenu.exec_(event.screenPos())
if action is not None:
if action == removeTop:
self._nodeEditor.tryToRemoveTopBlob(self._nodeItem.getLayerID(), self._index)
elif action == renameTop:
self._nodeEditor.tryToRenameTopBlob(self)
elif action == disconnectTop:
self._nodeEditor.disconnectTopBlob(self._nodeItem.getLayerID(), self._index)
| 44.21875
| 118
| 0.673852
| 289
| 2,830
| 6.519031
| 0.346021
| 0.045117
| 0.06104
| 0.07431
| 0.259023
| 0.135881
| 0.101911
| 0.101911
| 0.061571
| 0
| 0
| 0.000943
| 0.250883
| 2,830
| 63
| 119
| 44.920635
| 0.887736
| 0.280212
| 0
| 0.175
| 0
| 0
| 0.013875
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.175
| false
| 0
| 0.05
| 0
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a0e7a4577ac3f9f8b9fd994210704a26f91ee39
| 2,606
|
py
|
Python
|
api/src/opentrons/protocol_engine/commands/thermocycler/open_lid.py
|
Opentrons/protocol_framework
|
ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f
|
[
"Apache-2.0"
] | null | null | null |
api/src/opentrons/protocol_engine/commands/thermocycler/open_lid.py
|
Opentrons/protocol_framework
|
ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f
|
[
"Apache-2.0"
] | null | null | null |
api/src/opentrons/protocol_engine/commands/thermocycler/open_lid.py
|
Opentrons/protocol_framework
|
ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f
|
[
"Apache-2.0"
] | null | null | null |
"""Command models to open a Thermocycler's lid."""
from __future__ import annotations
from typing import Optional, TYPE_CHECKING
from typing_extensions import Literal, Type
from pydantic import BaseModel, Field
from ..command import AbstractCommandImpl, BaseCommand, BaseCommandCreate
from opentrons.protocol_engine.types import MotorAxis
if TYPE_CHECKING:
from opentrons.protocol_engine.state import StateView
from opentrons.protocol_engine.execution import EquipmentHandler, MovementHandler
OpenLidCommandType = Literal["thermocycler/openLid"]
class OpenLidParams(BaseModel):
"""Input parameters to open a Thermocycler's lid."""
moduleId: str = Field(..., description="Unique ID of the Thermocycler.")
class OpenLidResult(BaseModel):
"""Result data from opening a Thermocycler's lid."""
class OpenLidImpl(AbstractCommandImpl[OpenLidParams, OpenLidResult]):
"""Execution implementation of a Thermocycler's open lid command."""
def __init__(
self,
state_view: StateView,
equipment: EquipmentHandler,
movement: MovementHandler,
**unused_dependencies: object,
) -> None:
self._state_view = state_view
self._equipment = equipment
self._movement = movement
async def execute(self, params: OpenLidParams) -> OpenLidResult:
"""Open a Thermocycler's lid."""
thermocycler_state = self._state_view.modules.get_thermocycler_module_substate(
params.moduleId
)
thermocycler_hardware = self._equipment.get_module_hardware_api(
thermocycler_state.module_id
)
# move the pipettes and gantry over the trash
# do not home plunger axes because pipettes may be holding liquid
await self._movement.home(
[
MotorAxis.X,
MotorAxis.Y,
MotorAxis.RIGHT_Z,
MotorAxis.LEFT_Z,
]
)
if thermocycler_hardware is not None:
await thermocycler_hardware.open()
return OpenLidResult()
class OpenLid(BaseCommand[OpenLidParams, OpenLidResult]):
"""A command to open a Thermocycler's lid."""
commandType: OpenLidCommandType = "thermocycler/openLid"
params: OpenLidParams
result: Optional[OpenLidResult]
_ImplementationCls: Type[OpenLidImpl] = OpenLidImpl
class OpenLidCreate(BaseCommandCreate[OpenLidParams]):
"""A request to open a Thermocycler's lid."""
commandType: OpenLidCommandType = "thermocycler/openLid"
params: OpenLidParams
_CommandCls: Type[OpenLid] = OpenLid
| 30.302326
| 87
| 0.699156
| 263
| 2,606
| 6.771863
| 0.387833
| 0.051095
| 0.055025
| 0.057271
| 0.138686
| 0.126895
| 0.101067
| 0.101067
| 0.101067
| 0.101067
| 0
| 0
| 0.224482
| 2,606
| 85
| 88
| 30.658824
| 0.881247
| 0.149655
| 0
| 0.078431
| 0
| 0
| 0.041822
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0
| 0.156863
| 0
| 0.45098
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a11d7dca909e3885ae2dbc3bc1e2d0a99547ada
| 3,901
|
py
|
Python
|
scripts/randomize_sw2_seed.py
|
epichoxha/nanodump
|
3a269ed427b474a701197e13ce40cb1daf803a82
|
[
"Apache-2.0"
] | null | null | null |
scripts/randomize_sw2_seed.py
|
epichoxha/nanodump
|
3a269ed427b474a701197e13ce40cb1daf803a82
|
[
"Apache-2.0"
] | null | null | null |
scripts/randomize_sw2_seed.py
|
epichoxha/nanodump
|
3a269ed427b474a701197e13ce40cb1daf803a82
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import glob
import random
import struct
def get_old_seed():
with open('include/syscalls.h') as f:
code = f.read()
match = re.search(r'#define SW2_SEED (0x[a-fA-F0-9]{8})', code)
assert match is not None, 'SW2_SEED not found!'
return match.group(1)
def replace_seed(old_seed, new_seed):
with open('include/syscalls.h') as f:
code = f.read()
code = code.replace(
f'#define SW2_SEED {old_seed}',
f'#define SW2_SEED 0x{new_seed:08X}',
1
)
with open('include/syscalls.h', 'w') as f:
f.write(code)
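# The hash below appears to mirror the SysWhispers2 function-name hash: starting from the
# seed, every overlapping 2-byte window of the null-terminated (and 'Nt' -> 'Zw' normalised)
# name is folded in by XOR-ing the window's little-endian value plus a 32-bit
# rotate-right-by-8 of the running hash.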
def get_function_hash(seed, function_name, is_syscall=True):
function_hash = seed
function_name = function_name.replace('_', '')
if is_syscall and function_name[:2] == 'Nt':
function_name = 'Zw' + function_name[2:]
name = function_name + '\0'
ror8 = lambda v: ((v >> 8) & (2 ** 32 - 1)) | ((v << 24) & (2 ** 32 - 1))
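    # SW2-style hash: walk the name in overlapping 2-byte windows, XOR-ing each
    # little-endian 16-bit chunk with an 8-bit right-rotation of the running 32-bit hash.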
for segment in [s for s in [name[i:i + 2] for i in range(len(name))] if len(s) == 2]:
partial_name_short = struct.unpack('<H', segment.encode())[0]
function_hash ^= partial_name_short + ror8(function_hash)
return function_hash
def replace_syscall_hashes(seed):
with open('source/syscalls.c') as f:
code = f.read()
regex = re.compile(r'__declspec\(naked\) NTSTATUS (Nt[^(]+)')
syscall_names = re.findall(regex, code)
syscall_names = set(syscall_names)
syscall_definitions = code.split('#elif defined(__GNUC__)')[3]
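    # look up the current hash operand for each stub in the GCC inline-assembly section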
for syscall_name in syscall_names:
regex = re.compile('NTSTATUS ' + syscall_name + '\\(.*?"mov ecx, (0x[A-Fa-f0-9]{8})', re.DOTALL)
match = re.search(regex, syscall_definitions)
assert match is not None, f'hash of syscall {syscall_name} not found!'
old_hash = match.group(1)
new_hash = get_function_hash(seed, syscall_name)
print(f'{syscall_name} -> {old_hash} - 0x{new_hash:08X}')
code = code.replace(
old_hash,
f'0x{new_hash:08X}'
)
with open('source/syscalls.c', 'w') as f:
f.write(code)
with open('source/syscalls-asm.asm') as f:
code = f.read()
for syscall_name in syscall_names:
regex = re.compile(syscall_name + ' PROC.*?mov ecx, 0([A-Fa-f0-9]{8})h', re.DOTALL)
match = re.search(regex, code)
assert match is not None, f'hash of syscall {syscall_name} not found!'
old_hash = match.group(1)
new_hash = get_function_hash(seed, syscall_name)
code = code.replace(
f'0{old_hash}h',
f'0{new_hash:08X}h',
1
)
with open('source/syscalls-asm.asm', 'w') as f:
f.write(code)
def replace_dinvoke_hashes(seed):
for header_file in glob.glob("include/**/*.h", recursive=True):
with open(header_file) as f:
code = f.read()
regex = re.compile(r'#define (\w+)_SW2_HASH (0x[a-fA-F0-9]{8})')
matches = re.findall(regex, code)
for function_name, old_hash in matches:
new_hash = get_function_hash(seed, function_name, is_syscall=False)
code = code.replace(
f'#define {function_name}_SW2_HASH {old_hash}',
f'#define {function_name}_SW2_HASH 0x{new_hash:08X}',
1
)
if matches:
with open(header_file, 'w') as f:
f.write(code)
def main():
new_seed = random.randint(2 ** 28, 2 ** 32 - 1)
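    # presumably >= 2**28 so the seed always starts with a non-zero hex digit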
#new_seed = 0x1337c0de
old_seed = get_old_seed()
replace_seed(old_seed, new_seed)
replace_syscall_hashes(new_seed)
replace_dinvoke_hashes(new_seed)
if os.name == 'nt':
print('done! recompile with:\nnmake -f Makefile.msvc')
else:
print('done! recompile with:\nmake -f Makefile.mingw')
if __name__ == '__main__':
main()
6a11fa8d863a9e5b451bd2a7ef2241aafe768509 | 1,289 | py | Python
checker/checker/executer.py | grimpy/hexa-a | 556e9a2a70758bf9c7d70f91776d361b40524c78 | ["Apache-2.0"] | 3 | 2018-02-05T11:43:04.000Z | 2019-02-22T18:11:55.000Z
checker/checker/executer.py | grimpy/hexa-a | 556e9a2a70758bf9c7d70f91776d361b40524c78 | ["Apache-2.0"] | 4 | 2019-03-26T09:51:43.000Z | 2019-03-31T06:41:14.000Z
checker/checker/executer.py | grimpy/hexa-a | 556e9a2a70758bf9c7d70f91776d361b40524c78 | ["Apache-2.0"] | 1 | 2019-03-03T20:55:21.000Z | 2019-03-03T20:55:21.000Z
from subprocess import run, PIPE, TimeoutExpired, CompletedProcess
from codes import exitcodes
def _error_decode(response):
stderr = ""
if response.returncode:
if response.returncode < 0:
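            # a negative return code means the process was killed by signal abs(returncode)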
errmsg = exitcodes.get(abs(response.returncode), "Unknown Error")
if isinstance(errmsg, dict):
errmsg = errmsg["descr"]
else:
errmsg = response.stderr
stderr = "Exit code ({}): {}".format(abs(response.returncode), errmsg)
return response.returncode, stderr
def execute(cmd, workdir=None, timeout=60):
cmd = ["/bin/bash", "-c", cmd]
try:
response = run(
cmd,
stderr=PIPE,
stdout=PIPE,
cwd=workdir,
timeout=timeout,
universal_newlines=True,
)
except TimeoutExpired:
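        # 124 mirrors the exit code used by the coreutils `timeout` command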
response = CompletedProcess(
args=cmd,
returncode=124,
stderr="Timeout"
)
except:
response = CompletedProcess(
args=cmd,
returncode=-1,
stderr="Internal Checker Error"
)
response.stdout = "" if not response.stdout else str(response.stdout)
response.returncode, response.stderr = _error_decode(response)
return response
6a124e6043f5f93ce124eed73efc4b8488512375 | 1,739 | py | Python
pfm/pf_command/update.py | takahi-i/pfm | 224ca961ca43f50bd877789e2d8659ae838d517f | ["MIT"] | 9 | 2018-01-06T05:44:43.000Z | 2020-06-24T00:15:16.000Z
pfm/pf_command/update.py | takahi-i/pfm | 224ca961ca43f50bd877789e2d8659ae838d517f | ["MIT"] | 27 | 2018-01-06T09:29:48.000Z | 2020-04-10T16:11:59.000Z
pfm/pf_command/update.py | takahi-i/pfm | 224ca961ca43f50bd877789e2d8659ae838d517f | ["MIT"] | 1 | 2018-01-09T01:33:42.000Z | 2018-01-09T01:33:42.000Z
import json
from pfm.pf_command.base import BaseCommand
from pfm.util.log import logger
class UpdateCommand(BaseCommand):
def __init__(self, name, forward_type,
remote_host, remote_port, local_port,
ssh_server, server_port, login_user, config):
super(UpdateCommand, self).__init__(config)
self.name = name
self.forward_type = forward_type
self.remote_host = remote_host
self.remote_port = remote_port
self.local_port = local_port
self.ssh_server = ssh_server
self.server_port = server_port
self.login_user = login_user
def run(self):
        with open(self.config_path, 'r') as f:
            targets = json.load(f)
        if self.name in targets:
            target = targets[self.name]
            self.update(target)
        else:
            logger.warning("Port forward setting named " + self.name + " is not registered")
        # write the updated targets back to the config file
        with open(self.config_path, 'w') as f:
            f.write(json.dumps(targets, indent=4))
def update(self, target):
if self.forward_type is not None:
target["type"] = self.forward_type
if self.remote_host is not None:
target["remote_host"] = self.remote_host
if self.remote_port is not None:
target["remote_port"] = self.remote_port
if self.local_port is not None:
target["local_port"] = self.local_port
if self.ssh_server is not None:
target["ssh_server"] = self.ssh_server
if self.server_port is not None:
target["server_port"] = self.server_port
if self.login_user is not None:
target["login_user"] = self.login_user
6a12692597c07586454530c9bcf5baae61076b3f | 7,499 | py | Python
tests/atfork/test_atfork.py | luciferliu/xTools | 324ef1388be13ece0d952e3929eb685212d573f1 | ["Apache-2.0"] | null | null | null
tests/atfork/test_atfork.py | luciferliu/xTools | 324ef1388be13ece0d952e3929eb685212d573f1 | ["Apache-2.0"] | null | null | null
tests/atfork/test_atfork.py | luciferliu/xTools | 324ef1388be13ece0d952e3929eb685212d573f1 | ["Apache-2.0"] | null | null | null
#!/usr/bin/python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Licensed to the PSF under a Contributor Agreement.
#
# Author: Gregory P. Smith <greg@krypto.org>
"""Tests for atfork."""
import os
import sys
import importlib
from xTool.compat import StringIO
import traceback
import unittest
from xTool import atfork
class AtforkTest(unittest.TestCase):
def setUp(self):
atfork.monkeypatch_os_fork_functions()
self.calls = []
self.orig_stderr = sys.stderr
self.assertFalse(
atfork._fork_lock.locked(),
"atfork._fork_lock not released by an earlier test!",
)
# Unregister calls registered by earlier tests.
atfork._prepare_call_list = []
atfork._parent_call_list = []
atfork._child_call_list = []
def tearDown(self):
# Un-monkeypatch the os module. ook.
global os
importlib.reload(os)
sys.stderr = self.orig_stderr
def _pre(self):
self.calls.append(self._pre)
def _parent(self):
self.calls.append(self._parent)
def _child(self):
self.calls.append(self._child)
def _other(self):
self.calls.append(self._other)
def _raise_pre(self):
self._pre()
raise RuntimeError("This as the first parent error expected.")
def _raise_parent(self):
self._parent()
raise RuntimeError("This as the second parent error expected.")
def _raise_child(self):
self._child()
raise RuntimeError("This child error is expected.")
def _assert_expected_parent_stderr(self, error_msg):
self.assertTrue(("first parent error" in error_msg), error_msg)
self.assertTrue(("second parent error" in error_msg), error_msg)
self.assertTrue(
(error_msg.index("first parent") < error_msg.index("second parent")),
"first and second errors out of order in:\n%r" % error_msg,
)
self.assertEqual(2, error_msg.count("RuntimeError:"))
def _assert_expected_child_stderr(self, error_msg):
self.assertTrue("child error is expected" in error_msg)
self.assertEqual(1, error_msg.count("RuntimeError:"), error_msg)
def test_monkeypatching(self):
if not hasattr(os, "fork"):
return # Nothing to test on this platform.
self.assertTrue(callable(atfork._orig_os_fork))
self.assertTrue(callable(atfork._orig_os_forkpty))
# The os module was patched, these should not be equal.
self.assertNotEqual(atfork._orig_os_fork, os.fork)
self.assertNotEqual(atfork._orig_os_forkpty, os.forkpty)
# These are the wrapped versions we patched in.
self.assertEqual(atfork.os_fork_wrapper, os.fork)
self.assertEqual(atfork.os_forkpty_wrapper, os.forkpty)
def test_register_atfork_calls(self):
# Test with both positional and keyword arguments as well as None.
atfork.atfork(self._pre, self._parent, self._child)
atfork.atfork(prepare=self._pre)
atfork.atfork(parent=self._parent)
atfork.atfork(child=self._child)
self.assertEqual([self._pre] * 2, atfork._prepare_call_list)
self.assertEqual([self._parent] * 2, atfork._parent_call_list)
self.assertEqual([self._child] * 2, atfork._child_call_list)
if __debug__:
self.assertRaises(AssertionError, atfork.atfork, 1, 2, 3)
def test_call_atfork_list(self):
self.assertEqual([], atfork._call_atfork_list([]))
self.assertEqual([], atfork._call_atfork_list([self._pre]))
def raise_something():
raise RuntimeError()
errors = atfork._call_atfork_list([raise_something] * 2)
self.assertEqual(2, len(errors))
for exc_info in errors:
self.assertEqual(RuntimeError, exc_info[0])
def _test_a_fork_wrapper(self, fork_func):
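        # Fork via the wrapped function and check, in both parent and child, that the
        # registered atfork callbacks ran in order and that their errors reached stderr.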
sys.stderr = StringIO() # restored in tearDown
atfork.atfork(self._raise_pre, self._raise_parent, self._raise_child)
atfork.atfork(self._other, self._other, self._other)
pid = fork_func()
if pid == 0:
try:
try:
self.assertEqual(
[self._pre, self._other, self._child, self._other], self.calls
)
self.assertFalse(atfork._fork_lock.locked())
self._assert_expected_child_stderr(sys.stderr.getvalue())
except BaseException:
try:
traceback.print_exc()
self.orig_stderr.write(sys.stderr.getvalue())
finally:
os._exit(1)
finally:
os._exit(0)
else:
self.assertEqual(
[self._pre, self._other, self._parent, self._other], self.calls
)
self.assertFalse(atfork._fork_lock.locked())
self.assertEqual(0, os.waitpid(pid, 0)[1], "error in child")
self._assert_expected_parent_stderr(sys.stderr.getvalue())
def test_os_fork_wrapper(self):
self._test_a_fork_wrapper(os.fork)
def test_os_forkpty_wrapper(self):
self._test_a_fork_wrapper(lambda: os.forkpty()[0])
def _test_fork_failure(self, orig_fork_attrname, fork_wrapper):
def failing_fork():
raise OSError(0, "testing a fork failure")
atfork.atfork(self._pre, self._parent, self._child)
orig_orig_fork = getattr(atfork, orig_fork_attrname)
try:
setattr(atfork, orig_fork_attrname, failing_fork)
try:
pid = fork_wrapper()
if pid == 0:
# This should never happen but do this just in case.
os._exit(0)
except OSError:
self.assertEqual([self._pre, self._parent], self.calls)
else:
self.fail("Fork failed to fail!")
finally:
setattr(atfork, orig_fork_attrname, orig_orig_fork)
def test_fork_wrapper_failure(self):
self._test_fork_failure("_orig_os_fork", atfork.os_fork_wrapper)
def test_forkpty_wrapper_failure(self):
self._test_fork_failure("_orig_os_forkpty", atfork.os_forkpty_wrapper)
def test_multiple_monkeypatch_safe(self):
self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper)
self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper)
atfork.monkeypatch_os_fork_functions()
self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper)
self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper)
atfork.monkeypatch_os_fork_functions()
self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper)
self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper)
if __name__ == "__main__":
unittest.main()
6a131e98cf16cdcab3785e1e0af7a922aba56c50 | 2,213 | py | Python
IO/files/handling.py | brendano257/Zugspitze-Schneefernerhaus | 64bb86ece2eec147f2a7fb412f87ff2313388753 | ["MIT"] | null | null | null
IO/files/handling.py | brendano257/Zugspitze-Schneefernerhaus | 64bb86ece2eec147f2a7fb412f87ff2313388753 | ["MIT"] | null | null | null
IO/files/handling.py | brendano257/Zugspitze-Schneefernerhaus | 64bb86ece2eec147f2a7fb412f87ff2313388753 | ["MIT"] | null | null | null
import os
from pathlib import Path
__all__ = ['list_files_recur', 'scan_and_create_dir_tree', 'get_all_data_files', 'get_subsubdirs']
def list_files_recur(path):
"""
Cheater function that wraps path.rglob().
:param Path path: path to list recursively
:return list: list of Path objects
"""
files = []
for file in path.rglob('*'):
files.append(file)
return files
def scan_and_create_dir_tree(path, file=True):
"""
Creates all the necessary directories for the file at the end of path to be created.
When specified with a filepath to a file or folder, it creates directories until the path is valid.
:param Path path: must end with a filename, else the final directory won't be created
:param bool file: Boolean, does the given path end with a file? If not, path.parts[-1] will be created
:return None:
"""
parts = path.parts
path_to_check = Path(parts[0])
for i in range(1, len(parts)):
if not path_to_check.exists():
path_to_check.mkdir()
path_to_check = path_to_check / parts[i]
if file:
pass
else:
if not path_to_check.exists():
path_to_check.mkdir()
def get_all_data_files(path, filetype):
"""
Recursively search the given directory for .xxx files.
:param Path path: Path to search
:param str filetype: str, ".type" of file to search for
:return list: list of file-like Path objects
"""
files = list_files_recur(path)
files[:] = [file for file in files if filetype in file.name]
return files
def get_subsubdirs(path):
"""
Get the second-level subdirectories of the given path.
If given path 'a/b', a sample return would be ['a/b/c/d', 'a/b/c/d2', 'a/b/c/etc']
:param str path:
:return list: list containing Path instances for all paths found two levels below the supplied path
"""
leveltwo_subdirs = []
immediate_subdirs = [os.scandir(subdir) for subdir in os.scandir(path) if Path(subdir).is_dir()]
    for scan in immediate_subdirs:
        for subdir in scan:
            if Path(subdir).is_dir():
                leveltwo_subdirs.append(Path(subdir))
return leveltwo_subdirs
6a139742e2452134cace4ac02e78a8badeceb098 | 2,617 | py | Python
tools/mo/openvino/tools/mo/ops/detection_output_onnx.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | ["Apache-2.0"] | 1,127 | 2018-10-15T14:36:58.000Z | 2020-04-20T09:29:44.000Z
tools/mo/openvino/tools/mo/ops/detection_output_onnx.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | ["Apache-2.0"] | 439 | 2018-10-20T04:40:35.000Z | 2020-04-19T05:56:25.000Z
tools/mo/openvino/tools/mo/ops/detection_output_onnx.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | ["Apache-2.0"] | 414 | 2018-10-17T05:53:46.000Z | 2020-04-16T17:29:53.000Z
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value, shape_array, set_input_shapes
from openvino.tools.mo.ops.op import Op
class ExperimentalDetectronDetectionOutput(Op):
op = 'ExperimentalDetectronDetectionOutput'
enabled = True
def __init__(self, graph, attrs):
mandatory_props = dict(
type=self.op,
op=self.op,
version='opset6',
infer=self.infer,
reverse_infer=self.reverse_infer,
type_infer=self.type_infer,
in_ports_count=4,
out_ports_count=3,
)
super().__init__(graph, mandatory_props, attrs)
def backend_attrs(self):
return [
('class_agnostic_box_regression', lambda node: str(bool(node['class_agnostic_box_regression'])).lower()),
'max_detections_per_image',
'nms_threshold',
'num_classes',
'post_nms_count',
'score_threshold',
'max_delta_log_wh',
('deltas_weights', lambda node: ','.join(map(str, node['deltas_weights'])))]
@staticmethod
def infer(node):
rois_num = node.max_detections_per_image
# boxes
node.out_port(0).data.set_shape([rois_num, 4])
# classes, scores, batch indices
# We use range(1, 1 + max(node.out_ports().keys())) instead of range(1, 3), because there are incorrectly
# generated models where ExperimentalDetectronDetectionOutput has 4 outputs.
for port_ind in range(1, 1 + max(node.out_ports().keys())):
if not node.out_port(port_ind).disconnected():
node.out_port(port_ind).data.set_shape([rois_num])
@staticmethod
def type_infer(node):
in_data_type = node.in_port(0).get_data_type()
node.out_port(0).set_data_type(in_data_type)
node.out_port(1).set_data_type(np.int32) # the second output contains class indices
node.out_port(2).set_data_type(in_data_type)
if node.is_out_port_connected(3):
node.out_port(3).set_data_type(np.int32) # the fourth output contains batch indices
@staticmethod
def reverse_infer(node):
set_input_shapes(node,
shape_array([dynamic_dimension_value, 4]),
shape_array([dynamic_dimension_value, node['num_classes'] * 4]),
shape_array([dynamic_dimension_value, node['num_classes']]),
shape_array([1, 3]))
6a139aa59f68903a8a744250e0c92696c28eb301 | 2,046 | py | Python
driver.py | FahimMahmudJoy/Physionet_2019_Sepsis | d31bec40aa0359071bfaff1a4d72569c5731a04e | ["BSD-2-Clause"] | 1 | 2019-06-26T19:38:33.000Z | 2019-06-26T19:38:33.000Z
driver.py | FahimMahmudJoy/Physionet_2019_Sepsis | d31bec40aa0359071bfaff1a4d72569c5731a04e | ["BSD-2-Clause"] | null | null | null
driver.py | FahimMahmudJoy/Physionet_2019_Sepsis | d31bec40aa0359071bfaff1a4d72569c5731a04e | ["BSD-2-Clause"] | null | null | null
#!/usr/bin/env python
import numpy as np, os, sys
from get_sepsis_score import load_sepsis_model, get_sepsis_score
def load_challenge_data(file):
with open(file, 'r') as f:
header = f.readline().strip()
column_names = header.split('|')
data = np.loadtxt(f, delimiter='|')
# Ignore SepsisLabel column if present.
if column_names[-1] == 'SepsisLabel':
column_names = column_names[:-1]
data = data[:, :-1]
return data
def save_challenge_predictions(file, scores, labels):
with open(file, 'w') as f:
f.write('PredictedProbability|PredictedLabel\n')
for (s, l) in zip(scores, labels):
f.write('%g|%d\n' % (s, l))
if __name__ == '__main__':
# Parse arguments.
if len(sys.argv) != 3:
raise Exception('Include the input and output directories as arguments, e.g., python driver.py input output.')
input_directory = sys.argv[1]
output_directory = sys.argv[2]
# Find files.
files = []
for f in os.listdir(input_directory):
if os.path.isfile(os.path.join(input_directory, f)) and not f.lower().startswith('.') and f.lower().endswith('psv'):
files.append(f)
if not os.path.isdir(output_directory):
os.mkdir(output_directory)
# Load model.
model = load_sepsis_model()
print(model)
# Iterate over files.
for f in files:
# Load data.
input_file = os.path.join(input_directory, f)
data = load_challenge_data(input_file)
# print(type(data))
# Make predictions.
num_rows = len(data)
scores = np.zeros(num_rows)
labels = np.zeros(num_rows)
for t in range(num_rows):
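            # score each time step using only the data available up to and including t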
current_data = data[:t+1]
current_score, current_label = get_sepsis_score(current_data, model)
scores[t] = current_score
labels[t] = current_label
# Save results.
output_file = os.path.join(output_directory, f)
save_challenge_predictions(output_file, scores, labels)
6a139fa7954e69a2e28f61ebd4a2c8e7028fb83e | 2,589 | py | Python
src/LspRuntimeMonitor.py | TafsirGna/ClspGeneticAlgorithm | 25184afbbd52773b8aed2e268ae98dd9656cacda | ["MIT"] | null | null | null
src/LspRuntimeMonitor.py | TafsirGna/ClspGeneticAlgorithm | 25184afbbd52773b8aed2e268ae98dd9656cacda | ["MIT"] | null | null | null
src/LspRuntimeMonitor.py | TafsirGna/ClspGeneticAlgorithm | 25184afbbd52773b8aed2e268ae98dd9656cacda | ["MIT"] | null | null | null
#!/usr/bin/python3.5
# -*-coding: utf-8 -*
from collections import defaultdict
from threading import Thread
from time import perf_counter
from LspLibrary import bcolors
import time
import matplotlib.pyplot as plt
class LspRuntimeMonitor:
"""
"""
clockStart = None
clockEnd = None
mutation_strategy = "simple_mutation"
popsData = defaultdict(lambda: None)
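    # presumably filled elsewhere with per-generation "min"/"max" cost series, read back in plotData()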
outputString = ""
outputFilePath = "data/output/output.txt"
verbose = False
running = True
def __init__(self) -> None:
"""
"""
pass
@classmethod
def duration(cls):
"""
"""
return f"{cls.clockEnd - cls.clockStart} second(s)"
@classmethod
def started(cls):
"""
"""
cls.running = True
LspRuntimeMonitor.clockStart = perf_counter()
print(f"{bcolors.OKGREEN}Processing input data.{bcolors.ENDC}")
# Thread(cls.waitingAnimation())
@classmethod
def ended(cls):
"""
"""
cls.running = False
LspRuntimeMonitor.clockEnd = perf_counter()
@classmethod
def output(cls, output):
"""
"""
cls.outputString += output
if cls.verbose:
print(output)
@classmethod
def saveOutput(cls):
"""
"""
f = open(cls.outputFilePath, "w")
f.write(cls.outputString)
f.close()
@classmethod
def report(cls):
"""
"""
# Duration
durationStatement = cls.duration()
cls.output(durationStatement)
# Saving all generated output to a default file
cls.saveOutput()
cls.plotData()
@classmethod
def plotData(cls):
"""
"""
print('-----------------------------------------')
print(cls.popsData)
data = list(cls.popsData.values())[0]
# Plots
# Plotting the evolution of the minimal cost over generations
plt.plot(list(range(len(data["max"]))), data["max"])
plt.ylabel("Population maximal cost")
plt.show()
# Plotting the evolution of the minimal cost over generations
plt.plot(list(range(len(data["min"]))), data["min"])
plt.ylabel("Population minimal cost")
plt.show()
@classmethod
def waitingAnimation(cls):
"""
"""
animation = "|/-\\"
idx = 0
# while thing_not_complete():
while cls.running:
print(animation[idx % len(animation)], end="\r")
idx += 1
time.sleep(0.1)
6a152a32efa9784006230b4163868ce2479ff3ba | 20,737 | py | Python
methylcheck/predict/sex.py | FoxoTech/methylcheck | 881d14d78e6086aab184716e0b79cdf87e9be8bf | ["MIT"] | null | null | null
methylcheck/predict/sex.py | FoxoTech/methylcheck | 881d14d78e6086aab184716e0b79cdf87e9be8bf | ["MIT"] | 11 | 2021-04-08T16:14:54.000Z | 2022-03-09T00:22:13.000Z
methylcheck/predict/sex.py | FoxoTech/methylcheck | 881d14d78e6086aab184716e0b79cdf87e9be8bf | ["MIT"] | 1 | 2022-02-10T09:06:45.000Z | 2022-02-10T09:06:45.000Z
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
#app
import methylcheck # uses .load; get_sex uses methylprep models too and detect_array()
import logging
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def _get_copy_number(meth,unmeth):
"""function to return copy number.
requires dataframes of methylated and
unmethylated values. can be raw OR corrected"""
# minfi R version:
# log2(getMeth(object) + getUnmeth(object))
return np.log2(meth+unmeth)
def get_sex(data_source, array_type=None, verbose=False, plot=False, save=False,
on_lambda=False, median_cutoff= -2, include_probe_failure_percent=True,
poobah_cutoff=20, custom_label=None, return_fig=False, return_labels=False):
"""This will calculate and predict the sex of each sample.
inputs:
=======
the "data_source" can be any one of:
path -- to a folder with csv data that contains processed sample data
path -- to a folder with the 'meth_values.pkl' and 'unmeth_values.pkl' dataframes
path -- to a folder also containing samplesheet pkl and poobah_values.pkl, if you want to compare predicted sex with actual sex.
data_containers -- object created from methylprep.run_pipeline() or methylcheck.load(path, 'meth')
tuple of (meth, unmeth) dataframes
array_type (string)
enum: {'27k','450k','epic','epic+','mouse'}
if not specified, it will load the data from data_source and determine the array for you.
median_cutoff
the minimum difference in the medians of X and Y probe copy numbers to assign male or female
(copied from the minfi sex predict function)
include_probe_failure_percent:
True: includes poobah percent per sample as column in the output table and on the plot.
Note: you must supply a 'path' as data_source to include poobah in plots.
poobah_cutoff
The maximum percent of sample probes that can fail before the sample fails. Default is 20 (percent)
Has no effect if `include_probe_failure_percent` is False.
plot
True: creates a plot, with option to `save` as image or `return_fig`.
save
True: saves the plot, if plot is True
return_fig
If True, returns a pyplot figure instead of a dataframe. Default is False.
Note: return_fig will not show a plot on screen.
return_labels: (requires plot == True)
When using poobah_cutoff, the figure only includes A-Z,1...N labels on samples on plot to make it easier to read.
So to get what sample_ids these labels correspond to, you can rerun the function with return_labels=True and it will
skip plotting and just return a dictionary with sample_ids and these labels, to embed in a PDF report if you like.
custom_label:
Option to provide a dictionary with keys as sample_ids and values as labels to apply to samples.
e.g. add more data about samples to the multi-dimensional QC plot
while providing a filepath is the easiest way, you can also pass in a data_containers object,
a list of data_containers containing raw meth/unmeth values, instead. This object is produced
by methylprep.run_pipeline, or by using methylcheck.load(filepath, format='meth') and lets you
customize the import if your files were not prepared using methylprep (non-standand CSV columns, for example)
If a `poobah_values.pkl` file can be found in path, the dataframe returned will also include
percent of probes for X and Y chromosomes that failed quality control, and warn the user if any did.
This feature won't work if a containers object or tuple of dataframes is passed in, instead of a path.
Note: ~90% of Y probes should fail if the sample is female. That chromosome is missing."""
allowed_array_types = {'27k','450k','epic','epic+','mouse'}
try:
from methylprep.files import Manifest
from methylprep.models import ArrayType
except ImportError:
raise ImportError("This function requires methylprep to be installed (pip3 install `methylprep`)")
(data_source_type, data_source) = methylcheck.load_processed._data_source_type(data_source)
# data_source_type is one of {'path', 'container', 'control', 'meth_unmeth_tuple'}
poobah=None
    if data_source_type == 'path':
# this will look for saved pickles first, then csvs or parsing the containers (which are both slower)
# the saved pickles function isn't working for batches yet.
try:
meth, unmeth = methylcheck.qc_plot._get_data(
data_containers=None, path=data_source,
compare=False, noob=False, verbose=False)
except Exception as e:
meth, unmeth = methylcheck.qc_plot._get_data(
data_containers=None, path=data_source,
compare=False, noob=True, verbose=False)
if include_probe_failure_percent == True and Path(data_source,'poobah_values.pkl').expanduser().exists():
poobah = pd.read_pickle(Path(data_source,'poobah_values.pkl').expanduser())
    elif data_source_type == 'container':
# this will look for saved pickles first, then csvs or parsing the containers (which are both slower)
# the saved pickles function isn't working for batches yet.
meth, unmeth = methylcheck.qc_plot._get_data(
data_containers=data_source, path=None,
compare=False, noob=False, verbose=False)
elif data_source_type == 'meth_unmeth_tuple':
(meth, unmeth) = data_source
if len(meth) != len(unmeth):
raise ValueError(f"WARNING: probe count mismatch: meth {len(meth)} -- unmeth {len(unmeth)}")
if array_type == None:
# get list of X any Y probes - using .methylprep_manifest_files (or MANIFEST_DIR_PATH_LAMBDA) and auto-detected array here
array_type = ArrayType(methylcheck.detect_array(meth, on_lambda=on_lambda))
elif isinstance(array_type,str):
if array_type in allowed_array_types:
array_type = ArrayType(array_type)
else:
raise ValueError(f"Your array_type must be one of these: {allowed_array_types} or None.")
if verbose:
LOGGER.debug(array_type)
LOGGER.setLevel(logging.WARNING)
manifest = Manifest(array_type, on_lambda=on_lambda, verbose=verbose)._Manifest__data_frame # 'custom', '27k', '450k', 'epic', 'epic+'
LOGGER.setLevel(logging.INFO)
x_probes = manifest.index[manifest['CHR']=='X']
y_probes = manifest.index[manifest['CHR']=='Y']
if verbose:
LOGGER.info(f"Found {len(x_probes)} X and {len(y_probes)} Y probes")
# dataframes of meth and unmeth values for the sex chromosomes
x_meth = meth[meth.index.isin(x_probes)]
x_unmeth = unmeth[unmeth.index.isin(x_probes)]
y_meth = meth[meth.index.isin(y_probes)]
y_unmeth = unmeth[unmeth.index.isin(y_probes)]
# create empty dataframe for output
output = pd.DataFrame(index=[s for s in meth.columns], columns=['x_median','y_median','predicted_sex'])
# get median values for each sex chromosome for each sample
x_med = _get_copy_number(x_meth,x_unmeth).median()
y_med = _get_copy_number(y_meth,y_unmeth).median()
# populate output dataframe with values
output['x_median'] = output.index.map(x_med)
output['y_median'] = output.index.map(y_med)
# compute difference
median_difference = output['y_median'] - output['x_median']
# median cutoff - can be manipulated by user --- default = -2 --- used to predict sex
sex0 = ['F' if x < median_cutoff else 'M' for x in median_difference]
# NOTE for testing: GSE85566/GPL13534 (N=120) has 4 samples that are predicted as wrong sex when using -2, but work at -0.5.
# populate dataframe with predicted sex
output['predicted_sex'] = sex0
output = output.round(1)
# if poobah_df exists, calculate percent X and Y probes that failed
sample_failure_percent = {} # % of ALL probes in sample, not just X or Y
if include_probe_failure_percent == True and isinstance(poobah, pd.DataFrame):
p_value_cutoff = 0.05
X_col = []
Y_col = []
failed_samples = []
for column in poobah.columns:
sample_failure_percent[column] = round(100*len(poobah[column][poobah[column] >= p_value_cutoff].index) / len(poobah.index),1)
failed_probe_names = poobah[column][poobah[column] >= p_value_cutoff].index
failed_x_probe_names = list(set(failed_probe_names) & set(x_probes))
failed_y_probe_names = list(set(failed_probe_names) & set(y_probes))
X_percent = round(100*len(failed_x_probe_names)/poobah.index.isin(list(x_probes)).sum(),1)
Y_percent = round(100*len(failed_y_probe_names)/poobah.index.isin(list(y_probes)).sum(),1)
X_col.append(X_percent)
Y_col.append(Y_percent)
if X_percent > 10:
failed_samples.append(column)
output['X_fail_percent'] = X_col #output.index.map(X_col)
output['Y_fail_percent'] = Y_col #output.index.map(Y_col)
if failed_samples != []:
LOGGER.warning(f"{len(failed_samples)} samples had >10% of X probes fail p-value probe detection. Predictions for these may be unreliable:")
LOGGER.warning(f"{failed_samples}")
    if data_source_type == 'path':
output = _fetch_actual_sex_from_sample_sheet_meta_data(data_source, output)
if plot == True:
fig = _plot_predicted_sex(data=output, # 'x_median', 'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent'
sample_failure_percent=sample_failure_percent,
median_cutoff=median_cutoff,
include_probe_failure_percent=include_probe_failure_percent,
verbose=verbose,
save=save,
poobah_cutoff=poobah_cutoff,
custom_label=custom_label,
data_source_type=data_source_type,
data_source=data_source,
return_fig=return_fig,
return_labels=return_labels,
)
if return_labels:
return fig # these are a lookup dictionary of labels
if return_fig:
return fig
return output
def _plot_predicted_sex(data=pd.DataFrame(),
sample_failure_percent={},
median_cutoff= -2,
include_probe_failure_percent=True,
verbose=False,
save=False,
poobah_cutoff=20, #%
custom_label=None,
data_source_type=None,
data_source=None,
return_fig=False,
return_labels=False):
"""
data columns: ['x_median', 'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent']
- color is sex, pink or blue
- marker circle size will be larger and more faded if poobah values are worse, smaller and darker if low variance. Like a probability cloud.
- sample text is (ID, delta age)
- sex mismatches are X, matched samples are circles (if samplesheet contains actual sex data)
- omits labels for samples that have LOW failure rates, but shows IDs when failed
- adds legend of sketchy samples and labels
- show delta age on labels (using custom column dict)
- unit tests with custom label and without, and check that controls_report still works with this function
- save_fig
- return_labels, returns a lookup dict instead of plot
if there is a "custom_label" dict passed in, such as (actual_age - predicted_age), it simply adds those this label to the marker text labels.
Dicts must match the data DF index.
"""
if sample_failure_percent != {} and set(sample_failure_percent.keys()) == set(data.index):
data['sample_failure_percent'] = pd.Series(sample_failure_percent)
else:
LOGGER.warning("sample_failure_percent index did not align with output data index")
#sns.set_theme(style="white")
show_mismatches = None if 'sex_matches' not in data.columns else "sex_matches"
if show_mismatches:
data["sex_matches"] = data["sex_matches"].map({0:"Mismatch", 1:"Match"})
show_failure = None if 'sample_failure_percent' not in data.columns else "sample_failure_percent"
sample_sizes = (20, 600)
if show_failure: # avoid sizing dots with narrow range; gives false impression of bad samples.
poobah_range = data["sample_failure_percent"].max() - data["sample_failure_percent"].min()
if poobah_range < poobah_cutoff/2:
show_failure = None
sample_sizes = (40,40)
custom_palette = sns.set_palette(sns.color_palette(['#FE6E89','#0671B7']))
# if only one sex, make sure male is blue; female is pink
# if hasattr(output, 'actual_sex') and set(output.actual_sex) == set('M')
# if first value to be plotted is male, change palette
if hasattr(data, 'predicted_sex') and list(data.predicted_sex)[0] == 'M':
custom_palette = sns.set_palette(sns.color_palette(['#0671B7','#FE6E89']))
fig = sns.relplot(data=data,
x='x_median',
y='y_median',
hue="predicted_sex",
size=show_failure,
style=show_mismatches,
sizes=sample_sizes,
alpha=.5,
palette=custom_palette,
height=8,
aspect=1.34)
ax = fig.axes[0,0]
fig.fig.subplots_adjust(top=.95)
# for zoomed-in plots with few points close together, set the min scale to be at least 2 units.
yscale = plt.gca().get_ylim()
xscale = plt.gca().get_xlim()
if abs(yscale[1]-yscale[0]) < 2.0:
ax.set_xlim(xmin=xscale[0]-1, xmax=xscale[1]+1)
ax.set_ylim(ymin=yscale[0]-1, ymax=yscale[1]+1)
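    # label points A-Z first, then fall back to numbers, so crowded samples stay readable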
label_lookup = {index_val: chr(i+65) if (i <= 26) else str(i-26) for i,index_val in enumerate(data.index)}
for idx,row in data.iterrows():
if "sample_failure_percent" in row and row['sample_failure_percent'] > poobah_cutoff:
label = f"{label_lookup[idx]}, {custom_label.get(idx)}" if isinstance(custom_label, dict) and custom_label.get(idx) else label_lookup[idx]
ax.text(row['x_median'], row['y_median'], label, horizontalalignment='center', fontsize=10, color='darkred')
else:
label = f"{custom_label.get(idx)}" if isinstance(custom_label, dict) else None
if label:
ax.text(row['x_median']+0.05, row['y_median']+0.05, label, horizontalalignment='center', fontsize=10, color='grey')
if return_labels:
plt.close() # release memory
return label_lookup
if "sample_failure_percent" in data.columns:
N_failed = len(data[data['sample_failure_percent'] > poobah_cutoff].index)
N_total = len(data['sample_failure_percent'].index)
ax.set_title(f"{N_failed} of {N_total} samples failed poobah, with at least {poobah_cutoff}% of probes failing")
else:
ax.set_title(f"Predicted sex based on matching X and Y probes.")
if save:
filepath = 'predicted_sexes.png' if data_source_type != 'path' else Path(data_source,'predicted_sexes.png').expanduser()
plt.savefig(filepath, bbox_inches="tight")
if return_fig:
return fig
plt.show()
def _fetch_actual_sex_from_sample_sheet_meta_data(filepath, output):
"""output is a dataframe with Sample_ID in the index. This adds actual_sex as a column and returns it."""
# controls_report() does the same thing, and only calls get_sex() with the minimum of data to be fast, because these are already loaded. Just passes in meth/unmeth data
# Sample sheet should have 'M' or 'F' in column to match predicted sex.
# merge actual sex into processed output, if available
file_patterns = {
'sample_sheet_meta_data.pkl': 'meta',
'*_meta_data.pkl': 'meta',
'*samplesheet*.csv': 'meta',
'*sample_sheet*.csv': 'meta',
}
loaded_files = {}
for file_pattern in file_patterns:
for filename in Path(filepath).expanduser().rglob(file_pattern):
if '.pkl' in filename.suffixes:
loaded_files['meta'] = pd.read_pickle(filename)
break
if '.csv' in filename.suffixes:
loaded_files['meta'] = pd.read_csv(filename)
break
if len(loaded_files) == 1:
# methylprep v1.5.4-6 was creating meta_data files with two Sample_ID columns. Check and fix here:
# methylcheck 0.7.9 / prep 1.6.0 meta_data lacking Sample_ID when sample_sheet uses alt column names and gets replaced.
if any(loaded_files['meta'].columns.duplicated()):
loaded_files['meta'] = loaded_files['meta'].loc[:, ~loaded_files['meta'].columns.duplicated()]
LOGGER.info("Removed a duplicate Sample_ID column in samplesheet")
if 'Sample_ID' in loaded_files['meta'].columns:
loaded_files['meta'] = loaded_files['meta'].set_index('Sample_ID')
elif 'Sentrix_ID' in loaded_files['meta'].columns and 'Sentrix_Position' in loaded_files['meta'].columns:
loaded_files['meta']['Sample_ID'] = loaded_files['meta']['Sentrix_ID'].astype(str) + '_' + loaded_files['meta']['Sentrix_Position'].astype(str)
loaded_files['meta'] = loaded_files['meta'].set_index('Sample_ID')
else:
raise ValueError("Your sample sheet must have a Sample_ID column, or (Sentrix_ID and Sentrix_Position) columns.")
# fixing case of the relevant column
renamed_column = None
if ('Gender' in loaded_files['meta'].columns or 'Sex' in loaded_files['meta'].columns):
if 'Gender' in loaded_files['meta'].columns:
renamed_column = 'Gender'
elif 'Sex' in loaded_files['meta'].columns:
renamed_column = 'Sex'
else:
renamed_columns = {col:(col.title() if col.lower() in ('sex','gender') else col) for col in loaded_files['meta'].columns}
loaded_files['meta'] = loaded_files['meta'].rename(columns=renamed_columns)
if 'Gender' in renamed_columns.values():
renamed_column = 'Gender'
elif 'Sex' in renamed_columns.values():
renamed_column = 'Sex'
if renamed_column is not None:
# next, ensure samplesheet Sex/Gender (Male/Female) are recoded as M/F; controls_report() does NOT do this step, but should.
sex_values = set(loaded_files['meta'][renamed_column].unique())
#print('sex_values', sex_values)
if not sex_values.issubset(set(['M','F'])): # subset, because samples might only contain one sex
if 'Male' in sex_values or 'Female' in sex_values:
loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'Male':'M', 'Female':'F'})
elif 'male' in sex_values or 'female' in sex_values:
loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'male':'M', 'female':'F'})
elif 'MALE' in sex_values or 'FEMALE' in sex_values:
loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'MALE':'M', 'FEMALE':'F'})
elif 'm' in sex_values or 'f' in sex_values:
loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'m':'M', 'f':'F'})
else:
raise ValueError(f"Cannot compare with predicted sex because actual sexes listed in your samplesheet are not understood (expecting M or F): (found {sex_values})")
output['actual_sex'] = None
output['sex_matches'] = None
for row in output.itertuples():
try:
actual_sex = str(loaded_files['meta'].loc[row.Index].get(renamed_column))
except KeyError:
if 'Sample_ID' in output.columns:
LOGGER.warning("Sample_ID was another column in your output DataFrame; Set that to the index when you pass it in.")
raise KeyError("Could not read actual sex from meta data to compare.")
if isinstance(actual_sex, pd.Series):
LOGGER.warning(f"Multiple samples matched actual sex for {row.Index}, because Sample_ID repeats in sample sheets. Only using first match, so matches may not be accurate.")
actual_sex = actual_sex[0]
if hasattr(row,'predicted_sex'):
sex_matches = 1 if actual_sex.upper() == str(row.predicted_sex).upper() else 0
else:
sex_matches = np.nan
output.loc[row.Index, 'actual_sex'] = actual_sex
output.loc[row.Index, 'sex_matches'] = sex_matches
else:
pass # no Sex/Gender column found in samplesheet
return output
6a164cca97745158870c1da7ad0a330912380e28 | 2,504 | py | Python
tests/test_basics.py | sirosen/git-fortune | 69ef3e18506aa67fdc812854f1588828ea4e7448 | ["ECL-2.0", "Apache-2.0"] | null | null | null
tests/test_basics.py | sirosen/git-fortune | 69ef3e18506aa67fdc812854f1588828ea4e7448 | ["ECL-2.0", "Apache-2.0"] | null | null | null
tests/test_basics.py | sirosen/git-fortune | 69ef3e18506aa67fdc812854f1588828ea4e7448 | ["ECL-2.0", "Apache-2.0"] | null | null | null
import subprocess
from git_fortune._compat import fix_line_endings
from git_fortune.version import __version__
def test_help(capfd):
subprocess.check_call(["git-fortune", "-h"])
captured = capfd.readouterr()
assert (
fix_line_endings(
"""
A fortune-like command for showing git tips
Invoke it as 'git-fortune' or 'git fortune'
"""
)
in captured.out
)
def test_version(capfd):
subprocess.check_call(["git-fortune", "--version"])
captured = capfd.readouterr()
assert "git-fortune {}".format(__version__) in captured.out
def test_tip_boxformat(capfd):
subprocess.check_call(["git-fortune", "--id", "3"])
tip3boxbody = fix_line_endings(
"""\
+-------------------------------------------------------------------------------+
| GIT TIP #3 |
| |
| `git log --graph` can show you a tree-like representation of the git history. |
| |
| Try adding in `--oneline --decorate --all`. |
| |
+-------------------------------------------------------------------------------+
"""
)
captured = capfd.readouterr()
assert captured.out == tip3boxbody
def test_tip_plainformat(capfd):
subprocess.check_call(["git-fortune", "--format", "plain", "--id", "1"])
tip1plainbody = fix_line_endings(
"Modify your last commit before pushing with `git commit --amend`.\n"
)
captured = capfd.readouterr()
assert captured.out == tip1plainbody
def test_noargs(capfd):
"""just make sure it doesn't crashfail"""
subprocess.check_call(["git-fortune"])
captured = capfd.readouterr()
assert "GIT TIP #" in captured.out # from the box format
def test_category(capfd):
"""just make sure it doesn't crashfail"""
subprocess.check_call(["git-fortune", "--category", "diff"])
captured = capfd.readouterr()
assert "GIT TIP #" in captured.out # from the box format
def test_category_and_id_mutex(capfd):
ret = subprocess.call(["git-fortune", "--category", "diff", "--id", "3"])
assert ret == 2
captured = capfd.readouterr()
assert "" == captured.out
assert "argument --id: not allowed with argument --category" in captured.err
6a167dd5d92960139223aa44954c2cb6cacf4375 | 2,487 | py | Python
configs/keypoints/faster_rcnn_r50_fpn_keypoints.py | VGrondin/CBNetV2_mask_remote | b27246af5081d5395db3c3105d32226de05fcd13 | ["Apache-2.0"] | null | null | null
configs/keypoints/faster_rcnn_r50_fpn_keypoints.py | VGrondin/CBNetV2_mask_remote | b27246af5081d5395db3c3105d32226de05fcd13 | ["Apache-2.0"] | null | null | null
configs/keypoints/faster_rcnn_r50_fpn_keypoints.py | VGrondin/CBNetV2_mask_remote | b27246af5081d5395db3c3105d32226de05fcd13 | ["Apache-2.0"] | null | null | null
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py'
]
model = dict(
type='FasterRCNN',
# pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
# type='StandardRoIHead',
_delete_=True,
type='KeypointRoIHead',
output_heatmaps=False,
# keypoint_head=dict(
# type='HRNetKeypointHead',
# num_convs=8,
# in_channels=256,
# features_size=[256, 256, 256, 256],
# conv_out_channels=512,
# num_keypoints=5,
# loss_keypoint=dict(type='MSELoss', loss_weight=50.0)),
keypoint_decoder=dict(type='HeatmapDecodeOneKeypoint', upscale=4),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)))
)
#optimizer = dict(lr=0.002)
#lr_config = dict(step=[40, 55])
#total_epochs = 60
6a168cae49b57ce434a41c7070da071ca4734fc0 | 3,232 | py | Python
maskrcnn_benchmark/layers/roi_align_rotated_3d.py | picwoon/As_built_BIM | 9e6b81e2fd8904f5afd013e21d2db45456c138d5 | ["MIT"] | 2 | 2020-03-05T06:39:03.000Z | 2020-03-31T12:08:04.000Z
maskrcnn_benchmark/layers/roi_align_rotated_3d.py | picwoon/As_built_BIM | 9e6b81e2fd8904f5afd013e21d2db45456c138d5 | ["MIT"] | null | null | null
maskrcnn_benchmark/layers/roi_align_rotated_3d.py | picwoon/As_built_BIM | 9e6b81e2fd8904f5afd013e21d2db45456c138d5 | ["MIT"] | 1 | 2021-09-24T13:17:40.000Z | 2021-09-24T13:17:40.000Z
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch, math
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from SparseConvNet.sparseconvnet.tools_3d_2d import sparse_3d_to_dense_2d
import _C
class _ROIAlignRotated3D(Function):
@staticmethod
def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
ctx.save_for_backward(roi)
ctx.output_size = _pair(output_size)
ctx.spatial_scale = spatial_scale
ctx.sampling_ratio = sampling_ratio
ctx.input_shape = input.size()
# input: [4, 256, 304, 200, 7]
# roi: [171, 8]
# spatial_scale: 0.25
# output_size: [7,7,7]
# sampling_ratio: 2
output = _C.roi_align_rotated_3d_forward(
input, roi, spatial_scale, output_size[0], output_size[1], output_size[2], sampling_ratio
) # [171, 256, 7, 7]
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
rois, = ctx.saved_tensors
output_size = ctx.output_size
spatial_scale = ctx.spatial_scale
sampling_ratio = ctx.sampling_ratio
bs, ch, h, w, zsize = ctx.input_shape
grad_input = _C.roi_align_rotated_3d_backward(
grad_output,
rois,
spatial_scale,
output_size[0],
output_size[1],
output_size[2],
bs,
ch,
h,
w,
zsize,
sampling_ratio,
)
return grad_input, None, None, None, None
roi_align_rotated_3d = _ROIAlignRotated3D.apply
class ROIAlignRotated3D(nn.Module):
def __init__(self, output_size, spatial_scale, sampling_ratio):
'''
output_size:[pooled_height, pooled_width]
spatial_scale: size_of_map/size_of_original_image
sampling_ratio: how many points to use for bilinear_interpolate
'''
super(ROIAlignRotated3D, self).__init__()
self.output_size = output_size # (7,7,7)
self.spatial_scale = spatial_scale # 0.25
self.sampling_ratio = sampling_ratio # 2
def forward(self, input_s3d, rois_3d):
'''
input0: sparse 3d tensor
rois_3d: 3d box, xyz order is same as input0,
yaw unit is rad, anti-clock wise is positive
input: [batch_size, feature, h, w]
rois: [n,5] [batch_ind, center_w, center_h, roi_width, roi_height, theta]
theta unit: degree, anti-clock wise is positive
Note: the order of w and h inside of input and rois is different.
'''
input_d3d = sparse_3d_to_dense_2d(input_s3d)
output = roi_align_rotated_3d(
input_d3d, rois_3d, self.output_size, self.spatial_scale, self.sampling_ratio
)
return output
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
tmpstr += ")"
return tmpstr
6a16ef74b6b87e7acddaab1f4ea03a7e48da5422 | 8,360 | py | Python
src/model/utils/utils.py | J-CITY/METADATA-EXTRACTOR | 6bc01a7e4b74a3156c07efc2c80d5519c325dd53 | ["Apache-2.0"] | null | null | null
src/model/utils/utils.py | J-CITY/METADATA-EXTRACTOR | 6bc01a7e4b74a3156c07efc2c80d5519c325dd53 | ["Apache-2.0"] | null | null | null
src/model/utils/utils.py | J-CITY/METADATA-EXTRACTOR | 6bc01a7e4b74a3156c07efc2c80d5519c325dd53 | ["Apache-2.0"] | null | null | null
import numpy as np
import os
from .logger import printLog
UNK = "$UNK$"
NUM = "$NUM$"
NONE = "O"
class ParrotIOError(Exception):
def __init__(self, filename):
message = "ERROR: Can not find file {}.".format(filename)
super(ParrotIOError, self).__init__(message)
# Class that iterates over CoNLL Dataset
class CoNLLDataset(object):
def __init__(self, filename, processingWord=None, processingTag=None,
maxIter=None):
self.filename = filename
self.processingWord = processingWord # function that takes a word as input
self.processingTag = processingTag # function that takes a tag as input
self.maxIter = maxIter # max number of sentences to yield
self.length = None
def __iter__(self):
niter = 0
with open(self.filename, encoding='utf-8') as f:
words, tags = [], []
for line in f:
line = line.strip() # delete spaces in start and end
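                # CoNLL format: blank lines (or -DOCSTART- markers) separate sentences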
if (len(line) == 0 or line.startswith("-DOCSTART-")):
if len(words) != 0:
niter += 1
if self.maxIter is not None and niter > self.maxIter:
break
yield words, tags
words, tags = [], []
else:
ls = line.split(' ')
word, tag = ls[0],ls[-1]
if self.processingWord is not None:
word = self.processingWord(word)
if self.processingTag is not None:
tag = self.processingTag(tag)
words += [word]
tags += [tag]
def __len__(self):
if self.length is None:
self.length = 0
for _ in self:
self.length += 1
return self.length
#Create a dictionary from dataset
def getDictionary(datasets):
printLog("Building dictionary: ")
dictWords = set()
dictTags = set()
for dataset in datasets:
for words, tags in dataset:
dictWords.update(words)
dictTags.update(tags)
printLog("DONE: " + str(len(dictWords)) + " size")
return dictWords, dictTags
def getCharDictionary(dataset):
dictChar = set()
for words, _ in dataset:
for word in words:
dictChar.update(word)
return dictChar
#filename - path wo file with vectors
def getGloveDictionary(filename):
printLog("Building dictionary")
dictGlove = set()
with open(filename, encoding='utf-8') as f:
for line in f:
word = line.strip().split(' ')[0]
dictGlove.add(word)
printLog("DONE: "+ str(len(dictGlove)) +" tokens")
return dictGlove
def saveDictionary(dictionary, filename):
printLog("SAVE")
with open(filename, "w", encoding='utf-8') as f:
for i, word in enumerate(dictionary):
if i != len(dictionary) - 1:
f.write("{}\n".format(word))
else:
f.write(word)
def loadDictionary(filename):
try:
d = dict()
with open(filename, encoding='utf-8') as f:
for idx, word in enumerate(f):
word = word.strip()
d[word] = idx
except IOError:
raise ParrotIOError(filename)
return d
def exportCompactGloveVectors(dictionary, gloveFilename, trimmedFilename, dim):
embeddings = np.zeros([len(dictionary), dim])
with open(gloveFilename, encoding='utf-8') as f:
for line in f:
line = line.strip().split(' ')
word = line[0]
if word in dictionary:
embedding = [float(x) for x in line[1:]] #glove coords
wordID = dictionary[word]
embeddings[wordID] = np.asarray(embedding)
np.savez_compressed(trimmedFilename, embeddings=embeddings) # store glove matrix
def getCompactGloveVectors(filename):
try:
with np.load(filename) as data:
return data["embeddings"]
except IOError:
raise ParrotIOError(filename)
def getProcessingWord(dictWords=None, dictChars=None,
lowercase=False, chars=False, allowUNK=True):
def f(word):
# char ids for word
if (dictChars is not None) and (chars == True):
charIDs = []
for char in word:
if (char in dictChars):
charIDs.append(dictChars[char])
if lowercase:
word = word.lower()
if word.isdigit():
word = NUM
# word id
if (dictWords is not None):
if word in dictWords:
word = dictWords[word]
elif allowUNK:
word = dictWords[UNK]
else:
raise Exception("Unknow tag.")
if (dictChars is not None) and (chars == True):
# chars ids and word id
return charIDs, word
# word id
return word
return f
def _padSequences(sequences, padtok, maxLength):
sequencePadded, sequenceLength = [], []
for seq in sequences:
seq = list(seq)
seq_ = seq[:maxLength] + [padtok]*max(maxLength - len(seq), 0)
sequencePadded += [seq_]
sequenceLength += [min(len(seq), maxLength)]
# all sublist have same length
return sequencePadded, sequenceLength
def padSequences(sequences, padtok, nlevels=1):
if nlevels == 1:
maxLength = max(map(lambda x : len(x), sequences))
sequencePadded, sequenceLength = _padSequences(sequences,
padtok, maxLength)
elif nlevels == 2:
maxLengthWord = max([max(map(lambda x: len(x), seq))
for seq in sequences])
sequencePadded, sequenceLength = [], []
for seq in sequences:
# all words are same length
sp, sl = _padSequences(seq, padtok, maxLengthWord)
sequencePadded += [sp]
sequenceLength += [sl]
maxLengthSentence = max(map(lambda x : len(x), sequences))
sequencePadded, _ = _padSequences(sequencePadded,
[padtok]*maxLengthWord, maxLengthSentence)
sequenceLength, _ = _padSequences(sequenceLength, 0,
maxLengthSentence)
return sequencePadded, sequenceLength
def minibatches(data, minibatchSize):
x_batch, y_batch = [], []
for (x, y) in data:
if len(x_batch) == minibatchSize:
yield x_batch, y_batch
x_batch, y_batch = [], []
if type(x[0]) == tuple:
x = zip(*x)
x_batch += [x]
y_batch += [y]
if len(x_batch) != 0:
yield x_batch, y_batch
def getChunkType(tok, idxToTag):
tagName = idxToTag[tok]
tagClass = tagName.split('-')[0]
tagType = tagName.split('-')[-1]
return tagClass, tagType
def getChunks(seq, tags):
"""Given a sequence of tags, group entities and their position
Args:
seq: [4, 4, 0, 0, ...] sequence of labels
tags: dict["O"] = 4
Returns:
list of (chunkType, chunkStart, chunkEnd)
Example:
seq = [4, 5, 0, 3]
tags = {"B-PER": 4, "I-PER": 5, "B-LOC": 3}
result = [("PER", 0, 2), ("LOC", 3, 4)]
"""
default = tags[NONE]
idxToTag = {idx: tag for tag, idx in tags.items()}
chunks = []
chunkType, chunkStart = None, None
for i, tok in enumerate(seq):
# End of a chunk 1
if tok == default and chunkType is not None:
# Add a chunk.
chunk = (chunkType, chunkStart, i)
chunks.append(chunk)
chunkType, chunkStart = None, None
# End of a chunk + start of a chunk!
elif tok != default:
tokChunkClass, tokChunkType = getChunkType(tok, idxToTag)
if chunkType is None:
chunkType, chunkStart = tokChunkType, i
elif tokChunkType != chunkType or tokChunkClass == "B":
chunk = (chunkType, chunkStart, i)
chunks.append(chunk)
chunkType, chunkStart = tokChunkType, i
else:
pass
# end condition
if chunkType is not None:
chunk = (chunkType, chunkStart, len(seq))
chunks.append(chunk)
return chunks
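A quick, hedged usage check for two of the helpers above, assuming they are in scope as defined. The getChunks values come from its own docstring, with the "O" tag added since the function looks up tags[NONE]; the padSequences inputs are invented.

# getChunks: group labelled tokens into (type, start, end) chunks
tags = {"B-PER": 4, "I-PER": 5, "B-LOC": 3, NONE: 0}
print(getChunks([4, 5, 0, 3], tags))   # [('PER', 0, 2), ('LOC', 3, 4)]

# padSequences: pad ragged sequences to a common length and keep the true lengths
padded, lengths = padSequences([[1, 2, 3], [4]], padtok=0)
print(padded, lengths)                 # [[1, 2, 3], [4, 0, 0]] [3, 1]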
| 32.784314
| 84
| 0.55323
| 901
| 8,360
| 5.084351
| 0.241953
| 0.008732
| 0.015717
| 0.015281
| 0.14451
| 0.120061
| 0.087754
| 0.079895
| 0.048461
| 0
| 0
| 0.00784
| 0.3439
| 8,360
| 254
| 85
| 32.913386
| 0.827347
| 0.093062
| 0
| 0.192893
| 0
| 0
| 0.023148
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091371
| false
| 0.005076
| 0.015228
| 0
| 0.182741
| 0.030457
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a177f73dcbbd6c1d2721285cc1b7c72b4784fb1
| 2,781
|
py
|
Python
|
discordbot/economy/currencies.py
|
minhhoang1023/GamestonkTerminal
|
195dc19b491052df080178c0cc6a9d535a91a704
|
[
"MIT"
] | 1
|
2022-02-18T04:02:52.000Z
|
2022-02-18T04:02:52.000Z
|
discordbot/economy/currencies.py
|
minhhoang1023/GamestonkTerminal
|
195dc19b491052df080178c0cc6a9d535a91a704
|
[
"MIT"
] | null | null | null |
discordbot/economy/currencies.py
|
minhhoang1023/GamestonkTerminal
|
195dc19b491052df080178c0cc6a9d535a91a704
|
[
"MIT"
] | null | null | null |
import os
import df2img
import disnake
import pandas as pd
from PIL import Image
import discordbot.config_discordbot as cfg
from discordbot.config_discordbot import logger
from discordbot.helpers import autocrop_image
from gamestonk_terminal.economy import wsj_model
async def currencies_command(ctx):
"""Currencies overview [Wall St. Journal]"""
try:
# Debug user input
if cfg.DEBUG:
logger.debug("econ-currencies")
# Retrieve data
df = wsj_model.global_currencies()
df = pd.DataFrame.from_dict(df)
# Check for argument
if df.empty:
raise Exception("No available data found")
df["Last"] = pd.to_numeric(df["Last"].astype(float))
df["Chng"] = pd.to_numeric(df["Chng"].astype(float))
df["%Chng"] = pd.to_numeric(df["%Chng"].astype(float))
formats = {"Last": "{:.2f}", "Chng": "{:.2f}", "%Chng": "{:.2f}%"}
for col, value in formats.items():
df[col] = df[col].map(lambda x: value.format(x)) # pylint: disable=W0640
df = df.fillna("")
df.set_index(" ", inplace=True)
# Debug user output
if cfg.DEBUG:
logger.debug(df.to_string())
df = df[
[
"Last",
"Chng",
"%Chng",
]
]
dindex = len(df.index)
fig = df2img.plot_dataframe(
df,
fig_size=(800, (40 + (40 * dindex))),
col_width=[8, 3, 3],
tbl_cells=dict(
align="left",
height=35,
),
template="plotly_dark",
font=dict(
family="Consolas",
size=20,
),
paper_bgcolor="rgba(0, 0, 0, 0)",
)
imagefile = "econ-currencies.png"
df2img.save_dataframe(fig=fig, filename=imagefile)
image = Image.open(imagefile)
image = autocrop_image(image, 0)
image.save(imagefile, "PNG", quality=100)
image = disnake.File(imagefile)
title = "Economy: [WSJ] Currencies"
embed = disnake.Embed(title=title, colour=cfg.COLOR)
embed.set_image(url=f"attachment://{imagefile}")
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
os.remove(imagefile)
await ctx.send(embed=embed, file=image)
except Exception as e:
embed = disnake.Embed(
title="ERROR Economy: [WSJ] Currencies",
colour=cfg.COLOR,
description=e,
)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
await ctx.send(embed=embed, delete_after=30.0)
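The per-column formatting step above (coerce to numeric, then map a format string over each column) is the only part that does not depend on Discord or df2img, so it can be sketched on its own; the frame contents below are invented.

import pandas as pd

df = pd.DataFrame({"Last": ["1.2345", "2.5"], "Chng": ["-0.01", "0.2"], "%Chng": ["-0.8", "8.0"]})
formats = {"Last": "{:.2f}", "Chng": "{:.2f}", "%Chng": "{:.2f}%"}

for col, fmt in formats.items():
    # Binding the format string via fmt.format avoids the late-binding issue the original silences with pylint W0640.
    df[col] = pd.to_numeric(df[col].astype(float)).map(fmt.format)

print(df)   # "Last" renders as 1.23 / 2.50, "%Chng" as -0.80% / 8.00%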
| 27.81
| 85
| 0.54297
| 316
| 2,781
| 4.670886
| 0.417722
| 0.01626
| 0.022358
| 0.026423
| 0.184959
| 0.126694
| 0.126694
| 0.126694
| 0.126694
| 0.126694
| 0
| 0.018848
| 0.332255
| 2,781
| 99
| 86
| 28.090909
| 0.775983
| 0.032003
| 0
| 0.131579
| 0
| 0
| 0.095004
| 0.009084
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.118421
| 0
| 0.118421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a17d1c656acfd1f8102ff27381a0764e4f0a027
| 3,276
|
py
|
Python
|
aiovectortiler/config_handler.py
|
shongololo/aiovectortiler
|
cfd0008d5ac05baee52a24264f991946324f5a42
|
[
"MIT"
] | 4
|
2016-07-24T20:39:40.000Z
|
2018-12-26T06:43:35.000Z
|
aiovectortiler/config_handler.py
|
songololo/aiovectortiler
|
cfd0008d5ac05baee52a24264f991946324f5a42
|
[
"MIT"
] | 7
|
2016-08-10T16:27:39.000Z
|
2018-10-13T13:16:24.000Z
|
aiovectortiler/config_handler.py
|
songololo/aiovectortiler
|
cfd0008d5ac05baee52a24264f991946324f5a42
|
[
"MIT"
] | 3
|
2016-08-09T03:12:24.000Z
|
2016-11-08T01:17:29.000Z
|
import os
import yaml
import logging
logger = logging.getLogger(__name__)
class Configs:
server = None
recipes = {}
DB = None
plugins = None
@classmethod
def init_server_configs(cls, server_configs):
with open(server_configs) as s_c:
cls.server = yaml.load(s_c.read())
@classmethod
def init_layer_recipes(cls, recipe_configs):
recipe_name = None
if '/' in recipe_configs:
recipe_name = os.path.normpath(recipe_configs).split('/')[-1]
# for windows
elif '\\' in recipe_configs:
recipe_name = os.path.normpath(recipe_configs).split('\\')[-1]
if recipe_name[-4:] == '.yml':
recipe_name = recipe_name[:-4]
elif recipe_name[-5:] == '.yaml':
recipe_name = recipe_name[:-5]
else:
raise FileExistsError('File in layer recipes folder does not have a YAML extension: {0}'.format(recipe_configs))
with open(recipe_configs) as r_c:
load_recipe = yaml.load(r_c.read())
cls.recipes[recipe_name] = Recipe(load_recipe)
# add the recipe name based on the file name
# this is needed by the tilejson query
cls.recipes[recipe_name].name = recipe_name
logger.info('Adding layer: {0}'.format(recipe_name))
'''
Plugins.load()
Plugins.hook('before_load', config=Configs)
def load_recipe(data):
name = data.get('name', 'default')
if name in RECIPES:
raise ValueError('Recipe with name {} already exist'.format(name))
data['name'] = name
RECIPES[name] = Recipe(data)
if len(RECIPES) == 1 and name != 'default':
RECIPES['default'] = RECIPES[data['name']]
for recipe in Configs.layers:
with Path(recipe).open() as f:
load_recipe(yaml.load(f.read()))
Plugins.hook('load', config=config, recipes=RECIPES)
'''
# The following model structures for recipes / layers / queries allow searching up the chain
# for attributes: if an attribute is not found at the recipe level, the lookup falls back to the server configs.
class Recipe(dict):
def __init__(self, data):
super().__init__(data)
self.load_layers(data['layers'])
def load_layers(self, layers):
self.layers = {}
for layer in layers:
self.layers[layer['name']] = Layer(self, layer)
def __getattr__(self, attr):
return self.get(attr, Configs.server.get(attr, None))
class Layer(dict):
def __init__(self, recipe, layer_data):
self.recipe = recipe
super().__init__(layer_data)
self.load_queries(layer_data['queries'])
def load_queries(self, queries):
self.queries = []
for query in queries:
self.queries.append(Query(self, query))
def __getattr__(self, attr):
return self.get(attr, getattr(self.recipe, attr))
@property
def id(self):
return '{0}:{1}'.format(self.recipe.name, self.name)
@property
def description(self):
return self.get('description', 'no description provided')
class Query(dict):
def __init__(self, layer, data):
self.layer = layer
super().__init__(data)
def __getattr__(self, attr):
return self.get(attr, getattr(self.layer, attr))
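A hedged sketch of the lookup chain described in the comment above (Query falls back to Layer, then Recipe, then the server configs), assuming the classes are in scope as defined; the recipe contents and server values are invented.

Configs.server = {"srid": 3857, "buffer": 8}

recipe = Recipe({
    "name": "demo",
    "buffer": 16,  # overrides the server-level default
    "layers": [{
        "name": "roads",
        "queries": [{"sql": "SELECT way FROM roads", "minzoom": 5}],
    }],
})

layer = recipe.layers["roads"]
query = layer.queries[0]
print(layer.buffer)   # 16   -> found at the recipe level
print(query.srid)     # 3857 -> falls back through layer and recipe to the server configs
print(query.minzoom)  # 5    -> found on the query itself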
| 28.99115
| 124
| 0.626984
| 420
| 3,276
| 4.695238
| 0.25
| 0.076065
| 0.026369
| 0.03499
| 0.123225
| 0.123225
| 0.123225
| 0.123225
| 0.105477
| 0.105477
| 0
| 0.004473
| 0.24939
| 3,276
| 112
| 125
| 29.25
| 0.797479
| 0.084554
| 0
| 0.140625
| 0
| 0
| 0.06296
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1875
| false
| 0
| 0.046875
| 0.078125
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a17e7c4a91ac2e9483c7bdc29806cbac3d7a40c
| 13,237
|
py
|
Python
|
t2vretrieval/models/mlmatch.py
|
Roc-Ng/HANet
|
e679703e9e725205424d87f750358fb4f62ceec5
|
[
"MIT"
] | 34
|
2021-07-26T12:22:05.000Z
|
2022-03-08T03:49:33.000Z
|
t2vretrieval/models/mlmatch.py
|
hexiangteng/HANet
|
31d37ccad9c56ff9422cb4eb9d32e79e7b9bc831
|
[
"MIT"
] | null | null | null |
t2vretrieval/models/mlmatch.py
|
hexiangteng/HANet
|
31d37ccad9c56ff9422cb4eb9d32e79e7b9bc831
|
[
"MIT"
] | 3
|
2021-08-03T06:00:26.000Z
|
2021-12-27T03:26:12.000Z
|
import numpy as np
import torch
import framework.ops
import t2vretrieval.encoders.mlsent
import t2vretrieval.encoders.mlvideo
import t2vretrieval.models.globalmatch
from t2vretrieval.models.criterion import cosine_sim
from t2vretrieval.models.globalmatch import VISENC, TXTENC
class RoleGraphMatchModelConfig(t2vretrieval.models.globalmatch.GlobalMatchModelConfig):
def __init__(self):
super().__init__()
self.num_verbs = 4
self.num_nouns = 6
self.attn_fusion = 'embed' # sim, embed
self.simattn_sigma = 4
self.hard_topk = 1
self.max_violation = True
self.loss_weights = None
## this config will be overridden by model.json through the load and load_from_dict functions
self.subcfgs[VISENC] = t2vretrieval.encoders.mlvideo.MultilevelEncoderConfig()
self.subcfgs[TXTENC] = t2vretrieval.encoders.mlsent.RoleGraphEncoderConfig()
class RoleGraphMatchModel(t2vretrieval.models.globalmatch.GlobalMatchModel):
def build_submods(self):
return {
VISENC: t2vretrieval.encoders.mlvideo.MultilevelEncoder(self.config.subcfgs[VISENC]),
TXTENC: t2vretrieval.encoders.mlsent.RoleGraphEncoder(self.config.subcfgs[TXTENC])
}
def forward_video_embed(self, batch_data):
vid_fts = torch.FloatTensor(batch_data['attn_fts']).to(self.device)
vid_lens = torch.LongTensor(batch_data['attn_lens']).to(self.device)
# (batch, max_vis_len, dim_embed)
vid_sent_embeds, vid_verb_embeds, vid_noun_embeds, local_sent_embeds, logits, max_len = self.submods[VISENC](vid_fts, vid_lens)
return {
'vid_sent_embeds': vid_sent_embeds,
'vid_verb_embeds': vid_verb_embeds,
'vid_noun_embeds': vid_noun_embeds,
'local_vid_embeds': local_sent_embeds,
'vid_lens': vid_lens,
'max_len': max_len,
'logits': logits,
}
def forward_text_embed(self, batch_data):
sent_ids = torch.LongTensor(batch_data['sent_ids']).to(self.device) ## sentence
sent_lens = torch.LongTensor(batch_data['sent_lens']).to(self.device) ## length
verb_masks = torch.BoolTensor(batch_data['verb_masks']).to(self.device) ## batch*nv*max_sen_len
noun_masks = torch.BoolTensor(batch_data['noun_masks']).to(self.device)
node_roles = torch.LongTensor(batch_data['node_roles']).to(self.device) ## batch*(n_v+n_n)
rel_edges = torch.FloatTensor(batch_data['rel_edges']).to(self.device) ## batch*(1+n_v+n_n)*(1+n_v+n_n)
verb_lens = torch.sum(verb_masks, 2)
noun_lens = torch.sum(noun_masks, 2)
# sent_embeds: (batch, dim_embed)
# verb_embeds, noun_embeds: (batch, num_xxx, dim_embed)
sent_embeds, verb_embeds, noun_embeds, local_sent_embeds, sent_logits = self.submods[TXTENC](
sent_ids, sent_lens, verb_masks, noun_masks, node_roles, rel_edges)
return {
'sent_embeds': sent_embeds, 'sent_lens': sent_lens,
'verb_embeds': verb_embeds, 'verb_lens': verb_lens,
'noun_embeds': noun_embeds, 'noun_lens': noun_lens,
'sent_logits': sent_logits, 'local_sent_embeds': local_sent_embeds,
}
def generate_phrase_scores(self, vid_embeds, vid_masks, phrase_embeds, phrase_masks, mask_flag=False):
'''Args:
- vid_embeds: (batch, num_frames, embed_size)
- vid_masks: (batch, num_frames)
- phrase_embeds: (batch, num_phrases, embed_size)
- phrase_masks: (batch, num_phrases)
'''
batch_vids, num_frames, _ = vid_embeds.size()
vid_pad_masks = (vid_masks == 0).unsqueeze(1).unsqueeze(3)
batch_phrases, num_phrases, dim_embed = phrase_embeds.size()
# compute component-wise similarity
vid_2d_embeds = vid_embeds.view(-1, dim_embed)
phrase_2d_embeds = phrase_embeds.view(-1, dim_embed)
# size = (batch_vids, batch_phrases, num_frames, num_phrases)
ground_sims = cosine_sim(vid_2d_embeds, phrase_2d_embeds).view(
batch_vids, num_frames, batch_phrases, num_phrases).transpose(1, 2)
###
if mask_flag:
vid_attn_per_word = ground_sims.masked_fill(vid_pad_masks, 0) ##############
else:
vid_attn_per_word = ground_sims
vid_attn_per_word[vid_attn_per_word < 0] = 0
vid_attn_per_word = framework.ops.l2norm(vid_attn_per_word, dim=2)
if mask_flag:
vid_attn_per_word = vid_attn_per_word.masked_fill(vid_pad_masks, -1e18) #################
vid_attn_per_word = torch.softmax(self.config.simattn_sigma * vid_attn_per_word, dim=2)
if self.config.attn_fusion == 'embed':
vid_attned_embeds = torch.einsum('abcd,ace->abde', vid_attn_per_word, vid_embeds)
word_attn_sims = torch.einsum('abde,bde->abd',
framework.ops.l2norm(vid_attned_embeds),
framework.ops.l2norm(phrase_embeds))
elif self.config.attn_fusion == 'sim':
# (batch_vids, batch_phrases, num_phrases)
word_attn_sims = torch.sum(ground_sims * vid_attn_per_word, dim=2)
# sum: (batch_vid, batch_phrases)
phrase_scores = torch.sum(word_attn_sims * phrase_masks.float().unsqueeze(0), 2) \
/ torch.sum(phrase_masks, 1).float().unsqueeze(0).clamp(min=1)
return phrase_scores
def generate_scores(self, **kwargs):
##### shared #####
vid_lens = kwargs['vid_lens'] # (batch, )
num_frames = int(kwargs['max_len'])  # alternative: kwargs['vid_verb_embeds'].size(1)
vid_masks = framework.ops.sequence_mask(vid_lens, num_frames, inverse=False)
# batch*max_len
##### sentence-level scores #####
sent_scores = cosine_sim(kwargs['vid_sent_embeds'], kwargs['sent_embeds'])
#######################################################
# concept scores use jaccard similarity
concept_verb_scores = self.jaccard_sim(kwargs['logits'][0], kwargs['sent_logits'][0])
concept_noun_scores = self.jaccard_sim(kwargs['logits'][1], kwargs['sent_logits'][1])
#######################################################
##### verb-level scores #####
vid_verb_embeds = kwargs['vid_verb_embeds'] # (batch, num_frames, dim_embed)
verb_embeds = kwargs['verb_embeds'] # (batch, num_verbs, dim_embed)
verb_lens = kwargs['verb_lens'] # (batch, num_verbs)
local_vid_embeds = kwargs['local_vid_embeds']
local_sent_embeds = kwargs['local_sent_embeds']
verb_masks = framework.ops.sequence_mask(torch.sum(verb_lens > 0, 1).long(),
self.config.num_verbs, inverse=False)
# sum: (batch_vids, batch_sents)
verb_scores = self.generate_phrase_scores(vid_verb_embeds, vid_masks, verb_embeds, verb_masks)
ind_verb_scores = self.generate_phrase_scores(local_vid_embeds[0], vid_masks, local_sent_embeds[0], verb_masks, True)
##### noun-level scores #####
vid_noun_embeds = kwargs['vid_noun_embeds'] # (batch, num_frames, dim_embed)
noun_embeds = kwargs['noun_embeds'] # (batch, num_nouns, dim_embed)
noun_lens = kwargs['noun_lens'] # (batch, num_nouns)
noun_masks = framework.ops.sequence_mask(torch.sum(noun_lens > 0, 1).long(),
self.config.num_nouns, inverse=False)
# sum: (batch_vids, batch_sents)
noun_scores = self.generate_phrase_scores(vid_noun_embeds, vid_masks, noun_embeds, noun_masks)
ind_noun_scores = self.generate_phrase_scores(local_vid_embeds[1], vid_masks, local_sent_embeds[1], noun_masks, True)
return sent_scores, verb_scores, noun_scores, concept_verb_scores, concept_noun_scores, ind_verb_scores, ind_noun_scores
def jaccard_sim(self, im, s):
im_bs = im.size(0)
s_bs = s.size(0)
im = im.unsqueeze(1).expand(-1, s_bs, -1)
s = s.unsqueeze(0).expand(im_bs, -1, -1)
intersection = torch.min(im, s).sum(-1)
union = torch.max(im, s).sum(-1)
score = intersection / union
return score
def forward_loss(self, batch_data, step=None):
enc_outs = self.forward_video_embed(batch_data)
cap_enc_outs = self.forward_text_embed(batch_data)
enc_outs.update(cap_enc_outs)
sent_scores, verb_scores, noun_scores, concept_verb_scores, concept_noun_scores, local_verb_scores, local_noun_scores = self.generate_scores(**enc_outs)
scores = (sent_scores + verb_scores + noun_scores + local_verb_scores + local_noun_scores) / 5
scores2 = (concept_verb_scores + concept_noun_scores) / 2
sent_loss = self.criterion(sent_scores)
verb_loss = self.criterion(verb_scores)
noun_loss = self.criterion(noun_scores)
eta = 0.1
mu = 0.01
concept_verb_loss = 0.5*self.criterion(concept_verb_scores)
concept_noun_loss = 0.5*self.criterion(concept_noun_scores)
concept_loss = eta*self.criterion(scores2)
verb_concept_label = torch.FloatTensor(batch_data['verb_concept_label']).to(self.device)
noun_concept_label = torch.FloatTensor(batch_data['noun_concept_label']).to(self.device)
verb_concept_mask = torch.FloatTensor(batch_data['verb_concept_mask']).to(self.device)
noun_concept_mask = torch.FloatTensor(batch_data['noun_concept_mask']).to(self.device)
v_mask_sum = torch.sum(verb_concept_mask, dim=1)
n_mask_sum = torch.sum(noun_concept_mask, dim=1)
vbce_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['logits'][0], verb_concept_label), dim=1)
vbce_loss = mu*torch.mean(vbce_loss/v_mask_sum)
nbce_loss = torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['logits'][1], noun_concept_label), dim=1)
nbce_loss = mu*torch.mean(nbce_loss/n_mask_sum)
vbce_sent_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['sent_logits'][0], verb_concept_label), dim=1)
vbce_sent_loss = mu*torch.mean(vbce_sent_loss/v_mask_sum)
nbce_sent_loss = torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['sent_logits'][1], noun_concept_label), dim=1)
nbce_sent_loss = mu*torch.mean(nbce_sent_loss/n_mask_sum)
fusion_loss = self.criterion(scores)
if self.config.loss_weights is None:
loss = fusion_loss + 1*(vbce_loss+nbce_loss) + 1*(vbce_sent_loss+nbce_sent_loss) + concept_loss
else:
loss = self.config.loss_weights[0] * fusion_loss + \
self.config.loss_weights[1] * sent_loss + \
self.config.loss_weights[2] * verb_loss + \
self.config.loss_weights[3] * noun_loss + \
vbce_loss + nbce_loss
if step is not None and self.config.monitor_iter > 0 and step % self.config.monitor_iter == 0:
neg_scores = scores.masked_fill(torch.eye(len(scores), dtype=torch.bool).to(self.device), -1e10)
self.print_fn('\tstep %d: pos mean scores %.2f, hard neg mean scores i2t %.2f, t2i %.2f'%(
step, torch.mean(torch.diag(scores)), torch.mean(torch.max(neg_scores, 1)[0]),
torch.mean(torch.max(neg_scores, 0)[0])))
self.print_fn('\tstep %d: sent_loss %.4f, verb_loss %.4f, noun_loss %.4f, fusion_loss %.4f'%(
step, sent_loss.data.item(), verb_loss.data.item(), noun_loss.data.item(), fusion_loss.data.item()))
self.print_fn('\tstep %d: vbce_loss %.4f, nbce_loss %.4f'%(step, vbce_loss.item(), nbce_loss.item()))
self.print_fn('\tstep %d: vbce_sent_loss %.4f, nbce_sent_loss %.4f'%(step, vbce_sent_loss.item(), nbce_sent_loss.item()))
self.print_fn('\tstep %d: sim_loss %.4f, vsim_loss %.4f, nsim_loss %.4f'%(step, concept_loss.item(),
concept_verb_loss.item(), concept_noun_loss.item()))
return loss
def evaluate_scores(self, tst_reader):
K = self.config.subcfgs[VISENC].num_levels
K = K + 4
assert K == 7, 'Note that this error indicates losing other scores!'
vid_names, all_scores = [], [[] for _ in range(K)]
cap_names = tst_reader.dataset.captions
for vid_data in tst_reader:
vid_names.extend(vid_data['names'])
vid_enc_outs = self.forward_video_embed(vid_data)
for k in range(K):
all_scores[k].append([])
ijj = 0
for cap_data in tst_reader.dataset.iterate_over_captions(self.config.tst_batch_size):
cap_enc_outs = self.forward_text_embed(cap_data)
cap_enc_outs.update(vid_enc_outs)
indv_scores = self.generate_scores(**cap_enc_outs)
for k in range(K):
all_scores[k][-1].append(indv_scores[k].data.cpu().numpy())
ijj += 0
for k in range(K):
all_scores[k][-1] = np.concatenate(all_scores[k][-1], axis=1)
for k in range(K):
all_scores[k] = np.concatenate(all_scores[k], axis=0) # (n_img, n_cap)
all_scores = np.array(all_scores) # (k, n_img, n_cap)
return vid_names, cap_names, all_scores
def evaluate(self, tst_reader, return_outs=False):
vid_names, cap_names, scores = self.evaluate_scores(tst_reader)
i2t_gts = []
for vid_name in vid_names:
i2t_gts.append([])
for i, cap_name in enumerate(cap_names):
if cap_name in tst_reader.dataset.ref_captions[vid_name]:
i2t_gts[-1].append(i)
t2i_gts = {}
for i, t_gts in enumerate(i2t_gts):
for t_gt in t_gts:
t2i_gts.setdefault(t_gt, [])
t2i_gts[t_gt].append(i)
idx = [0, 1, 2, 5, 6]
fused_scores = (np.mean(scores[idx], 0) + np.mean(scores[3:5], 0))/2
metrics = self.calculate_metrics(fused_scores, i2t_gts, t2i_gts)
if return_outs:
outs = {
'vid_names': vid_names,
'cap_names': cap_names,
'scores': scores,
}
return metrics, outs
else:
return metrics
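The concept scores above use a soft Jaccard similarity: elementwise min summed over concepts, divided by elementwise max summed over concepts. A self-contained numerical check with invented logits, independent of the rest of the model:

import torch

im = torch.tensor([[0.2, 0.8, 0.0]])   # (1, n_concepts) video-side concept scores
s = torch.tensor([[0.4, 0.4, 0.1]])    # (1, n_concepts) sentence-side concept scores

intersection = torch.min(im.unsqueeze(1), s.unsqueeze(0)).sum(-1)   # 0.2 + 0.4 + 0.0 = 0.6
union = torch.max(im.unsqueeze(1), s.unsqueeze(0)).sum(-1)          # 0.4 + 0.8 + 0.1 = 1.3
print(intersection / union)   # tensor([[0.4615]])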
| 46.939716
| 156
| 0.694568
| 1,936
| 13,237
| 4.410641
| 0.129132
| 0.02108
| 0.018269
| 0.019674
| 0.338564
| 0.231994
| 0.146973
| 0.093688
| 0.046141
| 0.038412
| 0
| 0.01392
| 0.169676
| 13,237
| 281
| 157
| 47.106762
| 0.762988
| 0.080079
| 0
| 0.055556
| 0
| 0.00463
| 0.079025
| 0
| 0
| 0
| 0
| 0
| 0.00463
| 1
| 0.046296
| false
| 0
| 0.037037
| 0.00463
| 0.138889
| 0.023148
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a190e5eb1440e6a01fc6f170da74507f39571ac
| 6,295
|
py
|
Python
|
dronesym-python/flask-api/src/dronepool.py
|
dilinade/DroneSym
|
30073bd31343bc27c6b8d72e48b4e06ced0c5fe6
|
[
"Apache-2.0"
] | 1
|
2019-03-24T23:50:07.000Z
|
2019-03-24T23:50:07.000Z
|
dronesym-python/flask-api/src/dronepool.py
|
dilinade/DroneSym
|
30073bd31343bc27c6b8d72e48b4e06ced0c5fe6
|
[
"Apache-2.0"
] | null | null | null |
dronesym-python/flask-api/src/dronepool.py
|
dilinade/DroneSym
|
30073bd31343bc27c6b8d72e48b4e06ced0c5fe6
|
[
"Apache-2.0"
] | null | null | null |
#DronePool module which handles interaction with SITLs
from dronekit import Vehicle, VehicleMode, connect
from dronekit_sitl import SITL
from threading import Lock
import node, time
import mavparser
import threadrunner
drone_pool = {}
instance_count = 0
env_test = False
q = None
mq = None
lock = Lock()
class Sim(SITL, object):
def __init__(self, instance=1, home=None):
super(Sim, self).download("copter", "3.3", verbose=not env_test)
self.instance = instance
if home:
self.home = home
else:
self.home = {"lat":6.9271, "lon":79.8612, "alt": 1}
self.p = None
return
def connection_string(self):
return super(Sim, self).connection_string()[:-4] + str(5760 + self.instance * 10)
def launch(self):
home_str = str(self.home['lat']) + ',' + str(self.home['lon']) + ',0,353'
super(Sim, self).launch(["--instance", str(self.instance), "--home", home_str], await_ready=True, verbose=not env_test)
def get_sitl_status(self):
return { 'id': self.instance, 'home': self.home }
def initialize():
global q, mq, instance_count
q = threadrunner.q
mq = threadrunner.mq
drones = node.get_drones()['drones']
if not drones:
return
for drone_id in drones:
if drone_id not in list(drone_pool.keys()):
drone = node.get_drone_by_id(drone_id)
location = drone['location']
q.put((create_new_drone, { "db_key" : drone_id, "home" : location }))
if 'status' in list(drone.keys()) and drone['status'] == 'FLYING':
q.put((resume_flight, { "drone_id" : drone_id }))
def resume_flight(kwargs):
drone_id = kwargs.get("drone_id", None)
drone = node.get_drone_by_id(drone_id)
waypoints = []
for wp in sorted(drone['waypoints']):
waypoints.append(drone['waypoints'][wp])
next_waypoint = waypoints.index(drone['waypoint'])
print (next_waypoint)
q.put((takeoff_drone, { "drone_id" : drone_id, "waypoints" : waypoints[next_waypoint:] }))
def create_new_drone(kwargs):
global instance_count
instance_count += 1
home = kwargs.get("home", None)
db_key = kwargs.get("db_key", None)
retries = 3
drone = Sim(instance_count, home)
drone.launch()
while retries > 0:
try:
drone_conn = connect(drone.connection_string(), wait_ready=True)
break
except:
print ("Retrying...")
retries -= 1
drone_pool[db_key] = drone_conn
res = { "status" : "OK", "id" : db_key }
return res
def remove_drone(kwargs):
drone_id = kwargs.get("drone_id", None)
if drone_id not in drone_pool:
return { "status" : "ERROR", "msg" : "Drone instance not found" }
drone = drone_pool[drone_id]
if drone.mode == VehicleMode('AUTO'):
return { "status" : "ERROR", "msg" : "Drone in operation" }
del drone_pool[drone_id]
return { "status" : "OK", "id" : drone_id }
def run_mission(drone, target_height, waypoints):
while True:
print(("Reaching target alt : " + str(drone.location.global_relative_frame.alt)))
if drone.location.global_relative_frame.alt >= target_height * 0.9:
break
print ('target alt reached')
mavparser.create_mission(drone, waypoints)
print ('mission acquired')
drone.mode = VehicleMode('AUTO')
print ('initiating sequence')
print ('in mission')
def attach_listener(kwargs):
attr = kwargs.get('attr', None)
fn = kwargs.get('fn', None)
attach_fn = kwargs.get('attach_fn', None)
if not fn == None and not attr == None and not attach_fn == None:
attach_fn(attr, fn)
def takeoff_drone(kwargs):
global q
drone_id = kwargs.get("drone_id", None)
target_height = kwargs.get("target_height", 10)
waypoints = kwargs.get("waypoints", None)
try:
drone = drone_pool[drone_id]
except:
raise
drone.initialize()
drone.mode = VehicleMode('GUIDED')
drone.armed = True
while not drone.armed:
time.sleep(1)
drone.simple_takeoff(target_height)
print (waypoints)
if waypoints:
run_mission(drone, target_height, waypoints)
def detach_event_listeners(drone, value, status):
drone.remove_attribute_listener('location', update_location)
drone.remove_attribute_listener('airspeed', update_airspeed)
drone.remove_attribute_listener('attitude', update_attitude)
drone.remove_attribute_listener('heading', update_heading)
node.update_drone(drone_id, { "location" : {"lat": value.global_relative_frame.lat, "lon": value.global_relative_frame.lon, "alt": value.global_relative_frame.alt}, "status": status})
return
def update_location(self, attr_name, value):
node.update_drone(drone_id, { "location" : {"lat": value.global_relative_frame.lat, "lon": value.global_relative_frame.lon, "alt": value.global_relative_frame.alt}, "status": "FLYING"})
command_len = len(drone.commands)
wp_len = len(waypoints)
if command_len >= wp_len :
diff = command_len - wp_len
next_wp = max(drone.commands.next - diff, 0) % len(waypoints)
waypoint = waypoints[next_wp]
# print "df: " + `diff`
# print next_wp
node.update_drone(drone_id, { "waypoint" : waypoint })
if drone.mode == VehicleMode('LAND') and drone.location.global_relative_frame.alt <= 0.1:
detach_event_listeners(drone, value, "HALTED")
return
if drone.commands.next == len(drone.commands):
detach_event_listeners(drone, value, "FINISHED")
return
def update_airspeed(self, attr_name, value):
node.update_drone(drone_id, {"airspeed": value})
def update_attitude(self, attr_name, value):
node.update_drone(drone_id, { "pitch": value.pitch, 'roll': value.roll, 'yaw': value.yaw })
def update_heading(self, attr_name, value):
node.update_drone(drone_id, { "heading": value })
mq.put((attach_listener, { "attach_fn" : drone.add_attribute_listener, "attr" : 'location', "fn" : update_location }))
mq.put((attach_listener, { "attach_fn" : drone.add_attribute_listener, "attr" : 'airspeed', "fn" : update_airspeed }))
mq.put((attach_listener, { "attach_fn" : drone.add_attribute_listener, "attr" : 'attitude', "fn" : udpate_attitude }))
mq.put((attach_listener, { "attach_fn" : drone.add_attribute_listener, "attr" : 'heading', "fn" : update_heading }))
print ('took off')
return True
def land_drone(kwargs):
drone_id = kwargs.get("drone_id", None)
try:
drone = drone_pool[drone_id]
except:
raise
if not drone.armed:
return False
cmds = drone.commands
cmds.wait_ready()
cmds.clear()
drone.mode = VehicleMode('LAND')
print((drone.mode))
return True
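The module above hands work to background loops by putting `(function, kwargs)` tuples on the `q`/`mq` queues owned by `threadrunner` (not shown here). A minimal, self-contained sketch of that dispatch convention using only the standard library, with an invented stub task:

import queue
import threading

jobs = queue.Queue()

def worker():
    while True:
        func, kwargs = jobs.get()
        try:
            func(**kwargs)
        finally:
            jobs.task_done()

def create_new_drone_stub(db_key=None, home=None):
    print("creating", db_key, "at", home)

threading.Thread(target=worker, daemon=True).start()
jobs.put((create_new_drone_stub, {"db_key": "drone_1", "home": {"lat": 6.9271, "lon": 79.8612, "alt": 1}}))
jobs.join()   # blocks until the stub task has been processed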
| 27.133621
| 187
| 0.707705
| 884
| 6,295
| 4.825792
| 0.186652
| 0.047586
| 0.040084
| 0.028129
| 0.296296
| 0.246835
| 0.205345
| 0.199015
| 0.178153
| 0.11158
| 0
| 0.007268
| 0.147577
| 6,295
| 231
| 188
| 27.251082
| 0.787738
| 0.014138
| 0
| 0.160494
| 0
| 0
| 0.107045
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104938
| false
| 0
| 0.037037
| 0.012346
| 0.234568
| 0.061728
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a19dea1f3bc079f6c50613369f0699df82e34cf
| 2,365
|
py
|
Python
|
Problemset/longest-string-chain/longest-string-chain.py
|
KivenCkl/LeetCode
|
fcc97c66f8154a5d20c2aca86120cb37b9d2d83d
|
[
"MIT"
] | 7
|
2019-05-08T03:41:05.000Z
|
2020-12-22T12:39:43.000Z
|
Problemset/longest-string-chain/longest-string-chain.py
|
Yuziquan/LeetCode
|
303fc1c8af847f783c4020bd731b28b72ed92a35
|
[
"MIT"
] | 1
|
2021-07-19T03:48:35.000Z
|
2021-07-19T03:48:35.000Z
|
Problemset/longest-string-chain/longest-string-chain.py
|
Yuziquan/LeetCode
|
303fc1c8af847f783c4020bd731b28b72ed92a35
|
[
"MIT"
] | 7
|
2019-05-10T20:43:20.000Z
|
2021-02-22T03:47:35.000Z
|
# @Title: Longest String Chain
# @Author: KivenC
# @Date: 2019-05-26 20:35:25
# @Runtime: 144 ms
# @Memory: 13.3 MB
from typing import List

class Solution:
# # way 1
# def longestStrChain(self, words: List[str]) -> int:
# # Dynamic programming
# # dp[i] = max(dp[i], dp[j] + 1) (0 <= j < i and words[j] is a predecessor of words[i])
# length = len(words)
# if length < 2:
# return length
# dp = [1 for _ in range(length)]
# words.sort(key=len) # sort by increasing string length
# for i in range(1, length):
# if i >= 1 and words[i] == words[i - 1]: # skip duplicates
# continue
# for j in range(i - 1, -1, -1):
# if len(words[i]) - len(words[j]) > 1: # prune
# break
# if len(words[i]) == len(words[j]):
# continue
# if self.isPre(words[j], words[i]):
# dp[i] = max(dp[i], dp[j] + 1)
# return max(dp)
# def isPre(self, word1: str, word2: str) -> bool:
# # check whether word1 is a predecessor of word2
# # two-pointer approach
# # i, j, length1, length2 = 0, 0, len(word1), len(word2)
# # while i < length1 and j < length2:
# # if word1[i] == word2[j]:
# # i += 1
# # j += 1
# # if length2 - length1 == 1 and i == length1:
# # return True
# # return False
# # compare word1 against word2 with one character removed at each position
# if len(word1) + 1 != len(word2):
# return False
# for i in range(len(word2)):
# if word2[: i] + word2[i + 1:] == word1:
# return True
# return False
# way 2
def longestStrChain(self, words: List[str]) -> int:
import collections
length = len(words)
if length < 2:
return length
pool = collections.defaultdict(list) # group the strings by length
dp = {}
for word in words:
pool[len(word)].append(word)
for key in sorted(pool.keys()):
if key - 1 not in pool:
continue
for word in pool[key]:
for j in range(key):
tmp = word[: j] + word[j + 1:]
if tmp in pool[key - 1]:
dp[word] = max(dp.get(word, 1), dp.get(tmp, 1) + 1)
return max(dp.values()) if dp else 1
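A small hand-checked example for the active implementation (way 2), assuming the Solution class above is in scope:

words = ["a", "b", "ba", "bca", "bda", "bdca"]
print(Solution().longestStrChain(words))   # 4, e.g. "a" -> "ba" -> "bda" -> "bdca"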
| 33.785714
| 81
| 0.442283
| 299
| 2,365
| 3.494983
| 0.284281
| 0.03445
| 0.042105
| 0.051675
| 0.200957
| 0.200957
| 0.200957
| 0.091866
| 0
| 0
| 0
| 0.052364
| 0.418605
| 2,365
| 69
| 82
| 34.275362
| 0.707636
| 0.647357
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.052632
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a19e8bf83375a817e65cca3fb4f7daafac8434e
| 21,107
|
py
|
Python
|
IKFK Builder/IKFK_Builder.py
|
ssimbox/ssimbox-rigTools
|
824bc3b90c42ab54d01b4b0007f00e7cc2f2f08c
|
[
"MIT"
] | 1
|
2021-01-19T13:36:42.000Z
|
2021-01-19T13:36:42.000Z
|
IKFK Builder/IKFK_Builder.py
|
ssimbox/sbx-autorig
|
824bc3b90c42ab54d01b4b0007f00e7cc2f2f08c
|
[
"MIT"
] | 2
|
2021-03-29T22:15:08.000Z
|
2021-03-29T22:17:37.000Z
|
IKFK Builder/IKFK_Builder.py
|
ssimbox/ssimbox-rigTools
|
824bc3b90c42ab54d01b4b0007f00e7cc2f2f08c
|
[
"MIT"
] | null | null | null |
from ctrlUI_lib import createClav2, createSphere
import maya.cmds as cmds
import maya.OpenMaya as om
from functools import partial
def duplicateChain(*args):
global ogChain
global chainLen
global switcherLoc
global side
global controllerColor
global clavCheckbox
global rigGrp, ctrlGrp
ogRootchain = cmds.ls(sl = True, type = "joint")[0]
ogChain = cmds.listRelatives(ogRootchain, ad = True, type = "joint")
ogChain.append(ogRootchain)
ogChain.reverse()
side = ogRootchain[0:2]
# Initialize input from UI
scaleController = cmds.intField(scaleField_UI, q=1, v=1)
blendCheckbox = cmds.checkBox(blendCheckbox_UI, q=1, v=1)
constraintCheckBox = cmds.checkBox(constraintCheckBox_UI, q=1, v=1)
chainMenu = cmds.optionMenu("chainMenu_UI", q=1, v=1)
clavCheckbox = cmds.checkBox(clavCheckbox_UI, q=1, v=0)
if side == "l_": controllerColor = rgb=(0, 0, 255)
elif side == "r_": controllerColor = rgb=(255, 0, 0)
if chainMenu == "Leg": chainLen = 5
else: #this is totally unscalable but for now it's ok
chainLen = 3
#suffix for the new chains
newJointList = ["_ik", "_fk", "_scale"]
for newJoint in newJointList:
for i in range(chainLen):
if blendCheckbox == 0 and constraintCheckBox == 0:
cmds.error("pls, select one relation type")
break
newJointName = ogChain[i] + newJoint
#create a joint, copy their position and freeze transform
cmds.joint(n = newJointName)
cmds.matchTransform(newJointName, ogChain[i])
cmds.makeIdentity(newJointName, a = 1, t = 0, r = 1, s = 0)
#deselect to make the two different hierarchies
cmds.select(cl = 1)
cmds.parent((ogChain[0] + "_ik"), world = True)
cmds.setAttr(ogChain[0] + "_ik.visibility", 0)
cmds.setAttr(ogChain[0] + "_fk.visibility", 0)
# Create a locator used for switching IK/FK mode and snap it between two joints
switcherLoc = cmds.spaceLocator(n=side + chainMenu + "_ikfk_Switch")
switcherLocGrp = cmds.group(em=1, n=switcherLoc[0] + "_grp")
cmds.color(switcherLoc, rgb=(255, 255, 0)) #yellow
cmds.delete(cmds.pointConstraint(switcherLoc, switcherLocGrp))
cmds.parent(switcherLoc, switcherLocGrp)
cmds.delete(cmds.pointConstraint(ogChain[1], ogChain[2], switcherLocGrp))
cmds.addAttr(switcherLoc, ln="FKIK_Mode", at="short", min=0, max=1, k=1, r=1)
cmds.move(0,0,-12, switcherLocGrp, r=1) #IMPROVE THIS SHIT
cmds.parentConstraint(ogChain[1], switcherLocGrp, mo=1)
#remove .t, .r, .s and .v from the channelbox
for coord in ["X", "Y", "Z"]:
cmds.setAttr(switcherLoc[0] + ".translate" + coord, k=0, l=1)
cmds.setAttr(switcherLoc[0] + ".rotate" + coord, k=0, l=1)
cmds.setAttr(switcherLoc[0] + ".scale" + coord, k=0, l=1)
cmds.setAttr(switcherLoc[0] + ".visibility", k=0, l=1)
# Create hierarchy groups
rigGrp = cmds.group(em=1, n= side + chainMenu + "_rig_grp")
ctrlGrp = cmds.group(em=1, n= side + chainMenu + "_ctrl_grp")
cmds.delete(cmds.parentConstraint(ogChain[0], rigGrp))
cmds.delete(cmds.parentConstraint(ogChain[0], ctrlGrp))
cmds.parent(ctrlGrp, rigGrp)
# Execute
if blendCheckbox == 1:
blendNodeFunc(scaleController, chainMenu)
if constraintCheckBox == 1:
constraintFunc(scaleController, chainMenu)
if clavCheckbox == 1:
clavSel(scaleController)
else:
cmds.parent(ogChain[0] + "_ik", ogChain[0] + "_fk", ctrlGrp)
cmds.parent(ogChain[0] + "_fk_anim_grp", ctrlGrp)
cmds.parent(switcherLocGrp, rigGrp)
def clavSel(scaleClav):
# Select clavicle Joint moving up and put it at the top of the chain
clavJoint = cmds.pickWalk(ogChain[0], d="up")[0]
#ogChain.insert(0, clavJoint)
clavController = createClav2(clavJoint + "_anim") # Import coordinates from ctrlUI_lib
cmds.delete(cmds.pointConstraint(clavJoint, clavController))
# Create offset group, FDH and move up
clavControllerGrp = cmds.group(n=clavController + "_grp", em=1)
cmds.delete(cmds.parentConstraint(clavJoint, clavControllerGrp))
cmds.parent(clavController, clavControllerGrp)
fixedScale = scaleClav/4
cmds.scale(fixedScale, fixedScale, fixedScale, clavController)
cmds.makeIdentity(clavController, a=1)
cmds.move(0,10,0, clavControllerGrp, ws=1, r=1)
cmds.color(clavController, rgb=controllerColor)
# Move pivots on clavicle joint
piv = cmds.xform(clavJoint, q=True, ws=True, t=True)
cmds.xform(clavController, ws=True, piv=piv)
cmds.xform(clavControllerGrp, ws=True, piv=piv)
cmds.orientConstraint(clavController, clavJoint)
# Parent ik and fk chain under clavicle controller
cmds.parent((ogChain[0]+"_fk_anim_grp"),(ogChain[0] + "_ik"), (ogChain[0] + "_fk"), clavController)
cmds.parent(clavControllerGrp, ctrlGrp)
def visCheck(vis):
if vis == "Arm":
asd = True
if vis == "Leg":
asd = False
cmds.checkBox(clavCheckbox_UI, e=1, vis=asd, v=asd)
# Buttons +1 and +3
count = 0
def addOneUnit(*args):
global count
count = count + 1
cmds.intField(scaleField_UI, v=1+count, e=1)
def addThreeUnit(*args):
global count
count = count + 3
cmds.intField(scaleField_UI, v=1+count, e=1)
def blendNodeFunc(scaleController, selectChain):
# Create some blendColors node with the same name of the joint
for x in range(chainLen):
blendColorsNode = cmds.createNode("blendColors", n = ogChain[x] + "_blend")
# Connect FK and IK chains into blendColors channels and then connect the output to the original joint chain
cmds.connectAttr((ogChain[x] + "_ik.rotate"), blendColorsNode + ".color1")
cmds.connectAttr((ogChain[x] + "_fk.rotate"), blendColorsNode + ".color2")
cmds.connectAttr((blendColorsNode + ".output"), (ogChain[x] + ".rotate" ))
cmds.connectAttr(switcherLoc[0]+".FKIK_Mode", blendColorsNode + ".blender")
ikChainBuild(scaleController, selectChain)
fkControllerCreator(scaleController, selectChain)
def constraintFunc(scaleController, selectChain):
# Create some blendColors node with the same name of the joint
for x in range(chainLen):
# Setup orient constraints
cmds.parentConstraint((ogChain[x] + "_ik"), ogChain[x])
cmds.parentConstraint((ogChain[x] + "_fk"), ogChain[x])
# Setup SDK naming convention
sdkDriver = switcherLoc[0] + ".FKIK_Mode"
ikSdkDriven = ogChain[x] + "_parentConstraint1." + ogChain[x] + "_ikW0"
fkSdkDriven = ogChain[x] + "_parentConstraint1." + ogChain[x] + "_fkW1"
# Setup SDK
cmds.setAttr(sdkDriver, 0)
cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver, v=0, dv=0)
cmds.setDrivenKeyframe(fkSdkDriven, cd=sdkDriver, v=1, dv=0)
cmds.setAttr(sdkDriver, 1)
cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver, v=1, dv=1)
cmds.setDrivenKeyframe(fkSdkDriven, cd=sdkDriver, v=0, dv=1)
ikChainBuild(scaleController, selectChain)
fkControllerCreator(scaleController, selectChain)
def fkControllerCreator(fkSize, legOrArm):
orientController = cmds.optionMenu("UI_orientControllerMenu", q=1, v=1)
# Create controllers and group offsets
# Change rotation, color
for y in range(chainLen):
anim_group = cmds.group(em=1, n=ogChain[y] + "_fk_anim_grp")
fk_controller = cmds.circle(n=ogChain[y] + "_fk_anim")[0] # If not [0] it'll warn some stuff related to Maya underworld
# Set scale
cmds.scale(fkSize, fkSize, fkSize, fk_controller)
cmds.matchTransform(anim_group, ogChain[y])
cmds.delete(cmds.parentConstraint(ogChain[y], fk_controller))
cmds.parent(fk_controller, anim_group)
# Set controller orientation based on second axis
if orientController == "x": cmds.rotate(90,0,0, fk_controller)
if orientController == "y": cmds.rotate(0,90,0, fk_controller)
if orientController == "z": cmds.rotate(0,0,90, fk_controller)
# Freeze transform, delete history and set color
cmds.makeIdentity(fk_controller, a = 1, t = 1, r = 1, s = 0)
cmds.delete(fk_controller, ch = 1)
cmds.color(fk_controller, rgb=controllerColor)
# Set SDK visibility
sdkDriver = switcherLoc[0] + ".FKIK_Mode"
cmds.setAttr(sdkDriver, 1)
cmds.setDrivenKeyframe(ogChain[0] + "_fk_anim_grp.visibility", cd=sdkDriver, v=1, dv=0)
cmds.setAttr(sdkDriver, 0)
cmds.setDrivenKeyframe(ogChain[0] + "_fk_anim_grp.visibility", cd=sdkDriver, v=0, dv=1)
# Lock .t and .s attributes
#for x in ["X", "Y", "Z"]:
#cmds.setAttr(fk_controller + ".translate" + x, k=0, l=1)
#cmds.setAttr(fk_controller + ".scale" + x, k=0, l=1)
# Create ordered hierarchy
for x in reversed(range(chainLen)):
if x == 0:
continue
cmds.parent(ogChain[x] + "_fk_anim_grp", ogChain[x-1] + "_fk_anim")
# Set orientConstraint _anim controllers with _fk hierarchy
for x in range(chainLen):
cmds.parentConstraint(ogChain[x] + "_fk_anim", ogChain[x] + "_fk")
# If leg chain is selected delete toe controller, else not
if legOrArm == "Leg":
if x == (chainLen-1):
cmds.delete(ogChain[chainLen-1] + "_fk_anim_grp")
def ikChainBuild(scaleIK, HandleName):
masterIkHandle = cmds.ikHandle(sj=ogChain[0] + "_ik", ee=ogChain[2] + "_ik", sol="ikRPsolver", n=side + HandleName + "_ikHandle")
cmds.setAttr(masterIkHandle[0] + ".visibility", 0)
if HandleName == "Arm":
#print ("scaleController", scaleField_UI)
armIk(scaleIK, masterIkHandle, HandleName)
else:
#print ("scaleController", scaleField_UI)
legIK(scaleIK, masterIkHandle, HandleName)
def armIk(armIkScale, armikHandle, pvName):
ikHandJoint = cmds.joint(n=side + "hand_ik")
cmds.delete(cmds.parentConstraint(ogChain[2] + "_ik", ikHandJoint))
cmds.makeIdentity(ikHandJoint, a = 1, t = 1, r = 1, s = 0)
if side == "l_":
cmds.move(10,0,0, ikHandJoint, r=1, os=1)
else:
cmds.move(-10,0,0, ikHandJoint, r=1, os=1)
cmds.parent(ikHandJoint, ogChain[2] + "_ik")
handikHandle = cmds.ikHandle(sj=ogChain[2] + "_ik", ee=ikHandJoint, n=side + "hand_ikHandle", sol="ikSCsolver")
cmds.parent(handikHandle[0], armikHandle[0])
#create IK controller ---> CUBE
crvIkCube = cmds.curve(d=1, p=[(-0.5, 0.5, -0.5), (0.5, 0.5, -0.5), (0.5, 0.5, 0.5),
(-0.5, 0.5, 0.5), (-0.5, -0.5, 0.5), (-0.5, -0.5, -0.5),
(-0.5, 0.5, -0.5), (-0.5, 0.5, 0.5), (-0.5, -0.5, 0.5),
(0.5, -0.5, 0.5), (0.5, 0.5, 0.5), (0.5, 0.5, -0.5),
(0.5, -0.5, -0.5), (0.5, -0.5, 0.5), (0.5, -0.5, -0.5), (-0.5, -0.5, -0.5)],
k=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5], n=side + "hand_ik_anim" )
# Rename shape node
shapeList = cmds.listRelatives(crvIkCube, s = True)
cmds.rename(shapeList, crvIkCube + "Shape")
crvIkCubeGrp = cmds.group(n=crvIkCube + "_grp")
cmds.delete(cmds.parentConstraint(ogChain[2] + "_ik", crvIkCubeGrp))
cmds.color(crvIkCube, rgb=controllerColor)
cmds.scale(armIkScale, armIkScale, armIkScale, crvIkCubeGrp)
cmds.parent(armikHandle[0], crvIkCube)
pvController = createSphere(nome= side+pvName+"_PV")
findPoleVector(loc=pvController, targetHandle=armikHandle[0])
cmds.addAttr(pvController, at="enum", enumName = "------", ln="Attributes", k=1, r=1)
cmds.addAttr(pvController, ln="Follow", k=1, r=1, min=0, max=1)
cmds.addAttr(pvController, ln="Follow_Clav_Hand", k=1, r=1, min=0, max=1, dv=0.5)
# Parent ikController and PV under _rig_GRP
cmds.parent(crvIkCubeGrp, pvController + "_grp" ,rigGrp)
#set SDK visibility
sdkDriver = switcherLoc[0] + ".FKIK_Mode"
cmds.setAttr(sdkDriver, 0)
cmds.setDrivenKeyframe(crvIkCubeGrp + ".visibility", cd=sdkDriver, v=0, dv=0)
cmds.setDrivenKeyframe(pvController + "_grp.visibility", cd=sdkDriver, v=0, dv=0)
cmds.setAttr(sdkDriver, 1)
cmds.setDrivenKeyframe(crvIkCubeGrp + ".visibility", cd=sdkDriver, v=1, dv=1)
cmds.setDrivenKeyframe(pvController + "_grp.visibility", cd=sdkDriver, v=1, dv=1)
def legIK(ikFootScale, legikHandle, pvName):
ballikHandle = cmds.ikHandle(sj=ogChain[2] + "_ik", ee=ogChain[3] + "_ik", sol="ikSCsolver", n=side + "ball_ikHandle")
toeikHandle = cmds.ikHandle(sj=ogChain[3] + "_ik", ee=ogChain[4] + "_ik", sol="ikSCsolver", n=side + "toe_ikHandle")
# Create and place ik controller
ikFootControl = cmds.curve(d=2, p=[(0.997, 0, 1.789), (0, 0, 2.39), (-0.997,0,1.789), (-1.108, 0, 0), (-0.784, 0,-2.5),
(0, 0,-3), (0.784, 0, -2.5), (1.108, 0, 0), (0.997, 0, 1.789), (0, 0, 2.39)],
k=[0,1,2,3,4,5,6,7,8,9,10], n=side + "leg_anim_ik")
# Rename shape node
shapeList = cmds.listRelatives(ikFootControl, s = True)
cmds.rename(shapeList, ikFootControl + "Shape")
ikFootControlGrp = cmds.group(em=1, n=ikFootControl + "_grp")
cmds.parent(ikFootControl, ikFootControlGrp)
# Set size, freeze transform, create offset group and color
cmds.scale(ikFootScale, ikFootScale, ikFootScale, ikFootControlGrp)
cmds.move(0,-3.2,0, ikFootControl, r=1)
cmds.makeIdentity(ikFootControl, a = 1, t = 1, r = 1, s = 1)
cmds.delete(ikFootControl, ch = 1)
cmds.delete(cmds.pointConstraint(ogChain[3] + "_ik", ikFootControlGrp))
cmds.color(ikFootControl, rgb=controllerColor)
# pivot snapping on ankle joint
piv = cmds.xform(ogChain[2], q=True, ws=True, t=True)
cmds.xform(ikFootControl, ws=True, piv=piv)
cmds.parent(ballikHandle[0], toeikHandle[0], legikHandle[0], ikFootControl)
#---------- Making Pole Vector -------------#
# Pole Vector controller ---> Sphere
pvController = createSphere(nome= side+pvName+"_PV")
findPoleVector(loc=pvController, targetHandle=legikHandle[0])
cmds.addAttr(pvController, ln="Follow", k=1, r=1, min=0, max=1)
cmds.addAttr(pvController, ln="Follow_Leg_Foot", k=1, r=1, min=0, max=1, dv=0.5)
# Create attributes on ikController
cmds.addAttr(ikFootControl, at="enum",enumName = "------", ln="Attributes", k=1, r=1)
cmds.addAttr(ikFootControl, ln="Twist", k=1, r=1)
cmds.addAttr(ikFootControl, ln="Lateral_Roll", k=1, r=1)
for bone in ["Ankle", "Ball", "Toe_Tap"]:
cmds.addAttr(ikFootControl, at="enum", enumName = "------", ln=bone, k=1, r=1)
for coord in ["X", "Y", "Z"]:
cmds.addAttr(ikFootControl, ln=bone+coord, k=1, r=1)
# Parent ikController and PV under _rig_GRP
cmds.parent(ikFootControlGrp, pvController + "_grp" ,rigGrp)
# Set SDK visibility
sdkDriver = switcherLoc[0] + ".FKIK_Mode"
cmds.setAttr(sdkDriver, 0)
cmds.setDrivenKeyframe(ikFootControlGrp + ".visibility", cd=sdkDriver, v=0, dv=0)
cmds.setDrivenKeyframe(pvController + "_grp.visibility", cd=sdkDriver, v=0, dv=0)
cmds.setAttr(sdkDriver, 1)
cmds.setDrivenKeyframe(ikFootControlGrp + ".visibility", cd=sdkDriver, v=1, dv=1)
cmds.setDrivenKeyframe(pvController + "_grp.visibility", cd=sdkDriver, v=1, dv=1)
def findPoleVector(loc, targetHandle):
# This func is kinda black magic
# All credits to https://vimeo.com/66015036
start = cmds.xform(ogChain[0], q=1, ws=1, t=1)
mid = cmds.xform(ogChain[1], q=1, ws=1, t=1)
end = cmds.xform(ogChain[2], q=1, ws=1, t=1)
startV = om.MVector(start[0], start[1], start[2])
midV = om.MVector(mid[0], mid[1], mid[2])
endV = om.MVector(end[0], end[1], end[2])
startEnd = endV - startV
startMid = midV - startV
dotP = startMid * startEnd
proj = float(dotP) / float(startEnd.length())
startEndN = startEnd.normal()
projV = startEndN * proj
arrowV = startMid - projV
arrowV*= 10 #distance from joint
finalV = arrowV + midV
cmds.xform(loc, ws=1, t=(finalV.x, finalV.y ,finalV.z))
locGrp = cmds.group(em=1, n=loc + "_grp")
#snap, parent offsetGrp, set color and then make Constraint
cmds.delete(cmds.pointConstraint(loc, locGrp))
cmds.parent(loc, locGrp)
cmds.makeIdentity(loc, a=1, t=1, r=1, s=1)
cmds.color(loc, rgb=controllerColor)
cmds.poleVectorConstraint(loc, targetHandle)
def showUI():
global chainMenu_UI
global scaleField_UI
global orientControllerMenu
global constraintCheckBox_UI
global blendCheckbox_UI
global plusOne_UI
global plusThree_UI
global clavCheckbox_UI
if cmds.window("switchModeUI", ex = 1): cmds.deleteUI("switchModeUI")
myWin = cmds.window("switchModeUI", t="IKFK Builder", w=300, h=300, s=1)
mainLayout = cmds.formLayout(nd=50)
# Useful in selecting which chain: Leg or Arm?
chainMenu_UI = cmds.optionMenu("chainMenu_UI", l="Which chain?", cc=visCheck)
cmds.menuItem(l="Leg")
cmds.menuItem(l="Arm")
constraintCheckBox_UI = cmds.checkBox(label = "orientConsts+SDK Mode", v=0,
cc= lambda state: (cmds.checkBox(blendCheckbox_UI, e=1, en=state-1)))
blendCheckbox_UI = cmds.checkBox(label = "blendColor Mode", v=0,
cc= lambda state: (cmds.checkBox(constraintCheckBox_UI, e=1, en=state-1)))
clavCheckbox_UI = cmds.checkBox(l="Clavicle", vis=0)
# Useful in orienting FK controllers as the user wishes. Maybe this can be improved
orientControllerMenu = cmds.optionMenu("UI_orientControllerMenu", l="What's the secondary axis")
cmds.menuItem(l="x")
cmds.menuItem(l="y")
cmds.menuItem(l="z")
# Scale the UI because you'll never know
scaleControllerText = cmds.text(l="Controllers size")
scaleField_UI = cmds.intField(en=10, v=1, min=1)
plusOne_UI = cmds.button(l="+1", c=addOneUnit)
plusThree_UI = cmds.button(l="+3", c=addThreeUnit)
separator01 = cmds.separator(h=5)
separator02 = cmds.separator(h=5)
#
execButton = cmds.button(l="Duplicate Chain", c=partial(duplicateChain, blendNodeFunc, constraintFunc))
cmds.formLayout(mainLayout, e=1,
attachForm = [
(chainMenu_UI, "left", 8), (chainMenu_UI, "top", 5), (chainMenu_UI, "right", 80),
(clavCheckbox_UI, "top", 7),
(blendCheckbox_UI, "left", 5),
(separator01, "left", 1), (separator01, "right", 2),
#--------------------
(scaleField_UI, "right", 65), (scaleField_UI, "left", 5),
(plusOne_UI, "right", 5),
(plusThree_UI, "right", 5),
(scaleControllerText, "left", 5),
(separator02, "left", 1), (separator02, "right", 2),
#--------------------
(orientControllerMenu, "left", 8), (orientControllerMenu, "top", 5),
#--------------------
(execButton, "bottom", 5), (execButton, "left", 5), (execButton, "right", 5),
],
attachControl = [(clavCheckbox_UI, "left", 10, chainMenu_UI),
(constraintCheckBox_UI, "top", 5, chainMenu_UI),
(blendCheckbox_UI, "top", 5, chainMenu_UI),
(separator01, "top", 5, constraintCheckBox_UI),
(scaleField_UI, "top", 5, separator01),
(scaleControllerText, "top", 8, separator01),
(plusOne_UI, "top", 4, separator01),
(plusThree_UI, "top", 4, separator01),
(separator02, "top", 6, scaleField_UI),
(orientControllerMenu, "top", 6, separator02),
],
attachPosition = [#(clavCheckbox_UI, "right", 0, 10),
(constraintCheckBox_UI, "left", 0, 26), (blendCheckbox_UI, "right", 10, 24),
(scaleControllerText, "left", 5, 0), (scaleField_UI, "left", 110, 0), #(scaleField_UI, "right",0, 40),
(plusOne_UI, "right", 0, 45),
(plusThree_UI, "right", 0, 49)
]
)
cmds.showWindow(myWin)
showUI()
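The projection math in findPoleVector above (project start-to-mid onto start-to-end, keep the rejection, push it out past the mid joint) can be verified without Maya; a plain-Python sketch with invented joint positions:

start, mid, end = (0.0, 10.0, 0.0), (0.0, 5.0, 1.0), (0.0, 0.0, 0.0)   # e.g. hip, knee, ankle

sub = lambda a, b: tuple(x - y for x, y in zip(a, b))
dot = lambda a, b: sum(x * y for x, y in zip(a, b))
norm = lambda a: dot(a, a) ** 0.5

start_end = sub(end, start)
start_mid = sub(mid, start)
proj = dot(start_mid, start_end) / norm(start_end)              # scalar projection
proj_v = tuple(x / norm(start_end) * proj for x in start_end)   # projection vector
arrow = tuple((m - p) * 10 for m, p in zip(start_mid, proj_v))  # rejection, pushed 10 units out
pole = tuple(a + m for a, m in zip(arrow, mid))
print(pole)   # (0.0, 5.0, 11.0): sits out along the knee's bend direction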
| 41.386275
| 140
| 0.607713
| 2,599
| 21,107
| 4.865718
| 0.166987
| 0.007908
| 0.01115
| 0.014866
| 0.319706
| 0.25431
| 0.223312
| 0.178475
| 0.157204
| 0.133797
| 0
| 0.038685
| 0.250486
| 21,107
| 510
| 141
| 41.386275
| 0.760683
| 0.115791
| 0
| 0.119632
| 0
| 0
| 0.071843
| 0.004947
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039877
| false
| 0
| 0.01227
| 0
| 0.052147
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a1cf3b76d95e590eb1efa6bc9673c121f9d7242
| 5,128
|
py
|
Python
|
pipng/imagescale-q-m.py
|
nwiizo/joke
|
808c4c998cc7f5b7f6f3fb5a3ce421588a70c087
|
[
"MIT"
] | 1
|
2017-01-11T06:12:24.000Z
|
2017-01-11T06:12:24.000Z
|
pipng/imagescale-q-m.py
|
ShuyaMotouchi/joke
|
808c4c998cc7f5b7f6f3fb5a3ce421588a70c087
|
[
"MIT"
] | null | null | null |
pipng/imagescale-q-m.py
|
ShuyaMotouchi/joke
|
808c4c998cc7f5b7f6f3fb5a3ce421588a70c087
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright © 2012-13 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version. It is provided for
# educational purposes and is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import argparse
import collections
import math
import multiprocessing
import os
import sys
import Image
import Qtrac
Result = collections.namedtuple("Result", "copied scaled name")
Summary = collections.namedtuple("Summary", "todo copied scaled canceled")
def main():
size, smooth, source, target, concurrency = handle_commandline()
Qtrac.report("starting...")
summary = scale(size, smooth, source, target, concurrency)
summarize(summary, concurrency)
def handle_commandline():
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--concurrency", type=int,
default=multiprocessing.cpu_count(),
help="specify the concurrency (for debugging and "
"timing) [default: %(default)d]")
parser.add_argument("-s", "--size", default=400, type=int,
help="make a scaled image that fits the given dimension "
"[default: %(default)d]")
parser.add_argument("-S", "--smooth", action="store_true",
help="use smooth scaling (slow but good for text)")
parser.add_argument("source",
help="the directory containing the original .xpm images")
parser.add_argument("target",
help="the directory for the scaled .xpm images")
args = parser.parse_args()
source = os.path.abspath(args.source)
target = os.path.abspath(args.target)
if source == target:
args.error("source and target must be different")
if not os.path.exists(target):
os.makedirs(target)
return args.size, args.smooth, source, target, args.concurrency
def scale(size, smooth, source, target, concurrency):
canceled = False
jobs = multiprocessing.JoinableQueue()
results = multiprocessing.Queue()
create_processes(size, smooth, jobs, results, concurrency)
todo = add_jobs(source, target, jobs)
try:
jobs.join()
except KeyboardInterrupt: # May not work on Windows
Qtrac.report("canceling...")
canceled = True
copied = scaled = 0
while not results.empty(): # Safe because all jobs have finished
result = results.get_nowait()
copied += result.copied
scaled += result.scaled
return Summary(todo, copied, scaled, canceled)
def create_processes(size, smooth, jobs, results, concurrency):
for _ in range(concurrency):
process = multiprocessing.Process(target=worker, args=(size,
smooth, jobs, results))
process.daemon = True
process.start()
def worker(size, smooth, jobs, results):
while True:
try:
sourceImage, targetImage = jobs.get()
try:
result = scale_one(size, smooth, sourceImage, targetImage)
Qtrac.report("{} {}".format("copied" if result.copied else
"scaled", os.path.basename(result.name)))
results.put(result)
except Image.Error as err:
Qtrac.report(str(err), True)
finally:
jobs.task_done()
def add_jobs(source, target, jobs):
todo = 0  # guard against an empty source directory
for todo, name in enumerate(os.listdir(source), start=1):
sourceImage = os.path.join(source, name)
targetImage = os.path.join(target, name)
jobs.put((sourceImage, targetImage))
return todo
def scale_one(size, smooth, sourceImage, targetImage):
oldImage = Image.from_file(sourceImage)
if oldImage.width <= size and oldImage.height <= size:
oldImage.save(targetImage)
return Result(1, 0, targetImage)
else:
if smooth:
scale = min(size / oldImage.width, size / oldImage.height)
newImage = oldImage.scale(scale)
else:
stride = int(math.ceil(max(oldImage.width / size,
oldImage.height / size)))
newImage = oldImage.subsample(stride)
newImage.save(targetImage)
return Result(0, 1, targetImage)
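# Worked example of the branch above (hypothetical numbers): for a
# 1600x1200 image and size=400, the smooth path uses
# scale = min(400/1600, 400/1200) = 0.25, while the fast path uses
# stride = ceil(max(1600/400, 1200/400)) = 4, i.e. the custom Image
# module presumably keeps roughly every 4th pixel in each dimension.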
def summarize(summary, concurrency):
message = "copied {} scaled {} ".format(summary.copied, summary.scaled)
difference = summary.todo - (summary.copied + summary.scaled)
if difference:
message += "skipped {} ".format(difference)
message += "using {} processes".format(concurrency)
if summary.canceled:
message += " [canceled]"
Qtrac.report(message)
print()
if __name__ == "__main__":
main()
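# A hypothetical invocation (the paths are placeholders, and the custom
# Image and Qtrac helper modules from the same codebase must be importable):
#
#   python3 imagescale-q-m.py --size 200 --smooth /tmp/xpm-in /tmp/xpm-out
#
# --concurrency defaults to multiprocessing.cpu_count() worker processes.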
| 36.892086
| 76
| 0.63475
| 585
| 5,128
| 5.517949
| 0.353846
| 0.027881
| 0.026332
| 0.026022
| 0.178748
| 0.118959
| 0.049566
| 0
| 0
| 0
| 0
| 0.004491
| 0.261895
| 5,128
| 138
| 77
| 37.15942
| 0.848085
| 0.12617
| 0
| 0.046729
| 0
| 0
| 0.123066
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074766
| false
| 0
| 0.074766
| 0
| 0.196262
| 0.009346
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a1f0af3de00ce3a7fdb8765f1bbb9115dd67f60
| 35,122
|
py
|
Python
|
test/integration_test.py
|
NoopDog/azul
|
37614eff627888065c7b0a277b3137b8a587ed51
|
[
"Apache-2.0"
] | null | null | null |
test/integration_test.py
|
NoopDog/azul
|
37614eff627888065c7b0a277b3137b8a587ed51
|
[
"Apache-2.0"
] | null | null | null |
test/integration_test.py
|
NoopDog/azul
|
37614eff627888065c7b0a277b3137b8a587ed51
|
[
"Apache-2.0"
] | null | null | null |
from abc import (
ABCMeta,
)
from concurrent.futures.thread import (
ThreadPoolExecutor,
)
from contextlib import (
contextmanager,
)
import csv
from functools import (
lru_cache,
)
import gzip
from io import (
BytesIO,
TextIOWrapper,
)
import json
import logging
import os
import random
import re
import sys
import threading
import time
from typing import (
AbstractSet,
Any,
Dict,
IO,
List,
Mapping,
Optional,
Sequence,
Tuple,
cast,
)
import unittest
from unittest import (
mock,
)
import uuid
from zipfile import (
ZipFile,
)
import attr
import chalice.cli
from furl import (
furl,
)
from google.cloud import (
storage,
)
from google.oauth2 import (
service_account,
)
from hca.dss import (
DSSClient,
)
from hca.util import (
SwaggerAPIException,
)
from humancellatlas.data.metadata.helpers.dss import (
download_bundle_metadata,
)
from more_itertools import (
first,
one,
)
from openapi_spec_validator import (
validate_spec,
)
import requests
from azul import (
CatalogName,
cached_property,
config,
drs,
)
from azul.azulclient import (
AzulClient,
AzulClientNotificationError,
)
from azul.drs import (
AccessMethod,
)
import azul.dss
from azul.es import (
ESClientFactory,
)
from azul.indexer import (
BundleFQID,
)
from azul.indexer.index_service import (
IndexService,
)
from azul.logging import (
configure_test_logging,
)
from azul.modules import (
load_app_module,
)
from azul.portal_service import (
PortalService,
)
from azul.requests import (
requests_session_with_retry_after,
)
from azul.types import (
JSON,
)
from azul_test_case import (
AlwaysTearDownTestCase,
AzulTestCase,
)
log = logging.getLogger(__name__)
# noinspection PyPep8Naming
def setUpModule():
configure_test_logging(log)
class IntegrationTestCase(AzulTestCase, metaclass=ABCMeta):
bundle_uuid_prefix: str = ''
@cached_property
def azul_client(self):
return AzulClient(prefix=self.bundle_uuid_prefix)
class IndexingIntegrationTest(IntegrationTestCase, AlwaysTearDownTestCase):
prefix_length = 2
max_bundles = 64
min_timeout = 20 * 60
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.bundle_uuid_prefix = ''.join([
str(random.choice('abcdef0123456789'))
for _ in range(cls.prefix_length)
])
def setUp(self) -> None:
super().setUp()
self.pruning_seed = random.randint(0, sys.maxsize)
@contextmanager
def subTest(self, msg: Any = None, **params: Any):
log.info('Beginning sub-test [%s] %r', msg, params)
with super().subTest(msg, **params):
try:
yield
except BaseException:
log.info('Failed sub-test [%s] %r', msg, params)
raise
else:
log.info('Successful sub-test [%s] %r', msg, params)
def test(self):
@attr.s(auto_attribs=True, kw_only=True)
class Catalog:
name: CatalogName
notifications: Mapping[BundleFQID, JSON]
@property
def num_bundles(self):
return len(self.notifications)
@property
def bundle_fqids(self) -> AbstractSet[BundleFQID]:
return self.notifications.keys()
def notifications_with_duplicates(self) -> List[JSON]:
num_duplicates = self.num_bundles // 2
notifications = list(self.notifications.values())
# Index some bundles again to test that we handle duplicate additions.
# Note: random.choices() may pick the same element multiple times so
# some notifications will end up being sent three or more times.
notifications.extend(random.choices(notifications, k=num_duplicates))
return notifications
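# Worked example of the duplication above (hypothetical count): with
# 7 notifications, num_duplicates = 7 // 2 = 3, so 10 notifications are
# sent in total and the 3 extras repeat entries already in the list
# (possibly the same entry more than once).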
def _wait_for_indexer():
num_bundles = sum(catalog.num_bundles for catalog in catalogs)
self.azul_client.wait_for_indexer(num_expected_bundles=num_bundles,
min_timeout=self.min_timeout)
# For faster modify-deploy-test cycles, set `delete` to False and run the
# test once. Then also set `index` to False. Subsequent runs will use the
# catalogs from the first run. Don't commit changes to these two lines.
index = True
delete = True
if index:
self._reset_indexer()
catalogs: List[Catalog] = [
Catalog(name=catalog, notifications=self._prepare_notifications(catalog) if index else {})
for catalog in config.integration_test_catalogs
]
if index:
for catalog in catalogs:
log.info('Starting integration test for catalog %r with %i bundles from prefix %r.',
catalog, catalog.num_bundles, self.bundle_uuid_prefix)
self.azul_client.index(catalog=catalog.name,
notifications=catalog.notifications_with_duplicates())
_wait_for_indexer()
for catalog in catalogs:
self._assert_catalog_complete(catalog=catalog.name,
entity_type='files',
bundle_fqids=catalog.bundle_fqids)
for catalog in catalogs:
self._test_manifest(catalog.name)
self._test_dos_and_drs(catalog.name)
self._test_repository_files(catalog.name)
if index and delete:
for catalog in catalogs:
self.azul_client.index(catalog=catalog.name,
notifications=catalog.notifications_with_duplicates(),
delete=True)
_wait_for_indexer()
for catalog in catalogs:
self._assert_catalog_empty(catalog.name)
self._test_other_endpoints()
def _reset_indexer(self):
# While it's OK to erase the integration test catalog, the queues are
# shared by all catalogs and we can't afford to trash them in a stable
# deployment like production.
self.azul_client.reset_indexer(catalogs=config.integration_test_catalogs,
# Can't purge the queues in stable deployment as
# they may contain work for non-IT catalogs.
purge_queues=not config.is_stable_deployment(),
delete_indices=True,
create_indices=True)
def _test_other_endpoints(self):
service_paths = (
'/',
'/openapi',
'/version',
'/index/summary',
'/index/files/order',
)
service_routes = (
(config.service_endpoint(), path)
for path in service_paths
)
health_endpoints = (
config.service_endpoint(),
config.indexer_endpoint()
)
health_paths = (
'', # default keys for lambda
'/', # all keys
'/basic',
'/elasticsearch',
'/queues',
'/progress',
'/api_endpoints',
'/other_lambdas'
)
health_routes = (
(endpoint, '/health' + path)
for endpoint in health_endpoints
for path in health_paths
)
for endpoint, path in (*service_routes, *health_routes):
with self.subTest('other_endpoints', endpoint=endpoint, path=path):
self._check_endpoint(endpoint, path)
def _test_manifest(self, catalog: CatalogName):
for format_, validator, attempts in [
(None, self._check_manifest, 1),
('compact', self._check_manifest, 1),
('full', self._check_manifest, 3),
('terra.bdbag', self._check_terra_bdbag, 1)
]:
with self.subTest('manifest',
catalog=catalog,
format=format_,
attempts=attempts):
assert attempts > 0
params = dict(catalog=catalog)
if format_ is not None:
params['format'] = format_
for attempt in range(attempts):
start = time.time()
response = self._check_endpoint(config.service_endpoint(), '/manifest/files', params)
log.info('Request %i/%i took %.3fs to execute.', attempt + 1, attempts, time.time() - start)
validator(catalog, response)
@lru_cache(maxsize=None)
def _get_one_file_uuid(self, catalog: CatalogName) -> str:
filters = {'fileFormat': {'is': ['fastq.gz', 'fastq']}}
response = self._check_endpoint(endpoint=config.service_endpoint(),
path='/index/files',
query=dict(catalog=catalog,
filters=json.dumps(filters),
size=1,
order='asc',
sort='fileSize'))
hits = json.loads(response)
return one(one(hits['hits'])['files'])['uuid']
def _test_dos_and_drs(self, catalog: CatalogName):
if config.is_dss_enabled(catalog) and config.dss_direct_access:
file_uuid = self._get_one_file_uuid(catalog)
self._test_dos(catalog, file_uuid)
self._test_drs(catalog, file_uuid)
@cached_property
def _requests(self) -> requests.Session:
return requests_session_with_retry_after()
def _check_endpoint(self,
endpoint: str,
path: str,
query: Optional[Mapping[str, Any]] = None) -> bytes:
query = {} if query is None else {k: str(v) for k, v in query.items()}
url = furl(endpoint, path=path, query=query)
return self._get_url_content(url.url)
def _get_url_content(self, url: str) -> bytes:
return self._get_url(url).content
def _get_url(self, url: str, allow_redirects=True) -> requests.Response:
log.info('GET %s', url)
response = self._requests.get(url, allow_redirects=allow_redirects)
expected_statuses = (200,) if allow_redirects else (200, 301, 302)
self._assertResponseStatus(response, expected_statuses)
return response
def _assertResponseStatus(self,
response: requests.Response,
expected_statuses: Tuple[int, ...] = (200,)):
self.assertIn(response.status_code,
expected_statuses,
(response.reason, response.content))
def _check_manifest(self, _catalog: CatalogName, response: bytes):
self.__check_manifest(BytesIO(response), 'bundle_uuid')
def _check_terra_bdbag(self, catalog: CatalogName, response: bytes):
with ZipFile(BytesIO(response)) as zip_fh:
data_path = os.path.join(os.path.dirname(first(zip_fh.namelist())), 'data')
file_path = os.path.join(data_path, 'participants.tsv')
with zip_fh.open(file_path) as file:
rows = self.__check_manifest(file, 'bundle_uuid')
for row in rows:
# Terra doesn't allow colons in this column, but they may
# exist in versions indexed by TDR
self.assertNotIn(':', row['entity:participant_id'])
suffix = '__file_drs_uri'
header, *rows = rows
prefixes = [
c[:-len(suffix)]
for c in header.keys()
if c.endswith(suffix)
]
size, drs_uri, name = min(
(
int(row[prefix + '__file_size']),
row[prefix + suffix],
row[prefix + '__file_name'],
)
for row in rows
for prefix in prefixes
if row[prefix + suffix]
)
log.info('Resolving %r (%r) from catalog %r (%i bytes)',
drs_uri, name, catalog, size)
plugin = self.azul_client.repository_plugin(catalog)
drs_client = plugin.drs_client()
access = drs_client.get_object(drs_uri, access_method=AccessMethod.https)
self.assertIsNone(access.headers)
self.assertEqual('https', furl(access.url).scheme)
# Try HEAD first because it's more efficient, and fall back to GET if the
# DRS implementation prohibits it, like Azul's DRS proxy of DSS.
for method in ('HEAD', 'GET'):
log.info('%s %s', method, access.url)
# For DSS, any HTTP client should do, but for TDR we need to use an
# authenticated client. TDR does return a Bearer token in the `headers`
# part of the DRS response but we know that this token is the same as
# the one we're making the DRS request with.
response = drs_client.http_client.request(method, access.url)
if response.status != 403:
break
self.assertEqual(200, response.status, response.data)
self.assertEqual(size, int(response.headers['Content-Length']))
def __check_manifest(self, file: IO[bytes], uuid_field_name: str) -> List[Mapping[str, str]]:
text = TextIOWrapper(file)
reader = csv.DictReader(text, delimiter='\t')
rows = list(reader)
log.info(f'Manifest contains {len(rows)} rows.')
self.assertGreater(len(rows), 0)
self.assertIn(uuid_field_name, reader.fieldnames)
bundle_uuid = rows[0][uuid_field_name]
self.assertEqual(bundle_uuid, str(uuid.UUID(bundle_uuid)))
return rows
def _test_repository_files(self, catalog: str):
with self.subTest('repository_files', catalog=catalog):
file_uuid = self._get_one_file_uuid(catalog)
response = self._check_endpoint(endpoint=config.service_endpoint(),
path=f'/fetch/repository/files/{file_uuid}',
query=dict(catalog=catalog))
response = json.loads(response)
while response['Status'] != 302:
self.assertEqual(301, response['Status'])
response = self._get_url(response['Location']).json()
content = self._get_url_content(response['Location'])
self._validate_fastq_content(content)
def _test_drs(self, catalog: CatalogName, file_uuid: str):
repository_plugin = self.azul_client.repository_plugin(catalog)
drs = repository_plugin.drs_client()
for access_method in AccessMethod:
with self.subTest('drs', catalog=catalog, access_method=access_method):
log.info('Resolving file %r with DRS using %r', file_uuid, access_method)
drs_uri = f'drs://{config.api_lambda_domain("service")}/{file_uuid}'
access = drs.get_object(drs_uri, access_method=access_method)
self.assertIsNone(access.headers)
if access.method is AccessMethod.https:
content = self._get_url_content(access.url)
elif access.method is AccessMethod.gs:
content = self._get_gs_url_content(access.url)
else:
self.fail(access_method)
self._validate_fastq_content(content)
def _test_dos(self, catalog: CatalogName, file_uuid: str):
with self.subTest('dos', catalog=catalog):
log.info('Resolving file %s with DOS', file_uuid)
response = self._check_endpoint(config.service_endpoint(),
path=drs.dos_object_url_path(file_uuid),
query=dict(catalog=catalog))
json_data = json.loads(response)['data_object']
file_url = first(json_data['urls'])['url']
while True:
response = self._get_url(file_url, allow_redirects=False)
# We handle redirects ourselves so we can log each request
if response.status_code in (301, 302):
file_url = response.headers['Location']
try:
retry_after = response.headers['Retry-After']
except KeyError:
pass
else:
time.sleep(int(retry_after))
else:
break
self._assertResponseStatus(response)
self._validate_fastq_content(response.content)
def _get_gs_url_content(self, url: str) -> bytes:
self.assertTrue(url.startswith('gs://'))
path = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
credentials = service_account.Credentials.from_service_account_file(path)
storage_client = storage.Client(credentials=credentials)
content = BytesIO()
storage_client.download_blob_to_file(url, content)
return content.getvalue()
def _validate_fastq_content(self, content: bytes):
# Check signature of FASTQ file.
with gzip.open(BytesIO(content)) as buf:
fastq = buf.read(1024 * 1024)
lines = fastq.splitlines()
# Assert first character of first and third line of file (see https://en.wikipedia.org/wiki/FASTQ_format).
self.assertTrue(lines[0].startswith(b'@'))
self.assertTrue(lines[2].startswith(b'+'))
def _prepare_notifications(self, catalog: CatalogName) -> Dict[BundleFQID, JSON]:
bundle_fqids = self.azul_client.list_bundles(catalog)
bundle_fqids = self._prune_test_bundles(catalog, bundle_fqids, self.max_bundles)
return {
bundle_fqid: self.azul_client.synthesize_notification(catalog, bundle_fqid)
for bundle_fqid in bundle_fqids
}
def _prune_test_bundles(self,
catalog: CatalogName,
bundle_fqids: Sequence[BundleFQID],
max_bundles: int
) -> List[BundleFQID]:
seed = self.pruning_seed
log.info('Selecting %i bundles with projects, out of %i candidates, using random seed %i.',
max_bundles, len(bundle_fqids), seed)
random_ = random.Random(x=seed)
# The same seed should give the same random order, so we need to have a
# deterministic order in the input list.
bundle_fqids = sorted(bundle_fqids)
random_.shuffle(bundle_fqids)
# Pick bundles off of the randomly ordered input until we have the
# desired number of bundles with project metadata.
filtered_bundle_fqids = []
for bundle_fqid in bundle_fqids:
if len(filtered_bundle_fqids) < max_bundles:
if self.azul_client.bundle_has_project_json(catalog, bundle_fqid):
filtered_bundle_fqids.append(bundle_fqid)
else:
break
return filtered_bundle_fqids
def _assert_catalog_complete(self,
catalog: CatalogName,
entity_type: str,
bundle_fqids: AbstractSet[BundleFQID]) -> None:
with self.subTest('catalog_complete', catalog=catalog):
expected_fqids = set(self.azul_client.filter_obsolete_bundle_versions(bundle_fqids))
obsolete_fqids = bundle_fqids - expected_fqids
if obsolete_fqids:
log.debug('Ignoring obsolete bundle versions %r', obsolete_fqids)
num_bundles = len(expected_fqids)
timeout = 600
indexed_fqids = set()
log.debug('Expecting bundles %s ', sorted(expected_fqids))
retries = 0
deadline = time.time() + timeout
while True:
hits = self._get_entities(catalog, entity_type)
indexed_fqids.update(
BundleFQID(bundle['bundleUuid'], bundle['bundleVersion'])
for hit in hits
for bundle in hit.get('bundles', [])
)
log.info('Detected %i of %i bundles in %i hits for entity type %s on try #%i.',
len(indexed_fqids), num_bundles, len(hits), entity_type, retries)
if len(indexed_fqids) == num_bundles:
log.info('Found the expected %i bundles.', num_bundles)
break
elif len(indexed_fqids) > num_bundles:
log.error('Found %i bundles, more than the expected %i.',
len(indexed_fqids), num_bundles)
break
elif time.time() > deadline:
log.error('Only found %i of %i bundles in under %i seconds.',
len(indexed_fqids), num_bundles, timeout)
break
else:
retries += 1
time.sleep(5)
self.assertSetEqual(indexed_fqids, expected_fqids)
entity_types = ['files', 'projects', 'samples', 'bundles']
def _assert_catalog_empty(self, catalog: CatalogName):
for entity_type in self.entity_types:
with self.subTest('catalog_empty',
catalog=catalog,
entity_type=entity_type):
hits = self._get_entities(catalog, entity_type)
self.assertEqual([], [hit['entryId'] for hit in hits])
def _get_entities(self, catalog: CatalogName, entity_type):
entities = []
size = 100
params = dict(catalog=catalog,
size=str(size))
url = furl(url=config.service_endpoint(),
path=('index', entity_type),
query_params=params
).url
while True:
response = self._get_url(url)
body = response.json()
hits = body['hits']
entities.extend(hits)
url = body['pagination']['next']
if url is None:
break
return entities
def _assert_indices_exist(self, catalog: CatalogName):
"""
Aside from checking that all indices exist this method also asserts
that we can instantiate a local ES client pointing at a real, remote
ES domain.
"""
es_client = ESClientFactory.get()
service = IndexService()
for index_name in service.index_names(catalog):
self.assertTrue(es_client.indices.exists(index_name))
class AzulClientIntegrationTest(IntegrationTestCase):
def test_azul_client_error_handling(self):
invalid_notification = {}
notifications = [invalid_notification]
self.assertRaises(AzulClientNotificationError,
self.azul_client.index,
first(config.integration_test_catalogs),
notifications)
class PortalRegistrationIntegrationTest(IntegrationTestCase):
# FIXME: Re-enable once overloading of S3 API is resolved
# https://github.com/DataBiosphere/azul/issues/2399
@unittest.skipIf(True or config.is_main_deployment(), 'Test would pollute portal DB')
def test_concurrent_portal_db_crud(self):
"""
Use multithreading to simulate multiple users simultaneously modifying
the portals database.
"""
# Currently takes about 50 seconds and creates a 25 kb db file.
n_threads = 10
n_tasks = n_threads * 10
n_ops = 5
portal_service = PortalService()
entry_format = 'task={};op={}'
def run(thread_count):
for op_count in range(n_ops):
mock_entry = cast(JSON, {
"portal_id": "foo",
"integrations": [
{
"integration_id": "bar",
"entity_type": "project",
"integration_type": "get",
"entity_ids": ["baz"]
}
],
"mock-count": entry_format.format(thread_count, op_count)
})
portal_service._crud(lambda db: list(db) + [mock_entry])
old_db = portal_service.read()
with ThreadPoolExecutor(max_workers=n_threads) as executor:
futures = [executor.submit(run, i) for i in range(n_tasks)]
self.assertTrue(all(f.result() is None for f in futures))
new_db = portal_service.read()
old_entries = [portal for portal in new_db if 'mock-count' not in portal]
self.assertEqual(old_entries, old_db)
mock_counts = [portal['mock-count'] for portal in new_db if 'mock-count' in portal]
self.assertEqual(len(mock_counts), len(set(mock_counts)))
self.assertEqual(set(mock_counts), {entry_format.format(i, j) for i in range(n_tasks) for j in range(n_ops)})
# Reset to pre-test state.
portal_service.overwrite(old_db)
class OpenAPIIntegrationTest(AzulTestCase):
def test_openapi(self):
service = config.service_endpoint()
response = requests.get(service + '/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.headers['content-type'], 'text/html')
self.assertGreater(len(response.content), 0)
# validate OpenAPI spec
response = requests.get(service + '/openapi')
response.raise_for_status()
spec = response.json()
validate_spec(spec)
class DSSIntegrationTest(AzulTestCase):
def test_patched_dss_client(self):
query = {
"query": {
"bool": {
"must_not": [
{
"term": {
"admin_deleted": True
}
}
],
"must": [
{
"exists": {
"field": "files.project_json"
}
},
{
"range": {
"manifest.version": {
"gte": "2019-04-01"
}
}
}
]
}
}
}
self.maxDiff = None
for direct in {config.dss_direct_access, False}:
for replica in 'aws', 'gcp':
if direct:
with self._failing_s3_get_object():
dss_client = azul.dss.direct_access_client()
self._test_dss_client(direct, query, dss_client, replica, fallback=True)
dss_client = azul.dss.direct_access_client()
self._test_dss_client(direct, query, dss_client, replica, fallback=False)
else:
dss_client = azul.dss.client()
self._test_dss_client(direct, query, dss_client, replica, fallback=False)
class SpecialError(Exception):
pass
def _failing_s3_get_object(self):
def make_mock(**kwargs):
original = kwargs['spec']
def mock_boto3_client(service, *args, **kwargs):
if service == 's3':
mock_s3 = mock.MagicMock()
mock_s3.get_object.side_effect = self.SpecialError()
return mock_s3
else:
return original(service, *args, **kwargs)
return mock_boto3_client
return mock.patch('azul.deployment.aws.client', spec=True, new_callable=make_mock)
def _test_dss_client(self, direct: bool, query: JSON, dss_client: DSSClient, replica: str, fallback: bool):
with self.subTest(direct=direct, replica=replica, fallback=fallback):
response = dss_client.post_search(es_query=query, replica=replica, per_page=10)
bundle_uuid, _, bundle_version = response['results'][0]['bundle_fqid'].partition('.')
with mock.patch('azul.dss.logger') as captured_log:
_, manifest, metadata = download_bundle_metadata(client=dss_client,
replica=replica,
uuid=bundle_uuid,
version=bundle_version,
num_workers=config.num_dss_workers)
log.info('Captured log calls: %r', captured_log.mock_calls)
self.assertGreater(len(metadata), 0)
self.assertGreater(set(f['name'] for f in manifest), set(metadata.keys()))
for f in manifest:
self.assertIn('s3_etag', f)
# Extract the log method name and the first three words of the
# log message. Note that the PyCharm debugger will call
# certain dunder methods on the variable, leading to failed
# assertions.
actual = [(m, ' '.join(re.split(r'[\s,]', a[0])[:3])) for m, a, k in captured_log.mock_calls]
if direct:
if replica == 'aws':
if fallback:
expected = [
('debug', 'Loading bundle %s'),
('debug', 'Loading object %s'),
('warning', 'Error accessing bundle'),
('warning', 'Failed getting bundle')
] + [
('debug', 'Loading file %s'),
('debug', 'Loading object %s'),
('warning', 'Error accessing file'),
('warning', 'Failed getting file')
] * len(metadata)
else:
expected = [
('debug', 'Loading bundle %s'),
('debug', 'Loading object %s')
] + [
('debug', 'Loading file %s'),
('debug', 'Loading object %s'), # file
('debug', 'Loading object %s') # blob
] * len(metadata)
else:
# On `gcp` the precondition check fails right away, preventing any attempt at direct access
expected = [
('warning', 'Failed getting bundle')
] + [
('warning', 'Failed getting file')
] * len(metadata)
else:
expected = []
self.assertSequenceEqual(sorted(expected), sorted(actual))
def test_get_file_fail(self):
for direct in {config.dss_direct_access, False}:
with self.subTest(direct=direct):
dss_client = azul.dss.direct_access_client() if direct else azul.dss.client()
with self.assertRaises(SwaggerAPIException) as e:
dss_client.get_file(uuid='acafefed-beef-4bad-babe-feedfa11afe1',
version='2018-11-19T232756.056947Z',
replica='aws')
self.assertEqual(e.exception.reason, 'not_found')
def test_mini_dss_failures(self):
uuid = 'acafefed-beef-4bad-babe-feedfa11afe1'
version = '2018-11-19T232756.056947Z'
with self._failing_s3_get_object():
mini_dss = azul.dss.MiniDSS(config.dss_endpoint)
with self.assertRaises(self.SpecialError):
mini_dss._get_file_object(uuid, version)
with self.assertRaises(KeyError):
mini_dss._get_blob_key({})
with self.assertRaises(self.SpecialError):
mini_dss._get_blob('/blobs/foo', {'content-type': 'application/json'})
with self.assertRaises(self.SpecialError):
mini_dss.get_bundle(uuid, version, 'aws')
with self.assertRaises(self.SpecialError):
mini_dss.get_file(uuid, version, 'aws')
with self.assertRaises(self.SpecialError):
mini_dss.get_native_file_url(uuid, version, 'aws')
class AzulChaliceLocalIntegrationTest(AzulTestCase):
url = furl(scheme='http', host='127.0.0.1', port=8000)
server = None
server_thread = None
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
app_module = load_app_module('service')
app_dir = os.path.dirname(app_module.__file__)
factory = chalice.cli.factory.CLIFactory(app_dir)
config = factory.create_config_obj()
cls.server = factory.create_local_server(app_obj=app_module.app,
config=config,
host=cls.url.host,
port=cls.url.port)
cls.server_thread = threading.Thread(target=cls.server.serve_forever)
cls.server_thread.start()
@classmethod
def tearDownClass(cls) -> None:
cls.server.shutdown()
cls.server_thread.join()
super().tearDownClass()
def test_local_chalice_health_endpoint(self):
url = self.url.copy().set(path='health').url
response = requests.get(url)
self.assertEqual(200, response.status_code)
catalog = first(config.integration_test_catalogs.keys())
def test_local_chalice_index_endpoints(self):
url = self.url.copy().set(path='index/files',
query=dict(catalog=self.catalog)).url
response = requests.get(url)
self.assertEqual(200, response.status_code)
def test_local_filtered_index_endpoints(self):
filters = {'genusSpecies': {'is': ['Homo sapiens']}}
url = self.url.copy().set(path='index/files',
query=dict(filters=json.dumps(filters),
catalog=self.catalog)).url
response = requests.get(url)
self.assertEqual(200, response.status_code)
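# A hypothetical local run, assuming the deployment environment variables
# that azul's config module reads are already set up; the repository may
# also provide its own make target or runner for this:
#
#   python -m unittest test.integration_test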
| 40.231386
| 117
| 0.553442
| 3,599
| 35,122
| 5.208113
| 0.184496
| 0.011737
| 0.015258
| 0.006402
| 0.188167
| 0.158237
| 0.128468
| 0.101312
| 0.070476
| 0.056498
| 0
| 0.008481
| 0.355447
| 35,122
| 872
| 118
| 40.277523
| 0.819507
| 0.066767
| 0
| 0.152797
| 0
| 0.001364
| 0.073991
| 0.008847
| 0
| 0
| 0
| 0.001147
| 0.066849
| 1
| 0.068213
| false
| 0.002729
| 0.060027
| 0.006821
| 0.177353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|