function (string, lengths 11–56k) | repo_name (string, lengths 5–60) | features (list) |
|---|---|---|
def admin_remove_flag():
    """
    In GET request
    - Deletes the flag for the respective video ID.

    Requires a logged-in admin user; non-admins get a 403, anonymous
    users are redirected to the login form.
    """
    if request.method == 'GET':
        if 'user' not in session:
            return redirect(url_for('login_form'))
        # Ask the backend whether the current session user is an admin.
        admin_check = requests.get(
            url='http://127.0.0.1:8080/is-admin/{}'.format(session['user'])
        ).content.decode("utf-8")
        if admin_check != "True":
            abort(403)
        # Remove the flag for the requested video and go back to the list.
        requests.post(url='http://127.0.0.1:8080/remove-flag',
                      data={'video_ID': request.args.get('v')})
        return redirect(url_for('flagged_videos'))
74,
30,
74,
5,
1507245196
] |
def text_of(relpath):
    """
    Return string containing the contents of the file at *relpath* relative to
    this file.
    """
    base_dir = os.path.dirname(__file__)
    target_path = os.path.join(base_dir, os.path.normpath(relpath))
    with open(target_path) as stream:
        return stream.read()
3476,
948,
3476,
595,
1381848641
] |
def __init__(self, env, model_directory: str, save_directory: str, **usercfg) -> None:
    """Restore a saved TensorFlow model and prepare it for running episodes.

    env: Gym-style environment to run the restored policy in.
    model_directory: directory holding "model.meta" and the "model" checkpoint.
    save_directory: directory where results will be written.
    usercfg: overrides merged into the default configuration.
    """
    super(ModelRunner, self).__init__()
    self.env = env
    self.model_directory = model_directory
    self.save_directory = save_directory
    # Defaults: episode length from the environment spec, one step per action.
    self.config = {
        'episode_max_length': self.env.spec.tags.get('wrapper_config.TimeLimit.max_episode_steps'),
        'repeat_n_actions': 1,
    }
    self.config.update(usercfg)
    # Rebuild the graph from the meta file, then load the trained weights.
    self.session = tf.Session()
    self.saver = tf.train.import_meta_graph(os.path.join(self.model_directory, "model.meta"))
    self.saver.restore(self.session, os.path.join(self.model_directory, "model"))
    # Tensors exported by the training code under named collections.
    self.action = tf.get_collection("action")[0]
    self.states = tf.get_collection("states")[0]
84,
28,
84,
1,
1472985976
] |
def get_trajectory(self, render: bool = False):
    """
    Run agent-environment loop for one whole episode (trajectory).

    Bug fix: the observation returned by ``env.step`` was discarded, so
    ``choose_action`` always acted on the initial state. The new state is
    now fed back into the policy on every step.
    """
    state = self.env.reset()
    for _ in range(self.config["episode_max_length"]):
        action = self.choose_action(state)
        for _ in range(self.config["repeat_n_actions"]):
            state, _, done, _ = self.env.step(action)
            if done:  # Don't continue if episode has already ended
                break
        if done:
            break
        if render:
            self.env.render()
    return
84,
28,
84,
1,
1472985976
] |
def main():
    """Load a saved model and run it inside a monitored environment."""
    args = parser.parse_args()
    environment = make(args.environment)
    runner = ModelRunner(environment, args.model_directory, args.save_directory,
                         n_iter=args.iterations)
    try:
        # Wrap the environment so runs are recorded into the save directory.
        runner.env = wrappers.Monitor(runner.env, args.save_directory,
                                      video_callable=False, force=True)
        runner.run()
    except KeyboardInterrupt:
        # Allow a clean manual stop with Ctrl-C.
        pass
84,
28,
84,
1,
1472985976
] |
def __init__(self, params, normalize=False, whiten=True):
    """Store experiment settings and derive dataset file-name prefixes."""
    self.model_id = common.get_next_model_id()
    self.norm = normalize
    self.whiten = whiten
    dataset_cfg = params['dataset']
    # File-name prefixes for the feature (x) and target (y) matrices.
    self.x_path = '%s_%sx%s' % (dataset_cfg['dataset'],
                                dataset_cfg['npatches'],
                                dataset_cfg['window'])
    self.y_path = '%s_%s_%s' % (dataset_cfg['fact'],
                                dataset_cfg['dim'],
                                dataset_cfg['dataset'])
    self.dataset_settings = dataset_cfg
    self.training_params = params['training']
    self.model_arch = params['cnn']
    self.predicting_params = params['predicting']
98,
25,
98,
7,
1487801591
] |
def _squared_magnitude(x):
    """Sum of squared components along the last axis."""
    squared = tt.sqr(x)
    return squared.sum(axis=-1)
98,
25,
98,
7,
1487801591
] |
def cosine(x, y):
    """Cosine distance between *x* and *y*, rescaled and clipped into [0, 1]."""
    similarity = (x * y).sum(axis=-1) / (_magnitude(x) * _magnitude(y))
    return tt.clip((1 - similarity) / 2, 0, 1)
98,
25,
98,
7,
1487801591
] |
def build_model(config):
    """Builds the cnn.

    Looks up the architecture named in ``config.model_arch``, then compiles
    it with the optimizer and loss described in ``config.training_params``.
    The original used eval() on config-supplied strings; explicit lookups
    are used instead (same behavior for the supported values 'sgd'/'adam'
    and the custom 'cosine' loss).
    """
    params = config.model_arch
    get_model = getattr(models, 'get_model_' + str(params['architecture']))
    model = get_model(params)
    #model = model_kenun.build_convnet_model(params)
    # Learning setup
    t_params = config.training_params
    optimizers = {
        'sgd': SGD(lr=t_params["learning_rate"], decay=t_params["decay"],
                   momentum=t_params["momentum"], nesterov=t_params["nesterov"]),
        'adam': Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08),
    }
    optimizer = optimizers[t_params['optimizer']]
    metrics = ['mean_squared_error']
    if config.model_arch["final_activation"] == 'softmax':
        metrics.append('categorical_accuracy')
    if t_params['loss_func'] == 'cosine':
        loss_func = cosine  # custom distance defined in this module
    else:
        loss_func = t_params['loss_func']
    model.compile(loss=loss_func, optimizer=optimizer, metrics=metrics)
    return model
98,
25,
98,
7,
1487801591
] |
def load_data_hf5(params, val_percent, test_percent):
    """Split the training patches HDF5 file into train/val/test views.

    Returns (X_train, Y_train, X_val, Y_val, X_test, Y_test, N_train);
    the splits are contiguous slices of the stored rows.
    """
    hdf5_file = common.PATCHES_DIR + "/patches_train_%s_%s.hdf5" % (
        params['dataset']['dataset'], params['dataset']['window'])
    with h5py.File(hdf5_file, "r") as f:
        N = f["targets"].shape[0]
    train_percent = 1 - val_percent - test_percent
    N_train = int(train_percent * N)
    N_val = int(val_percent * N)
    # Contiguous splits: [0, N_train), [N_train, N_train+N_val), remainder = test.
    X_train = HDF5Matrix(hdf5_file, 'features', start=0, end=N_train)
    Y_train = HDF5Matrix(hdf5_file, 'targets', start=0, end=N_train)
    X_val = HDF5Matrix(hdf5_file, 'features', start=N_train, end=N_train + N_val)
    Y_val = HDF5Matrix(hdf5_file, 'targets', start=N_train, end=N_train + N_val)
    X_test = HDF5Matrix(hdf5_file, 'features', start=N_train + N_val, end=N)
    Y_test = HDF5Matrix(hdf5_file, 'targets', start=N_train + N_val, end=N)
    return X_train, Y_train, X_val, Y_val, X_test, Y_test, N_train
98,
25,
98,
7,
1487801591
] |
def batch_block_generator(params, y_path, N_train, id2gt, X_meta=None,
                          val_from_file=False):
    """Infinite minibatch generator reading training patches block-wise.

    Loads blocks of ``block_step`` rows from the patches HDF5 file, drops
    rows whose index is empty, looks targets up in ``id2gt``, optionally
    normalizes them, and yields ``(x_batch, y_batch)`` minibatches.
    When ``X_meta`` is given it is appended as a second model input.

    Fixes: ``range`` objects are immutable on Python 3 and cannot be passed
    to ``random.shuffle`` (TypeError); a list is used now. Also replaces the
    unidiomatic ``X_meta != None`` comparison.
    """
    hdf5_file = common.PATCHES_DIR + "/patches_train_%s_%sx%s.hdf5" % (
        params['dataset']['dataset'], params['dataset']['npatches'],
        params['dataset']['window'])
    f = h5py.File(hdf5_file, "r")
    block_step = 50000
    batch_size = params['training']['n_minibatch']
    randomize = True
    with_meta = X_meta is not None
    while 1:
        for i in range(0, N_train, block_step):
            x_block = f['features'][i:min(N_train, i + block_step)]
            index_block = f['index'][i:min(N_train, i + block_step)]
            #y_block = f['targets'][i:min(N_train,i+block_step)]
            # Drop rows with an empty index (no ground truth available).
            x_block = np.delete(x_block, np.where(index_block == ""), axis=0)
            index_block = np.delete(index_block, np.where(index_block == ""))
            y_block = np.asarray([id2gt[id] for id in index_block])
            if params['training']['normalize_y']:
                normalize(y_block, copy=False)
            # list() so the sequence is mutable for random.shuffle (Python 3).
            items_list = list(range(x_block.shape[0]))
            if randomize:
                random.shuffle(items_list)
            for j in range(0, len(items_list), batch_size):
                # Only full batches are yielded; the trailing remainder is skipped.
                if j + batch_size <= x_block.shape[0]:
                    items_in_batch = items_list[j:j + batch_size]
                    x_batch = x_block[items_in_batch]
                    y_batch = y_block[items_in_batch]
                    if with_meta:
                        x_batch = [x_batch, X_meta[items_in_batch]]
                    yield (x_batch, y_batch)
98,
25,
98,
7,
1487801591
] |
def readme():
    """Return the contents of README.md, or LONG_DESCRIPTION if it is missing.

    Fix: the file handle was never closed; it is now managed with a
    context manager. Only the open() call is guarded, so read errors
    still propagate exactly as before.
    """
    try:
        readme_file = open('README.md')
    except IOError:
        return LONG_DESCRIPTION
    with readme_file:
        return readme_file.read()
233,
34,
233,
5,
1345664463
] |
def __init__(self, bins, sample_rate, fft_window='hann', fft_overlap=0.5,
             crop_factor=0, log_scale=True, remove_dc=False, detrend=None,
             lnb_lo=0, max_threads=0, max_queue_size=0):
    """Store PSD settings and start the background worker pool.

    bins: FFT size (number of frequency bins).
    sample_rate: input sample rate in Hz.
    fft_window, fft_overlap: window name and overlap fraction for the
        spectral estimator used in update().
    crop_factor: fraction of bins cropped from the spectrum edges in result().
    log_scale: convert power to dB in result().
    remove_dc: replace the DC bin with its neighbours' mean in update().
    detrend: detrend argument forwarded to the spectral estimator.
    lnb_lo: LNB local-oscillator offset — presumably in Hz; confirm with callers.
    max_threads, max_queue_size: thread-pool limits (0 looks like
        "use the pool's default" — NOTE(review): confirm in threadpool).
    """
    self._bins = bins
    self._sample_rate = sample_rate
    self._fft_window = fft_window
    self._fft_overlap = fft_overlap
    # Overlap expressed as a whole number of FFT bins rather than a fraction.
    self._fft_overlap_bins = math.floor(self._bins * self._fft_overlap)
    self._crop_factor = crop_factor
    self._log_scale = log_scale
    self._remove_dc = remove_dc
    self._detrend = detrend
    self._lnb_lo = lnb_lo
    # Pool used by result_async() to process PSD results off the main thread.
    self._executor = threadpool.ThreadPoolExecutor(
        max_workers=max_threads,
        max_queue_size=max_queue_size,
        thread_name_prefix='PSD_thread'
    )
    # Bin center frequencies for an uncentered (fftfreq-ordered) spectrum.
    self._base_freq_array = numpy.fft.fftfreq(self._bins, 1 / self._sample_rate)
132,
31,
132,
25,
1487207652
] |
def result(self, psd_state):
    """Return freqs and averaged PSD for given center frequency"""
    freq_array = numpy.fft.fftshift(psd_state['freq_array'])
    pwr_array = numpy.fft.fftshift(psd_state['pwr_array'])
    if self._crop_factor:
        # Trim crop_factor * bins samples, split evenly between both edges.
        edge = round((self._crop_factor * self._bins) / 2)
        freq_array = freq_array[edge:-edge]
        pwr_array = pwr_array[edge:-edge]
    repeats = psd_state['repeats']
    if repeats > 1:
        # update() accumulates; divide to get the average.
        pwr_array = pwr_array / repeats
    if self._log_scale:
        # Convert power to dB.
        pwr_array = 10 * numpy.log10(pwr_array)
    return (freq_array, pwr_array)
132,
31,
132,
25,
1487207652
] |
def result_async(self, psd_state):
    """Return freqs and averaged PSD for given center frequency (asynchronously in another thread)"""
    future = self._executor.submit(self.wait_for_result, psd_state)
    return future
132,
31,
132,
25,
1487207652
] |
def update(self, psd_state, samples_array):
    """Compute PSD from samples and update average for given center frequency"""
    freqs, powers = simplespectral.welch(samples_array, self._sample_rate,
                                         nperseg=self._bins,
                                         window=self._fft_window,
                                         noverlap=self._fft_overlap_bins,
                                         detrend=self._detrend)
    if self._remove_dc:
        # Replace the DC bin with the mean of its two neighbouring bins.
        powers[0] = (powers[1] + powers[-1]) / 2
    with psd_state['update_lock']:
        # Accumulate raw power; result() divides by 'repeats' for the average.
        psd_state['repeats'] += 1
        if psd_state['pwr_array'] is None:
            psd_state['pwr_array'] = powers
        else:
            psd_state['pwr_array'] += powers
132,
31,
132,
25,
1487207652
] |
def __init__(self, domain='ram.aliyuncs.com', port=80):
    """Initialize the RAM API request with its endpoint and empty fields."""
    RestApi.__init__(self, domain, port)
    # Populated by callers before the request is issued.
    self.AccountSpace = None
    self.UserName = None
11,
11,
11,
1,
1448422655
] |
def get_context_data(self, **kwargs):
    """Add a request-configured submissions table to the template context."""
    context = super(SubmissionListView, self).get_context_data(**kwargs)
    table = SubmissionTable(self.get_queryset())
    # Wire sorting/pagination from the current request into the table.
    RequestConfig(self.request).configure(table)
    # add filter here
    context['submissions_table'] = table
    return context
4,
5,
4,
3,
1436032599
] |
def get_context_data(self, **kwargs):
    """Return the default context; no extra data is added yet."""
    return super(SubmissionDetailView, self).get_context_data(**kwargs)
4,
5,
4,
3,
1436032599
] |
def dispatch(self, request, pid=None, *args, **kwargs):
    """Resolve the target problem from the URL kwargs before dispatching."""
    problem_id = self.kwargs['pid']
    self.problem = get_object_or_404(Problem.objects.all(), pk=problem_id)
    return super(SubmissionCreateView, self).dispatch(request, *args, **kwargs)
4,
5,
4,
3,
1436032599
] |
def get_context_data(self, **kwargs):
    """Expose the problem resolved in dispatch() to the template."""
    context = super(SubmissionCreateView, self).get_context_data(**kwargs)
    context['problem'] = self.problem
    return context
4,
5,
4,
3,
1436032599
] |
def main(path_output, s1, s2, dt):
    """Plot the coupling between states *s1* and *s2* over time.

    Couplings are read from the cached 'couplings.npy' when present;
    otherwise they are assembled from the 'Ham_*_im' files and cached.
    Fix: np.stack was fed a generator, which is deprecated and a
    TypeError on modern NumPy — a list comprehension is used instead.
    """
    # Check if the file with couplings exists
    if not os.path.isfile('couplings.npy'):
        # Check all the files stored
        files_im = glob.glob('Ham_*_im')
        # Read the couplings (np.stack needs a sequence, not a generator)
        couplings = np.stack(
            [np.loadtxt(f'Ham_{f}_im') for f in range(len(files_im))])
        # Save the file for fast reading afterwards
        np.save('couplings', couplings)
    else:
        couplings = np.load('couplings.npy')
    ts = np.arange(couplings.shape[0]) * dt
    plt.plot(ts, couplings[:, s1, s2] * r2meV)
    plt.xlabel('Time (fs)')
    plt.ylabel('Energy (meV)')
    plt.show()
7,
10,
7,
9,
1465559023
] |
def __init__(self, client, config, serializer, deserializer):
    """Keep references to the pipeline client, config and (de)serializers."""
    self._client = client
    self._config = config
    self._serialize = serializer
    self._deserialize = deserializer
3526,
2256,
3526,
986,
1335285972
] |
def prepare_request(next_link=None):
    """Build the GET request for the first page or a continuation page."""
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    query_parameters = {}  # type: Dict[str, Any]
    if not next_link:
        # First page: use the operation URL and attach the api-version.
        url = self.list.metadata['url']  # type: ignore
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    else:
        # Continuation: next_link already encodes the full query.
        url = next_link
    return self._client.get(url, query_parameters, header_parameters)
3526,
2256,
3526,
986,
1335285972
] |
def get_next(next_link=None):
    """Fetch one page, raising HttpResponseError on non-200 replies."""
    request = prepare_request(next_link)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code != 200:
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
    return pipeline_response
3526,
2256,
3526,
986,
1335285972
] |
def config_paths():
    """Return a copy of the configured 'config_paths' option."""
    import bids.config
    paths = bids.config.get_option('config_paths')
    # Copy so callers cannot mutate the library's stored option.
    return paths.copy()
182,
106,
182,
100,
1468076907
] |
def extension_initial_dot():
    """Return the current 'extension_initial_dot' configuration value."""
    import bids.config
    value = bids.config.get_option('extension_initial_dot')
    return value
182,
106,
182,
100,
1468076907
] |
def __init__(self, identifier=None, container=None):
    """Create a tree node for one cell, with no data or lineage links yet."""
    tlib.Node.__init__(self, identifier=identifier)
    self._childs = []           # child Cell instances
    self._parent = None         # parent Cell, set through the property
    self._birth_time = None     # cell cycle start time
    self._division_time = None  # cell cycle end time
    self._sdata = {}  # dictionary to contain computed data
    self._protected_against_build = set()  # set of obs not to re-build
    # cells are built from a specific container instance
    # container can be a given field of view, a channel, a microcolony, ...
    self.container = container  # point to Container instance
return | LeBarbouze/tunacell | [
1,
5,
1,
1,
1502214471
] |
def childs(self):
    """Get list of child instances."""
    children = self._childs
    return children
1,
5,
1,
1,
1502214471
] |
def childs(self, value):
    """Set children: None resets the list, a list is added item by item,
    a single Cell is appended; anything else raises CellChildsError."""
    if value is None:
        self._childs = []
        return
    if isinstance(value, list):
        for item in value:
            self.childs = item  # recurse so each element is validated
        return
    if not isinstance(value, Cell):
        raise CellChildsError
    self._childs.append(value)
1,
5,
1,
1,
1502214471
] |
def parent(self):
    """Get parent instance."""
    pcell = self._parent
    return pcell
1,
5,
1,
1,
1502214471
] |
def parent(self, pcell):
    """Set the parent cell; only None or a Cell instance is accepted."""
    if pcell is not None and not isinstance(pcell, Cell):
        raise CellParentError
    self._parent = pcell
1,
5,
1,
1,
1502214471
] |
def birth_time(self):
    """Get cell cycle start time. See below for Setter."""
    value = self._birth_time
    return value
1,
5,
1,
1,
1502214471
] |
def birth_time(self, value):
    "Set cell cycle start time. See above for Getter."
    # Stored unchecked; ordering against division time is validated in
    # the division_time setter instead.
    self._birth_time = value
1,
5,
1,
1,
1502214471
] |
def division_time(self):
    """Get cell cycle end time. See below for Setter."""
    value = self._division_time
    return value
1,
5,
1,
1,
1502214471
] |
def division_time(self, value):
    """Set cell cycle end time; it must not precede the birth time."""
    birth = self.birth_time
    if birth is not None and value < birth:
        raise CellDivisionError
    self._division_time = value
1,
5,
1,
1,
1502214471
] |
def __repr__(self):
    """Compact 'id;p:parent;ch:children' string, with '-' for missing links."""
    cid = str(self.identifier)
    pid = str(self.parent.identifier) if self.parent else '-'
    if self.childs:
        ch = ','.join('{}'.format(c.identifier) for c in self.childs)
    else:
        ch = '-'
    return '{0};p:{1};ch:{2}'.format(cid, pid, ch)
1,
5,
1,
1,
1502214471
] |
def protect_against_build(self, obs):
    """Protect current cell against building obs array/value"""
    # Remember this observable so later builds skip recomputation.
    self._protected_against_build.add(obs)
1,
5,
1,
1,
1502214471
] |
def build_timelapse(self, obs):
"""Builds timeseries corresponding to observable of mode 'dynamics'.
Result is an array of same length as time array, stored in a dictionary
_sdata, which keys are obs.label. When using sliding windows,
estimate in a given cell actualize data in its parent cell, if and only
if it has not been actualized before (check disjoint time intervals).
Parameters
----------
obs : Observable instance
mode must be 'dynamics'
Note
-----
Some observables carry the 'local_fit' option True. In this case,
local fits over shifting time-windows are performed. If one would keep
only a given cell's data, then the constraints on shifting time-window
would let some 'empty' times, at which no evaluation can be performed.
This is solved by getting data from the cell's parent cell's data. This
operation computes time-window fiited data in the cell's parent cycle.
Two precautions must then be taken:
1. a given cell's data must be used only once for evaluating parent
cell's data,
2. when data has been used from one daughter cell, concatenate
the current cell's evaluated data to it.
.. warning::
For some computations, the time interval between consecutive
acquisitions is needed. If it's defined in the container or the
experiment metadata, this parameter will be imported; otherwise if
there are at least 2 consecutive values, it will be inferred from
data (at the risk of making mistakes if there are too many missing
values)
"""
label = str(obs.label)
raw = obs.raw
coords = Coordinates(self.data['time'], self.data[raw])
if self.parent is not None and len(self.parent.data) > 0:
anteriors = Coordinates(self.parent.data['time'],
self.parent.data[raw])
else:
anteriors = Coordinates(np.array([], dtype=float),
np.array([], dtype=float))
# if empty, return empty array of appropriate type
if len(self.data) == 0: # there is no data, but it has some dtype
return Coordinates(np.array([], dtype=float),
np.array([], dtype=float))
dt = self.container.period
if dt is None:
# automatically finds dt
if len(self.data) > 1:
arr = self.data['time']
time_increments = arr[1:] - arr[:-1]
dt = np.round(np.amin(np.abs(time_increments)), decimals=2)
# case : no local fit, use data, or finite differences
if not obs.local_fit:
if obs.differentiate:
if obs.scale == 'linear':
new = derivative(coords)
elif obs.scale == 'log':
new = logderivative(coords) | LeBarbouze/tunacell | [
1,
5,
1,
1,
1502214471
] |
def compute_cyclized(self, obs):
"""Computes observable when mode is different from 'dynamics'.
Parameters
----------
obs : Observable instance
mode must be different from 'dynamics'
Raises
------
ValueError
when Observable mode is 'dynamics'
Note
----
To compute a cell-cycle observable (e.g. birth growth rate), it is
necessary to know the value of the timelapse counterpart (e.g. growth
rate here). The timelapse observable may work by joining values at
divisions, and hence a single call to Cell.build_timelapse() will
result in a different result array than when it has beenalso called in
a daughter cell (potentially affecting values toward the end of current
cell cycle). Hence, in that circumstances when continuity is used to
join timeseries at divisions, enhancing results with fitting
over sliding windows, it is the user's task to compute first the
timelapse observable over the entire lineage, and only then, evaluate
cell-cycle values. This is why the function below tries first to read
an already present array from timelapse counterpart, and only if it
fails will it compute it using only this current cell data.
"""
scale = obs.scale
npts = obs.join_points
label = obs.label
if obs.mode == 'dynamics':
raise ValueError('Called build_cyclized for dynamics mode')
# associate timelapse counterpart
cobs = obs.as_timelapse()
clabel = cobs.label
time = self.data['time']
# if it has been computed already, the clabel key exists in sdata
try:
array = self._sdata[clabel]
# otherwise compute the timelapse counterpart
except KeyError:
self.build_timelapse(cobs)
array = self._sdata[clabel]
# get value
try:
if obs.mode == 'birth':
value = extrapolate_endpoints(time, array, self.birth_time,
scale=scale, join_points=npts)
elif obs.mode == 'division':
value = extrapolate_endpoints(time, array, self.division_time,
scale=scale, join_points=npts)
elif 'net-increase' in obs.mode:
dval = extrapolate_endpoints(time, array, self.division_time,
scale=scale, join_points=npts)
bval = extrapolate_endpoints(time, array, self.birth_time,
scale=scale, join_points=npts)
if obs.mode == 'net-increase-additive':
value = dval - bval
elif obs.mode == 'net-increase-multiplicative':
value = dval/bval
elif obs.mode == 'average':
value = np.nanmean(array)
elif obs.mode == 'rate':
if len(array) < 2:
value = np.nan # not enough values to estimate rate
if obs.scale == 'log':
array = np.log(array)
value, intercept = np.polyfit(time, array, 1)
except ExtrapolationError as err: | LeBarbouze/tunacell | [
1,
5,
1,
1,
1502214471
] |
def _disjoint_time_sets(ts1, ts2):
    """Return True when the two time arrays do not overlap (or one is empty)."""
    if len(ts1) == 0 or len(ts2) == 0:
        return True
    lo1, lo2 = map(np.nanmin, [ts1, ts2])
    hi1, hi2 = map(np.nanmax, [ts1, ts2])
    # Disjoint iff one interval ends strictly before the other begins.
    return hi1 < lo2 or hi2 < lo1
1,
5,
1,
1,
1502214471
] |
def get_cache_name(prefix, name):
    """
    Cache name constructor. Uses the same methods as django cache system

    Examples:
    *) prefix=profile.cache, name=<requestuser.id>
    *) prefix=template.cache.sidebar, name=<requestuser.id>
    """
    # str() replaces six.text_type (equivalent on Python 3), dropping the
    # runtime dependency on six inside this helper.
    digest = hashlib.md5(str(name).encode('utf-8')).hexdigest()
    return '{0}.{1}'.format(prefix, digest)
8,
3,
8,
1,
1475848727
] |
def rendered_source_for_user(page_pk, user_id):
    """Create cache key for rendered page source based on current user"""
    key_template = 'powerpages:rendered_source_user:{0}:{1}'
    return key_template.format(page_pk, user_id)
8,
3,
8,
1,
1475848727
] |
def setplot(plotdata=None):
""""""
if plotdata is None:
from clawpack.visclaw.data import ClawPlotData
plotdata = ClawPlotData()
# clear any old figures,axes,items data
plotdata.clearfigures()
plotdata.format = 'ascii'
# Load data from output
clawdata = clawutil.ClawInputData(2)
clawdata.read(os.path.join(plotdata.outdir, 'claw.data'))
physics = geodata.GeoClawData()
physics.read(os.path.join(plotdata.outdir, 'geoclaw.data'))
surge_data = geodata.SurgeData()
surge_data.read(os.path.join(plotdata.outdir, 'surge.data'))
friction_data = geodata.FrictionData()
friction_data.read(os.path.join(plotdata.outdir, 'friction.data'))
# Load storm track
track = surgeplot.track_data(os.path.join(plotdata.outdir, 'fort.track'))
# Set afteraxes function
def surge_afteraxes(cd):
surgeplot.surge_afteraxes(cd, track, plot_direction=False,
kwargs={"markersize": 4})
# Color limits
surface_limits = [-5.0, 5.0]
speed_limits = [0.0, 3.0]
wind_limits = [0, 64]
pressure_limits = [935, 1013]
friction_bounds = [0.01, 0.04]
def friction_after_axes(cd):
plt.title(r"Manning's $n$ Coefficient")
# ==========================================================================
# Plot specifications
# ==========================================================================
regions = {"Gulf": {"xlimits": (clawdata.lower[0], clawdata.upper[0]),
"ylimits": (clawdata.lower[1], clawdata.upper[1]),
"figsize": (6.4, 4.8)},
"Texas Gulf Coast": {"xlimits": (-99.2, -94.2),
"ylimits": (26.4, 30.4),
"figsize": (6, 6)}}
for (name, region_dict) in regions.items():
# Surface Figure
plotfigure = plotdata.new_plotfigure(name="Surface - %s" % name)
plotfigure.kwargs = {"figsize": region_dict['figsize']}
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = "Surface"
plotaxes.xlimits = region_dict["xlimits"]
plotaxes.ylimits = region_dict["ylimits"]
plotaxes.afteraxes = surge_afteraxes
surgeplot.add_surface_elevation(plotaxes, bounds=surface_limits)
surgeplot.add_land(plotaxes)
plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0] * 10
plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10
# Speed Figure
plotfigure = plotdata.new_plotfigure(name="Currents - %s" % name)
plotfigure.kwargs = {"figsize": region_dict['figsize']}
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = "Currents"
plotaxes.xlimits = region_dict["xlimits"]
plotaxes.ylimits = region_dict["ylimits"]
plotaxes.afteraxes = surge_afteraxes
surgeplot.add_speed(plotaxes, bounds=speed_limits)
surgeplot.add_land(plotaxes)
plotaxes.plotitem_dict['speed'].amr_patchedges_show = [0] * 10
plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10
#
# Friction field
#
plotfigure = plotdata.new_plotfigure(name='Friction')
plotfigure.show = friction_data.variable_friction and True
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = regions['Gulf']['xlimits']
plotaxes.ylimits = regions['Gulf']['ylimits']
# plotaxes.title = "Manning's N Coefficient"
plotaxes.afteraxes = friction_after_axes
plotaxes.scaled = True
surgeplot.add_friction(plotaxes, bounds=friction_bounds, shrink=0.9)
plotaxes.plotitem_dict['friction'].amr_patchedges_show = [0] * 10
plotaxes.plotitem_dict['friction'].colorbar_label = "$n$"
#
# Hurricane Forcing fields
#
# Pressure field
plotfigure = plotdata.new_plotfigure(name='Pressure')
plotfigure.show = surge_data.pressure_forcing and True
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = regions['Gulf']['xlimits']
plotaxes.ylimits = regions['Gulf']['ylimits']
plotaxes.title = "Pressure Field"
plotaxes.afteraxes = surge_afteraxes
plotaxes.scaled = True
surgeplot.add_pressure(plotaxes, bounds=pressure_limits)
surgeplot.add_land(plotaxes)
# Wind field
plotfigure = plotdata.new_plotfigure(name='Wind Speed')
plotfigure.show = surge_data.wind_forcing and True
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = regions['Gulf']['xlimits']
plotaxes.ylimits = regions['Gulf']['ylimits']
plotaxes.title = "Wind Field"
plotaxes.afteraxes = surge_afteraxes
plotaxes.scaled = True
surgeplot.add_wind(plotaxes, bounds=wind_limits)
surgeplot.add_land(plotaxes)
# ========================================================================
# Figures for gauges
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='Gauge Surfaces', figno=300,
type='each_gauge')
plotfigure.show = True
plotfigure.clf_each_gauge = True | mandli/surge-examples | [
7,
45,
7,
4,
1416590040
] |
def days2seconds(days):
    """Convert a time span in days to seconds."""
    seconds_per_day = 24.0 * 60.0 * 60.0  # 86400.0
    return days * seconds_per_day
7,
45,
7,
4,
1416590040
] |
def get_actual_water_levels(station_id):
    """Fetch NOAA observations for a station, de-tided and re-timed.

    Returns (seconds relative to landfall, water level minus predicted tide).
    """
    # Fetch water levels and tide predictions for given station
    date_time, water_level, tide = fetch_noaa_tide_data(station_id,
                                                        begin_date, end_date)
    # Calculate times relative to landfall
    seconds_rel_landfall = (date_time - landfall_time) / numpy.timedelta64(1, 's')
    # Subtract tide predictions from measured water levels
    water_level = water_level - tide
    return seconds_rel_landfall, water_level
7,
45,
7,
4,
1416590040
] |
def gauge_afteraxes(cd):
    """Overlay observed water levels on a gauge plot and fix up its axes."""
    station_id, station_name = stations[cd.gaugeno - 1]
    seconds_rel_landfall, actual_level = get_actual_water_levels(station_id)
    axes = plt.gca()
    surgeplot.plot_landfall_gauge(cd.gaugesoln, axes)
    axes.plot(seconds_rel_landfall, actual_level, 'g')
    # Fix up plot - in particular fix time labels
    axes.set_title(station_name)
    axes.set_xlabel('Seconds relative to landfall')
    axes.set_ylabel('Surface (m)')
    axes.set_xlim([days2seconds(-1), days2seconds(3)])
    axes.set_ylim([-1, 5])
    # Bug fix: the first tick was written '-days2seconds(-1)', a double
    # negation that placed it at +1 day (duplicating the day-1 tick) instead
    # of at -1 day, as the commented labels [-1, 0, 1, 2, 3] intend.
    axes.set_xticks([days2seconds(-1), 0, days2seconds(1), days2seconds(2),
                     days2seconds(3)])
    #axes.set_xticklabels([r"$-1$", r"$0$", r"$1$", r"$2$", r"$3$"])
    #axes.grid(True)
7,
45,
7,
4,
1416590040
] |
def gauge_location_afteraxes(cd):
    """Draw the surge background and mark all gauge locations on the axes."""
    # Tighten margins before drawing the overlay.
    plt.subplots_adjust(left=0.12, bottom=0.06, right=0.97, top=0.97)
    surge_afteraxes(cd)
    # Black circles at every gauge, without text labels.
    gaugetools.plot_gauge_locations(cd.plotdata, gaugenos='all',
                                    format_string='ko', add_labels=False)
7,
45,
7,
4,
1416590040
] |
def create_users_table(dynamodb):
    """Create the 'users' table keyed by username (string hash key)."""
    key_schema = [{'AttributeName': 'username', 'KeyType': 'HASH'}]
    attribute_definitions = [{'AttributeName': 'username', 'AttributeType': 'S'}]
    throughput = {'ReadCapacityUnits': 9, 'WriteCapacityUnits': 9}
    dynamodb.create_table(
        TableName='users',
        KeySchema=key_schema,
        AttributeDefinitions=attribute_definitions,
        ProvisionedThroughput=throughput,
    )
164,
57,
164,
22,
1452851374
] |
def dynamodb_mock():
    """Yield a moto-mocked DynamoDB resource with the users table created."""
    import moto
    with moto.mock_dynamodb2():
        resource = boto3.resource('dynamodb', region_name='us-east-1')
        create_users_table(resource)
        yield resource
164,
57,
164,
22,
1452851374
] |
def dynamodb():
    """Provide a DynamoDB resource with a freshly recreated 'users' table."""
    resource = boto3.resource('dynamodb', **DYNAMODB_CONFIG)
    try:
        resource.Table('users').delete()
    except ClientError as error:
        # you can not just use ResourceNotFoundException class
        # to catch an error since it doesn't exist until it's raised
        if error.__class__.__name__ != 'ResourceNotFoundException':
            raise
    create_users_table(resource)
    # waiting until the table exists
    resource.meta.client.get_waiter('table_exists').wait(TableName='users')
    return resource
164,
57,
164,
22,
1452851374
] |
def s3_mock():
    """Yield a moto-mocked S3 client."""
    import moto
    with moto.mock_s3():
        yield boto3.client('s3', region_name='us-east-1')
164,
57,
164,
22,
1452851374
] |
def s3():
    """Return a real S3 client built from S3_CONFIG."""
    client = boto3.client('s3', **S3_CONFIG)
    return client
164,
57,
164,
22,
1452851374
] |
def patch_boto3():
    """Install boto3 tracing patches for the duration of the context."""
    boto3_hooks.install_patches()
    try:
        yield
    finally:
        # Always undo the monkey-patching, even if the body raised.
        boto3_hooks.reset_patches()
164,
57,
164,
22,
1452851374
] |
def _test_dynamodb(dynamodb, tracer):
    """Exercise put/get/delete/describe and verify a span for each call."""
    users = dynamodb.Table('users')
    response = users.put_item(Item={
        'username': 'janedoe',
        'first_name': 'Jane',
        'last_name': 'Doe',
    })
    assert_last_span('resource', 'dynamodb', 'put_item', tracer, response)
    response = users.get_item(Key={'username': 'janedoe'})
    record = response['Item']
    assert record['first_name'] == 'Jane'
    assert record['last_name'] == 'Doe'
    assert_last_span('resource', 'dynamodb', 'get_item', tracer, response)
    # Deleting from a missing table must still produce a traced span.
    try:
        dynamodb.Table('test').delete_item(Key={'username': 'janedoe'})
    except ClientError as error:
        response = error.response
    assert_last_span('resource', 'dynamodb', 'delete_item', tracer, response)
    # Attribute access triggers a describe_table call under the hood.
    response = users.creation_date_time
    assert isinstance(response, datetime.datetime)
    assert_last_span('resource', 'dynamodb', 'describe_table', tracer)
164,
57,
164,
22,
1452851374
] |
def is_service_running(endpoint_url, expected_status_code):
    """Best-effort health check: True when GET returns the expected status."""
    try:
        # feel free to suggest better solution for this check
        reply = requests.get(endpoint_url, timeout=1)
    except requests.exceptions.ConnectionError:
        return False
    return reply.status_code == expected_status_code
164,
57,
164,
22,
1452851374
] |
def is_s3_running():
    """True when the local S3 endpoint answers with HTTP 200."""
    return is_service_running(S3_ENDPOINT_URL, 200)
164,
57,
164,
22,
1452851374
] |
def test_boto3_dynamodb(thread_safe_tracer, dynamodb):
    """Run the DynamoDB scenario against the real local endpoint."""
    _test_dynamodb(dynamodb, thread_safe_tracer)
164,
57,
164,
22,
1452851374
] |
def test_boto3_dynamodb_with_moto(thread_safe_tracer, dynamodb_mock):
    """Run the DynamoDB scenario against the moto mock."""
    _test_dynamodb(dynamodb_mock, thread_safe_tracer)
164,
57,
164,
22,
1452851374
] |
def test_boto3_s3(s3, thread_safe_tracer):
    """Run the S3 scenario against the real local endpoint."""
    _test_s3(s3, thread_safe_tracer)
164,
57,
164,
22,
1452851374
] |
def test_boto3_s3_with_moto(s3_mock, thread_safe_tracer):
    """Run the S3 scenario against the moto mock."""
    _test_s3(s3_mock, thread_safe_tracer)
164,
57,
164,
22,
1452851374
] |
def test_boto3_s3_missing_func_instrumentation(capture):
    """Patching a non-existent S3 function should only log a warning."""
    class _Patcher(boto3_hooks.Boto3Patcher):
        S3_FUNCTIONS_TO_INSTRUMENT = ('missing_func',)

    _Patcher().install_patches()
    capture.check(('root', 'WARNING', 'S3 function missing_func not found'))
164,
57,
164,
22,
1452851374
] |
def create_deterministic_address_bcrt1_p2tr_op_true():
    """
    Generates a deterministic bech32m address (segwit v1 output) that
    can be spent with a witness stack of OP_TRUE and the control block
    with internal public key (script-path spending).

    Returns a tuple with the generated address and the internal key.
    """
    internal_key = (1).to_bytes(32, 'big')
    taproot_info = taproot_construct(internal_key, [(None, CScript([OP_TRUE]))])
    address = encode_segwit_address("bcrt", 1, taproot_info.scriptPubKey[2:])
    # Sanity check against the known deterministic result.
    assert_equal(address, 'bcrt1p9yfmy5h72durp7zrhlw9lf7jpwjgvwdg0jr0lqmmjtgg83266lqsekaqka')
    return (address, internal_key)
164,
70,
164,
5,
1456717652
] |
def base58_to_byte(s):
    """Converts a base58-encoded string to its data and version.

    Returns (payload_without_version_and_checksum, version) for a valid
    base58check string, or b'' for an empty input.  Asserts if *s*
    contains a non-base58 character or the 4-byte checksum is invalid.
    """
    if not s:
        return b''
    # Decode the base58 digits into one big integer.
    n = 0
    for c in s:
        n *= 58
        assert c in chars
        n += chars.index(c)
    res = n.to_bytes((n.bit_length() + 7) // 8, 'big')
    # Leading '1' characters encode leading zero bytes, which the integer
    # round-trip above drops; restore them explicitly.
    # (The original also built an unused hex string 'h' here — dead code.)
    pad = 0
    for c in s:
        if c == chars[0]:
            pad += 1
        else:
            break
    res = b'\x00' * pad + res
    # Assert if the checksum is invalid
    assert_equal(hash256(res[:-4])[:4], res[-4:])
    return res[1:-4], int(res[0])
164,
70,
164,
5,
1456717652
] |
def scripthash_to_p2sh(hash, main=False):
    # Encode a 20-byte script hash as a base58 P2SH address.
    # Version byte 5 = mainnet, 196 = testnet/regtest.
    assert len(hash) == 20
    version = 5 if main else 196
    return byte_to_base58(hash, version)
164,
70,
164,
5,
1456717652
] |
def script_to_p2sh(script, main=False):
    # P2SH address for an arbitrary script: validate, then hash160 + encode.
    script = check_script(script)
    return scripthash_to_p2sh(hash160(script), main)
164,
70,
164,
5,
1456717652
] |
def program_to_witness(version, program, main=False):
    # Encode a witness program as a bech32(m) address.
    # *program* may be raw bytes or a hex string.
    if (type(program) is str):
        program = bytes.fromhex(program)
    assert 0 <= version <= 16
    assert 2 <= len(program) <= 40
    # v0 programs must be exactly 20 (P2WPKH) or 32 (P2WSH) bytes.
    assert version > 0 or len(program) in [20, 32]
    return encode_segwit_address("sys" if main else "bcrt", version, program)
164,
70,
164,
5,
1456717652
] |
def key_to_p2wpkh(key, main=False):
    # P2WPKH (segwit v0) address: the witness program is hash160(pubkey).
    key = check_key(key)
    return program_to_witness(0, hash160(key), main)
164,
70,
164,
5,
1456717652
] |
def check_key(key):
    """Normalize *key* to bytes and require a plausible pubkey length.

    Hex strings are decoded first; the result must be 33 or 65 bytes
    (compressed / uncompressed sizes), otherwise AssertionError is raised.
    """
    if type(key) is str:
        key = bytes.fromhex(key)  # Assuming this is hex string
    ok = type(key) is bytes and len(key) in (33, 65)
    if ok:
        return key
    assert False
164,
70,
164,
5,
1456717652
] |
def query_processor(query):
    """Normalize the query's condition name in place via adapter_for_query.

    No-op when the query carries no condition.
    """
    # `x is not None` is the idiomatic, precedence-safe spelling of the
    # original `not x is None`.
    if query.condition is not None:
        query.condition.conditionName = adapter_for_query(query)
2,
1,
2,
5,
1454631245
] |
def adapter_for_query(queryObject):
    """Map a verbose condition name to its short operator code.

    Known names ('lowerThan', 'greaterThan', 'lessEqual', 'greaterEqual',
    'equal') map to ('lt', 'gt', 'le', 'ge', 'e'); any other name is
    returned unchanged.
    """
    try:
        return {
            'lowerThan': 'lt',
            'greaterThan': 'gt',
            'lessEqual': 'le',
            'greaterEqual': 'ge',
            'equal': 'e'
        }[queryObject.condition.conditionName]
    except KeyError:
        # Only the dict lookup is expected to fail; the original bare
        # `except:` would also have swallowed KeyboardInterrupt/SystemExit.
        return queryObject.condition.conditionName
2,
1,
2,
5,
1454631245
] |
def test_showmigrations_command_override(mocker):
    """showmigrations must be served by septentrion, not Django's handler."""
    django_handle = mocker.patch(
        'django.core.management.commands.showmigrations.Command.handle')
    septentrion_show = mocker.patch(
        'septentrion.show_migrations', return_value=b'')
    call_command('showmigrations')
    assert django_handle.called is False
    assert septentrion_show.called is True
9,
5,
9,
3,
1489395266
] |
def test_north_manage_migrations(mocker, settings, manage):
    """septentrion runs only when NORTH_MANAGE_DB is set and truthy."""
    if manage is not None:
        settings.NORTH_MANAGE_DB = manage
    elif hasattr(settings, 'NORTH_MANAGE_DB'):
        # Parametrized "unset" case: remove the setting entirely.
        del settings.NORTH_MANAGE_DB
    show_mock = mocker.patch('septentrion.show_migrations', return_value=b'')
    call_command('showmigrations')
    assert show_mock.called == bool(manage)
9,
5,
9,
3,
1489395266
] |
def test_mnist_tutorial_keras(self):
    """Run the Keras MNIST tutorial twice on a reduced dataset: check the
    accuracy thresholds, then check run-to-run reproducibility."""
    import tensorflow as tf
    from cleverhans_tutorials import mnist_tutorial_keras
    # Reduced dataset so the tutorial finishes quickly under test.
    tutorial_args = {'train_start': 0,
                     'train_end': 5000,
                     'test_start': 0,
                     'test_end': 333,
                     'nb_epochs': 2,
                     'testing': True}
    g = tf.Graph()
    with g.as_default():
        np.random.seed(42)
        report = mnist_tutorial_keras.mnist_tutorial(**tutorial_args)
    # Accuracy thresholds for the first run.
    self.assertTrue(report.train_clean_train_clean_eval > 0.90)
    self.assertTrue(report.train_clean_train_adv_eval < 0.05)
    self.assertTrue(report.train_adv_train_clean_eval > 0.90)
    self.assertTrue(report.train_adv_train_adv_eval > 0.30)
    # Looser tolerance when a GPU is present.
    atol_fac = 5e-2 if HAS_GPU else 1e-6
    g = tf.Graph()
    with g.as_default():
        np.random.seed(42)
        report_2 = mnist_tutorial_keras.mnist_tutorial(**tutorial_args)
    # Same seed => the two runs must agree on every accuracy field.
    for field in ('train_clean_train_clean_eval',
                  'train_clean_train_adv_eval',
                  'train_adv_train_clean_eval',
                  'train_adv_train_adv_eval'):
        self.assertClose(getattr(report, field),
                         getattr(report_2, field),
                         atol=atol_fac * 1)
5732,
1384,
5732,
39,
1473899284
] |
def __init__(self, biotool):
    """
    :param biotool: Biotool object
    :type biotool: :class:`tooldog.biotool_model.Biotool`
    """
    # Keep a reference to the bio.tools description this object works from.
    self.biotool = biotool
9,
6,
9,
7,
1485181165
] |
def _get_from_repository(self, url):
    """
    Get source code from a repository link.

    :param url: url of the repository
    :type url: STRING
    :return: result of ``_get_from_github`` for GitHub urls, implicitly
        ``None`` for any other host (only a warning is logged).
    """
    # Here we deal with repository, have to use regex to test the url and
    # use appropriate strategy to get the code depending the type of repository
    if "github.com" in url:
        return self._get_from_github(url)
    # Logger.warn() is a deprecated alias of warning(); lazy %-style args
    # defer string formatting until the record is actually emitted.
    LOGGER.warning("The url (%s) is not a Github url", url)
    LOGGER.warning("ToolDog only deals with Github repository for the moment...")
9,
6,
9,
7,
1485181165
] |
def _get_from_source_code(self, url):
    """
    Get source code from a source code link.

    :param url: url of the source code
    :type url: STRING
    :return: always ``None`` — this is an unimplemented stub.
    """
    # TODO: fetching/parsing from a raw source-code link is not implemented.
    return None
9,
6,
9,
7,
1485181165
] |
def _check_exported_csv(path, expected):
    """Compare the CSV at *path* against *expected* rows: the header row must
    match exactly; data rows compare floats with a 1e-3 relative tolerance."""
    with open(path, 'r') as f:
        i = 0
        for line in f.readlines():
            line = line.strip()
            if line == '':
                continue
            line = line.split(',')
            if i != 0:
                line = [line[0], float(line[1]), float(line[2])]
                assert line[0] == expected[i][0]
                assert line[1:] == pytest.approx(expected[i][1:], 1e-3)
            else:
                assert line == expected[i]
            i += 1

def test_to_csv(acoustic_utt_config, export_test_dir):
    """Exporting a phone query with named and with default columns must both
    produce the expected CSV contents (ids are not checked).

    The two verification loops in the original were duplicated (modulo debug
    prints); they are factored into _check_exported_csv above.
    """
    export_path = os.path.join(export_test_dir, 'results_export.csv')
    # Explicitly named columns -> plain header labels.
    with CorpusContext(acoustic_utt_config) as g:
        q = g.query_graph(g.phone).filter(g.phone.label == 'aa')
        q = q.columns(g.phone.label.column_name('label'),
                      g.phone.duration.column_name('duration'),
                      g.phone.begin.column_name('begin'))
        q = q.order_by(g.phone.begin.column_name('begin'))
        q.to_csv(export_path)
    # ignore ids
    _check_exported_csv(export_path,
                        [['label', 'duration', 'begin'],
                         ['aa', 0.0783100000000001, 2.70424],
                         ['aa', 0.12199999999999989, 9.32077],
                         ['aa', 0.03981000000000279, 24.56029]])
    # Default column naming -> node_phone_* header labels.
    with CorpusContext(acoustic_utt_config) as g:
        q = g.query_graph(g.phone).filter(g.phone.label == 'aa')
        q = q.columns(g.phone.label,
                      g.phone.duration,
                      g.phone.begin)
        q = q.order_by(g.phone.begin)
        q.to_csv(export_path)
    # ignore ids
    _check_exported_csv(export_path,
                        [['node_phone_label', 'node_phone_duration', 'node_phone_begin'],
                         ['aa', 0.0783100000000001, 2.70424],
                         ['aa', 0.12199999999999989, 9.32077],
                         ['aa', 0.03981000000000279, 24.56029]])
27,
13,
27,
30,
1426715229
] |
def file_exists(ls, file):
    """Return 1 if *file* is present in the sequence *ls*, else 0.

    The result stays an int (not bool) for backward compatibility with
    callers comparing against 1/0; membership test replaces the manual loop.
    """
    return int(file in ls)
1,
1,
1,
1,
1575936730
] |
def __init__(self,name,chr,start,end,TID):
    # Genomic region descriptor (name, chromosome, start/end coordinates)
    # plus containers filled in later by the genotyping code.
    self.name = name
    self.chr = chr
    self.start = start
    self.end = end
    self.frequencies_by_pop = {}  # presumably population -> frequency data; populated elsewhere
    self.cps_by_genome = {}  # genome/individual id -> copy-number call
    # NOTE(review): transcript_id and TID store the same value; one of the
    # two looks redundant — confirm which name callers rely on.
    self.transcript_id = TID
    self.TID = TID
    self.cps_all = []  # flat list of copy-number calls; populated elsewhere
    self.pop_by_genome = {}  # genome/individual id -> population label
1,
1,
1,
1,
1575936730
] |
def make_output_file(region,region_info,outdir,cell_line_info,genome_info):
outfile_name = "%s/%s_pop_summary.csv"%(outdir,region_info.name)
FOUT = open(outfile_name,'w')
FOUT.write("indiv,cp,pop,cell lines fixed, cell lines in Nitrogen,coverage\n")
for indiv,cp in region_info.cps_by_genome.iteritems():
pop = region_info.pop_by_genome[indiv]
output = indiv in cell_line_info and cell_line_info[indiv] or ""
output = "%s,%d,%s,%s,%f\n"%(indiv,cp,pop,output,genome_info.genomes[indiv].coverage)
FOUT.write(output)
print output | EichlerLab/read_depth_genotyper | [
1,
1,
1,
1,
1575936730
] |
def make_histogram(region,region_info,outdir,great_ape_gene_hashes):
    # Set up a 10x10-inch matplotlib figure for a copy-number histogram.
    # Only the setup is visible here; the snippet appears truncated.
    print region_info.name
    # NOTE(review): 'l' is not a valid matplotlib linestyle — confirm
    # whether '-' (solid) or ':' was intended.
    plt.rc('grid',color='0.75',linestyle='l',linewidth='0.1')
    f=plt.figure()
    f.set_figwidth(10)
    f.set_figheight(10)
    nbins=0  # presumably the histogram bin count, computed later
    mx=0  # presumably running max of plotted values
    mn=100  # presumably running min of plotted values
1,
1,
1,
1,
1575936730
] |
def load_plot_regions(fn_regions):
if fn_regions == None: return []
plot_regions = []
for line in open(fn_regions,'r').readlines():
if line[0] == "#": continue
print line
sline = line.split()
uID = "%s:"%(sline[1])
uID += ":".join(sline[2:5])
plot_regions.append(uID)
print uID
return plot_regions | EichlerLab/read_depth_genotyper | [
1,
1,
1,
1,
1575936730
] |
def get_cp_by_gene(gene_file): | EichlerLab/read_depth_genotyper | [
1,
1,
1,
1,
1575936730
] |
def get_calkan_cp_calls(fn_great_ape_cps_files):
    """Load per-genome copy-number calls from a manifest file.

    Each manifest line is "<genome> <gene_file>"; returns a dict mapping
    genome -> get_cp_by_gene(gene_file).  Returns {} when the manifest
    path is None.  Fixes: `!=None` -> `is not None`, file closed via `with`.
    """
    calkan_cp_calls = {}
    if fn_great_ape_cps_files is not None:
        with open(fn_great_ape_cps_files, 'r') as f:
            for line in f:
                genome, gene_file = line.split()
                calkan_cp_calls[genome] = get_cp_by_gene(gene_file)
    return calkan_cp_calls
1,
1,
1,
1,
1575936730
] |
def db_connect(base, model_name='dss'):
    """Create a SQLAlchemy engine for the sqlite DB of *model_name* under *base*.

    The DB file stem comes from keyword_dbs, falling back to the model_files
    entry (filename without extension) when the model has no keyword DB.
    """
    try:
        stem = keyword_dbs[model_name]
    except KeyError:
        stem = model_files[model_name].split(".")[0]
    path = 'sqlite:///' + os.path.join(os.getcwd(), base, stem + '.sqlite')
    print("Connecting to: ", path)
    return create_engine(path)
9,
9,
9,
10,
1501409392
] |
def get_database(model_name, return_keyword=False):
    """Load author/document tables and their join tables for *model_name*.

    Returns (Author, Document, KA, DA) dataframes; when *return_keyword*
    is True, also returns Keyword with an IDF-style 'weight' column.
    Fix: the unused duplicate query string Key_Auth_alt was dead code and
    has been removed.
    """
    engine = db_connect("databases", model_name=model_name)
    Session = sessionmaker(bind=engine)
    session = Session()
    doc = "select * from documents"
    auth = "select * from authors"
    # Full author / document tables, re-indexed by their primary keys.
    Author = toDataFrame(auth, session)
    Author.index = Author.id
    Document = toDataFrame(doc, session)
    Document.index = Document.id
    # Keyword <-> author pairs, joined through the documents both touch.
    Key_Auth = '''
    select authors_id, keywords_id, keyword, first_name, last_name
    from keywords k, documents_keywords dk, documents_authors da, authors a, documents d
    where a.id = da.authors_id and d.id = da.documents_id and d.id = dk.documents_id and k.id = dk.keywords_id
    '''
    tmpt = session.execute(Key_Auth)
    KA = DataFrame(tmpt.fetchall(), columns=list(tmpt.keys()))
    # Document <-> author pairs.
    Docu_Auth = '''
    select authors_id, documents_id, first_name, last_name, title
    from authors a, documents b, documents_authors c
    where a.id=c.authors_id and c.documents_id=b.id;
    '''
    tmpt = session.execute(Docu_Auth)
    DA = DataFrame(tmpt.fetchall(), columns=list(tmpt.keys()))
    # Per-keyword document frequency (column name 'freqency' [sic] is the
    # actual schema spelling).
    Key_Freq = '''
    select keywords.id, keyword, freqency
    from (select keywords_id, count(*) freqency from documents_keywords group by keywords_id) a, keywords
    where keywords.id = a.keywords_id
    '''
    a = session.execute(Key_Freq)
    Keyword = DataFrame(a.fetchall(), columns=list(a.keys()))
    Keyword.index = Keyword.id
    DocNum = session.execute('select count(*) from documents').first()[0]
    # Inverse-document-frequency style weight per keyword.
    Keyword.loc[:, 'weight'] = np.log(DocNum / Keyword.freqency)
    if not return_keyword:
        return Author, Document, KA, DA
    else:
        return Author, Document, KA, DA, Keyword
9,
9,
9,
10,
1501409392
] |
def add_arguments(self, parser):
    # Register the --csv option; its value is consumed by the command's
    # handle() (not visible here — presumably a CSV file path).
    parser.add_argument('--csv')
91,
25,
91,
305,
1441229895
] |
def do_check(self, *results):
    """Scan result rows for DBADM grants.

    Any row found flips the level to YELLOW and is reported; with no rows
    the level stays GREEN with a fixed message.  Returns self.result.
    """
    findings = []
    self.result['level'] = 'GREEN'
    for rows in results:
        for row in rows:
            self.result['level'] = 'YELLOW'
            findings.append('DBADM granted to %s\n' % (row[0],))
    output = ''.join(findings)
    if self.result['level'] == 'GREEN':
        output = 'No users granted DBADM.'
    self.result['output'] = output
    return self.result
207,
49,
207,
2,
1450236309
] |
def shortlog(repo, hash, prefix=''):
    """Log a one-line summary ("<prefix><hash>: <subject>") for *hash* in *repo*."""
    commit = repo[hash]
    # Lazy %-style args: formatting is deferred until the record is emitted
    # (avoids the eager '%' formatting of the original).
    log.info('%s%s: %s', prefix, hash, commit.subject)
32,
19,
32,
33,
1465239464
] |
def _mock_settings(Settings_mock):
    # Populate the mocked Settings object with a fixed RGW API configuration
    # shared by the RGWClient tests below.
    Settings_mock.RGW_API_HOST = 'host'
    Settings_mock.RGW_API_PORT = 42
    Settings_mock.RGW_API_SCHEME = 'https'
    Settings_mock.RGW_API_ADMIN_RESOURCE = 'ADMIN_RESOURCE'
    Settings_mock.RGW_API_USER_ID = 'USER_ID'
    Settings_mock.RGW_API_ACCESS_KEY = 'ak'
    Settings_mock.RGW_API_SECRET_KEY = 'sk'
55,
9,
55,
5,
1498814953
] |
def test_load_settings(self, Settings_mock):
    """_load_settings must copy the mocked Settings into RGWClient state."""
    RGWClientTestCase._mock_settings(Settings_mock)
    RGWClient._load_settings()  # Also test import of awsauth.S3Auth
    checks = [
        (RGWClient._host, 'host'),
        (RGWClient._port, 42),
        (RGWClient._ssl, True),  # derived from the 'https' scheme
        (RGWClient._ADMIN_PATH, 'ADMIN_RESOURCE'),
        (RGWClient._SYSTEM_USERID, 'USER_ID'),
    ]
    for actual, expected in checks:
        self.assertEqual(actual, expected)
    instance = RGWClient._user_instances[RGWClient._SYSTEM_USERID]
    self.assertEqual(instance.userid, 'USER_ID')
55,
9,
55,
5,
1498814953
] |
def test_user_delete(self, Settings_mock):
    """Deleting the configured system user must be rejected with a 400."""
    make_default_admin()
    logged_in = self.client.login(username=settings.OAUSER, password='openattic')
    self.assertTrue(logged_in)
    Settings_mock.RGW_API_USER_ID = 'admin'
    resp = self.client.delete('/api/ceph_radosgw/user/delete?uid=admin')
    self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
    self.assertIn('Can not delete the user', resp.data['detail'])
55,
9,
55,
5,
1498814953
] |
def test_bucket_delete(self, filter_mock):
    """Deleting a non-empty bucket must be rejected with a 400."""
    make_default_admin()
    logged_in = self.client.login(username=settings.OAUSER, password='openattic')
    self.assertTrue(logged_in)
    filter_mock.return_value = [4, 8, 15, 16, 23, 42]  # bucket not empty
    resp = self.client.delete('/api/ceph_radosgw/bucket/delete?bucket=test01')
    self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
    self.assertIn('Can not delete the bucket', resp.data['detail'])
55,
9,
55,
5,
1498814953
] |
def __init__(self, parent, id=-1, hideable=1):
    # Toolbar panel: preload the open/close bitmaps, then build the panel
    # with a horizontal sizer.  NOTE(review): 'hideable' is unused in the
    # visible body — confirm it is consumed elsewhere.
    self._close_toolbar_bitmap = gamera_icons.getToolbarCloseBitmap()
    self._open_toolbar_bitmap = gamera_icons.getToolbarOpenBitmap()
    self.controls = []  # all widgets added to this toolbar
    self.layout_update_controls = []
    self.sizer = wx.BoxSizer(wx.HORIZONTAL)
    wx.Panel.__init__(
       self, parent, id,
       style=wx.CLIP_CHILDREN|wx.NO_FULL_REPAINT_ON_RESIZE)
    self.SetSizer(self.sizer)
    self._closed = 0  # nonzero while the toolbar is collapsed
38,
16,
38,
5,
1462722268
] |
def AddMenuTool(self, id, text, help_string, callback=None, toggle=0):
    """Append a bitmap+text menu button (toggle variant when *toggle* is
    truthy), wire the optional callback, and return the button."""
    factory = (buttons.GenBitmapTextToggleButton if toggle
               else buttons.GenBitmapTextButton)
    button = factory(self, id, None, text, size=wx.Size(48, 28))
    button.SetBitmapLabel(gamera_icons.getToolbarMenuBitmap())
    button.SetBezelWidth(1)
    button.SetUseFocusIndicator(False)
    compat_wx.set_tool_tip(button, help_string)
    if callback:
        compat_wx.handle_event_1(self, wx.EVT_BUTTON, callback, id)
    self.sizer.Add(button, flag=wx.ALIGN_CENTER)
    self.sizer.SetSizeHints(self)
    self.controls.append(button)
    return button
38,
16,
38,
5,
1462722268
] |
def AddSeparator(self):
    # A small empty panel (5x2) acts as the visual separator in the sizer.
    self.sizer.Add(wx.Panel(self, -1, size=wx.Size(5, 2)))
    self.sizer.SetSizeHints(self)
38,
16,
38,
5,
1462722268
] |
def __init__(self, stopFunction, reconnectFunction):
    # Callbacks invoked on the quit / reconnect key presses (see start()).
    self.stopFunction = stopFunction
    self.reconnectFunction = reconnectFunction
13,
3,
13,
1,
1401129417
] |
def start(self):
    # Configure the key bindings and begin listening for keypresses.
    self.terminator = 'q'  # key that stops the listener
    self.restart = 'r'     # key that triggers a reconnect
    self.getKey = _Getch()  # platform-appropriate single-keypress reader
    # presumably loops reading keys and dispatching the callbacks — the
    # implementation is not visible here.
    self.startReceiving()
13,
3,
13,
1,
1401129417
] |
def __init__(self):
    # Pick the platform-specific getch implementation: try Windows first
    # and fall back to Unix when the Windows-only import fails.
    try:
        self.impl = _GetchWindows()
    except ImportError:
        self.impl = _GetchUnix()
13,
3,
13,
1,
1401129417
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.