id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3209185 | <reponame>sevagh/slicqt
#! /usr/bin/env python
# -*- coding: utf-8
"""
Python implementation of Non-Stationary Gabor Transform (NSGT)
derived from MATLAB code by NUHAG, University of Vienna, Austria

<NAME>, 2011-2016
http://grrrr.org/nsgt
"""
import numpy as np
import torch
from tqdm import tqdm
import os
from warnings import warn
from nsgt import NSGT_sliced, LogScale, LinScale, MelScale, OctScale, SndReader, BarkScale, VQLogScale
from nsgt_orig import NSGT_sliced as NSGT_sliced_old
import time
from argparse import ArgumentParser

# Benchmark script: repeatedly runs a forward + backward sliced NSGT over an
# audio file and reports the mean wall-clock time per iteration, either with
# the torch-based transform or with the original ("old") implementation.

parser = ArgumentParser()

parser.add_argument("input", type=str, help="Input file")
parser.add_argument("--output", type=str, help="Output data file (.npz, .hd5, .pkl)")
parser.add_argument("--sr", type=int, default=44100, help="Sample rate used for the NSGT (default=%(default)s)")
parser.add_argument("--fmin", type=float, default=50, help="Minimum frequency in Hz (default=%(default)s)")
parser.add_argument("--fmax", type=float, default=22050, help="Maximum frequency in Hz (default=%(default)s)")
parser.add_argument("--scale", choices=('oct','cqlog','vqlog','mel','bark'), default='cqlog', help="Frequency scale oct, log, lin, or mel (default='%(default)s')")
parser.add_argument("--bins", type=int, default=50, help="Number of frequency bins (total or per octave, default=%(default)s)")
parser.add_argument("--real", action='store_true', help="Assume real signal")
parser.add_argument("--old", action='store_true', help="Use old transform")
parser.add_argument("--matrixform", action='store_true', help="Use regular time division over frequency bins (matrix form)")
parser.add_argument("--torch-device", type=str, help="Which pytorch device to use", default="cpu")
parser.add_argument("--reducedform", type=int, default=0, help="If real, omit bins for f=0 and f=fs/2 (--reducedform=1), or also the transition bands (--reducedform=2) (default=%(default)s)")
parser.add_argument("--multithreading", action='store_true', help="Use multithreading")
parser.add_argument("--bench-iter", type=int, default=1000, help="Benchmark iterations")

args = parser.parse_args()
if not os.path.exists(args.input):
    parser.error("Input file '%s' not found"%args.input)

fs = args.sr

# build transform: map the --scale option to its frequency-scale class
scales = {'cqlog':LogScale, 'lin':LinScale, 'mel':MelScale, 'oct':OctScale, 'bark':BarkScale, 'vqlog':VQLogScale}
try:
    scale = scales[args.scale]
except KeyError:
    parser.error('Scale unknown (--scale option)')

scl = scale(args.fmin, args.fmax, args.bins, beyond=int(args.reducedform == 2))
# slice length and transition-area length suggested by the scale for this rate
sllen, trlen = scl.suggested_sllen_trlen(fs)

# Read audio data (resampled to fs, 2 channels)
sf = SndReader(args.input, sr=fs, chns=2)
# store generator into a list so it can be re-used across benchmark iterations
signal_orig = list(sf())

if args.old:
    slicq = NSGT_sliced_old(scl, sllen, trlen, fs,
                            real=True,
                            matrixform=args.matrixform,
                            multithreading=args.multithreading,
                            multichannel=True
                            )
    # read slices from audio file and mix down signal, if necessary at all
    signal = ((np.mean(s, axis=0),) for s in signal_orig)
else:
    slicq = NSGT_sliced(scl, sllen, trlen, fs,
                        real=True,
                        matrixform=args.matrixform,
                        multichannel=True,
                        device=args.torch_device
                        )
    # torch path: move every slice onto the target device ...
    signal = [torch.tensor(sig, device=args.torch_device) for sig in signal_orig]
    # ... zero-pad the final (possibly shorter) slice to the common length ...
    pad = signal[0].shape[-1]-signal[-1].shape[-1]
    signal[-1] = torch.nn.functional.pad(signal[-1], (0, pad), mode='constant', value=0)
    # ... and concatenate into a single tensor along time
    signal = torch.cat(signal, dim=-1)

tot = 0.
for _ in tqdm(range(args.bench_iter)):
    start = time.time()
    if args.old:
        # generator for forward transformation
        c = slicq.forward(signal)
        c_list = list(c)
        sig_recon = slicq.backward(c_list)
        sig = list(sig_recon)
    else:
        # torch
        c = slicq.forward((signal,))
        sig_recon = slicq.backward(c, signal.shape[-1])
    tot += time.time() - start
    # recreate generator (the old path consumes it during the forward pass)
    if args.old:
        signal = ((np.mean(s, axis=0),) for s in signal_orig)

tot /= float(args.bench_iter)
print(f'total time: {tot:.2f}s')
| StarcoderdataPython |
1759552 | <reponame>chrisdavidmills/zamboni
from datetime import datetime, timedelta
import commonware.log
import cronjobs
import amo
from amo.utils import chunked
from devhub.models import ActivityLog
log = commonware.log.getLogger('z.cron')


@cronjobs.register
def mkt_gc(**kw):
    """Site-wide garbage collections."""
    def days_ago(days):
        # Cutoff timestamp `days` days before today.
        return datetime.today() - timedelta(days=days)

    log.debug('Collecting data to delete')
    # Activity-log ids older than 90 days, except actions flagged to keep.
    logs = (ActivityLog.objects.filter(created__lt=days_ago(90))
            .exclude(action__in=amo.LOG_KEEP).values_list('id', flat=True))
    for batch in chunked(logs, 100):
        batch.sort()
        log.debug('Deleting log entries: %s' % str(batch))
        # Deletion is delegated to an async task, one batch per task.
        amo.tasks.delete_logs.delay(batch)
| StarcoderdataPython |
import os

from twilio.rest import Client

# SECURITY: API credentials must come from the environment, never be
# hard-coded in source. The previous revision embedded the account SID
# and a placeholder auth token directly in this file.
account_sid = os.environ.get("TWILIO_ACCOUNT_SID", "ACa8989bc5de6411f4d157b4466980911a")
auth_token = os.environ["TWILIO_AUTH_TOKEN"]

client = Client(account_sid, auth_token)

# NOTE(review): Twilio expects E.164-formatted numbers (e.g. "+19155034543");
# "915-503-4543" may be rejected -- confirm the intended recipient format.
client.messages.create(
    to="915-503-4543",
    from_="+19162521643",
    body="Frankly, my dear, I don't give a damm."
)
| StarcoderdataPython |
1771926 | """Defines the class represents the scheduler configuration"""
from __future__ import unicode_literals
from queue.models import DEFAULT_QUEUE_ORDER
# Defaults applied when no scheduler model is available.
DEFAULT_NUM_MESSAGE_HANDLERS = 0
DEFAULT_LOGGING_LEVEL = 'INFO'


class SchedulerConfiguration(object):
    """This class represents the scheduler configuration"""

    def __init__(self, scheduler=None):
        """Constructor

        :param scheduler: The scheduler model, possibly None
        :type scheduler: :class:`scheduler.models.Scheduler`
        """
        # Start from safe defaults; a fresh scheduler begins paused.
        self.is_paused = True
        self.num_message_handlers = DEFAULT_NUM_MESSAGE_HANDLERS
        self.queue_mode = DEFAULT_QUEUE_ORDER
        self.system_logging_level = DEFAULT_LOGGING_LEVEL

        if not scheduler:
            return

        # A scheduler model was provided: mirror its stored settings.
        self.is_paused = scheduler.is_paused
        self.num_message_handlers = scheduler.num_message_handlers
        self.queue_mode = scheduler.queue_mode
        self.system_logging_level = scheduler.system_logging_level
| StarcoderdataPython |
4834704 | #
# Blink.py -- Blink plugin for Ginga reference viewer
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga import GingaPlugin
from ginga.gw import Widgets
class Blink(GingaPlugin.LocalPlugin):
    """Local plugin that cycles ("blinks") through images in a channel, or
    across channels, at a fixed time interval.
    """

    def __init__(self, fv, fitsimage):
        """Set up the blink timer and load persisted settings.

        fv: the Ginga shell; fitsimage: the image viewer this plugin
        instance is bound to.
        """
        # superclass defines some variables for us, like logger
        super(Blink, self).__init__(fv, fitsimage)

        # seconds between image/channel switches
        self.interval = 1.0
        self.blink_timer = fv.get_timer()
        self.blink_timer.set_callback('expired', self._blink_timer_cb)

        # persisted plugin preferences
        prefs = self.fv.get_preferences()
        self.settings = prefs.createCategory('plugin_Blink')
        self.settings.addDefaults(blink_channels=False)
        self.settings.load(onError='silent')
        # True: blink across channels; False: blink images within channel
        self.blink_channels = self.settings.get('blink_channels', False)

    def build_gui(self, container):
        """Construct the plugin's widget tree inside *container*."""
        top = Widgets.VBox()
        top.set_border_width(4)

        vbox, sw, orientation = Widgets.get_oriented_box(container)
        vbox.set_border_width(4)
        vbox.set_spacing(2)

        # read-only instructions text area, inside a collapsible expander
        self.msgFont = self.fv.getFont("sansFont", 12)
        tw = Widgets.TextArea(wrap=True, editable=False)
        tw.set_font(self.msgFont)
        self.tw = tw

        fr = Widgets.Expander("Instructions")
        fr.set_widget(tw)
        vbox.add_widget(fr, stretch=0)

        # blink controls: interval entry plus start/stop buttons
        fr = Widgets.Frame("Blink")
        vbox2 = Widgets.VBox()
        captions = (("Interval:", 'label', 'Interval', 'entry',
                     "Start Blink", 'button', "Stop Blink", 'button'),
                    )
        w, b = Widgets.build_info(captions, orientation=orientation)
        self.w = b

        b.interval.set_text(str(self.interval))
        b.interval.add_callback('activated', lambda w: self._set_interval_cb())
        b.interval.set_tooltip("Interval in seconds between changing images")

        b.start_blink.add_callback('activated',
                                   lambda w: self._start_blink_cb())
        b.stop_blink.add_callback('activated',
                                  lambda w: self._stop_blink_cb())
        vbox2.add_widget(w, stretch=0)

        # radio buttons selecting the blink mode (channels vs. images)
        hbox = Widgets.HBox()
        btn1 = Widgets.RadioButton("Blink channels")
        btn1.add_callback('activated',
                          lambda w, tf: self._set_blink_mode_cb(tf == True))
        btn1.set_tooltip("Choose this to blink across channels")
        btn1.set_state(self.blink_channels)
        self.w.blink_channels = btn1
        hbox.add_widget(btn1)
        btn2 = Widgets.RadioButton("Blink images in channel", group=btn1)
        btn2.set_state(not self.blink_channels)
        btn2.add_callback('activated',
                          lambda w, tf: self._set_blink_mode_cb(tf == False))
        btn2.set_tooltip("Choose this to blink images within a channel")
        self.w.blink_within = btn2
        hbox.add_widget(btn2)
        hbox.add_widget(Widgets.Label(''), stretch=1)
        vbox2.add_widget(hbox, stretch=0)

        fr.set_widget(vbox2)
        vbox.add_widget(fr, stretch=0)

        # spacer pushes the content to the top
        spacer = Widgets.Label('')
        vbox.add_widget(spacer, stretch=1)

        top.add_widget(sw, stretch=1)

        # bottom row: close button
        btns = Widgets.HBox()
        btns.set_spacing(3)
        btn = Widgets.Button("Close")
        btn.add_callback('activated', lambda w: self.close())
        btns.add_widget(btn, stretch=0)
        btns.add_widget(Widgets.Label(''), stretch=1)
        top.add_widget(btns, stretch=0)

        container.add_widget(top, stretch=1)

    def close(self):
        """Close this plugin instance."""
        self.fv.stop_local_plugin(self.chname, str(self))
        return True

    def instructions(self):
        """Fill the instructions text area."""
        self.tw.set_text("""Blink the images in this channel.
Only images loaded in memory will be cycled.""")

    def start(self):
        """Plugin lifecycle: called when the plugin is started."""
        self.instructions()
        self.resume()

    def pause(self):
        """Plugin lifecycle: suspend blinking while the plugin is paused."""
        self.stop_blinking()

    def resume(self):
        """Plugin lifecycle: resume blinking."""
        self.start_blinking()

    def stop(self):
        """Plugin lifecycle: called when the plugin is stopped."""
        self.stop_blinking()

    def redo(self):
        """Plugin lifecycle: nothing to recompute when a new image arrives."""
        pass

    def _blink_timer_cb(self, timer):
        """Timer callback: advance to the next channel or image, re-arm timer."""
        # set timer
        if self.blink_channels:
            self.fv.gui_do(self.fv.next_channel)
        else:
            self.fv.gui_do(self.fv.next_img, loop=True)

        timer.set(self.interval)

    def start_blinking(self):
        """Arm the blink timer."""
        self.blink_timer.set(self.interval)

    def stop_blinking(self):
        """Disarm the blink timer."""
        self.blink_timer.clear()

    def _start_blink_cb(self):
        """GUI callback for the "Start Blink" button."""
        self._set_interval_cb()
        self.start_blinking()

    def _stop_blink_cb(self):
        """GUI callback for the "Stop Blink" button."""
        self.stop_blinking()

    def _set_interval_cb(self):
        """GUI callback: read the interval entry, clamp to [0.25, 30] s,
        and restart blinking with the new interval."""
        interval = float(self.w.interval.get_text())
        self.interval = max(min(interval, 30.0), 0.25)
        self.stop_blinking()
        self.start_blinking()

    def _set_blink_mode_cb(self, tf):
        """GUI callback: True selects channel-blink, False image-blink."""
        self.blink_channels = tf

    def __str__(self):
        # lowercase name identifies the plugin to the shell
        return 'blink'

#END
| StarcoderdataPython |
import subprocess

# Recolor the kikiset images: replace (fuzzy-matched) red with gray.
# Fixes vs. the previous revision:
#  - subprocess.run with an argument list instead of os.system with a
#    shell-interpolated string (no shell parsing/injection surface);
#  - removed the unused randomcolor import/instance and the single-element
#    `hues` loop whose loop variable was never used.
FILL_COLOR = '#b7b7b7'

kiki_num = 0
for i in range(1, 9):
    subprocess.run(
        [
            "convert",
            "all/kikiset-0{}.png".format(i),
            "-fuzz", "50%",
            "-fill", FILL_COLOR,
            "-opaque", "red",
            "processed/kiki_{}.png".format(kiki_num),
        ],
        check=False,  # os.system also ignored the exit status
    )
    kiki_num += 1
| StarcoderdataPython |
1734698 | <reponame>Pritam055/python-ProblemSolving
class TestDataEmptyArray(object):
    """Test fixture: an empty input array."""

    @staticmethod
    def get_array():
        """Return a fresh empty list."""
        return []
class TestDataUniqueValues(object):
    """Test fixture: an array whose minimum (-6) occurs exactly once."""

    @staticmethod
    def get_array():
        """Return a fresh copy of the test array."""
        values = (5, 2, 8, 3, 1, -6, 9)
        return list(values)

    @staticmethod
    def get_expected_result():
        """Return the index of the unique minimum in get_array()."""
        expected_index = 5
        return expected_index
class TestDataExactlyTwoDifferentMinimums(object):
    """Test fixture: an array whose minimum (-6) occurs at two indices."""

    @staticmethod
    def get_array():
        """Return a fresh copy of the test array."""
        values = (5, 2, 8, 3, 1, -6, 9, -6, 10)
        return list(values)

    @staticmethod
    def get_expected_result():
        """Return the index of the first occurrence of the minimum."""
        first_min_index = 5
        return first_min_index
| StarcoderdataPython |
3370728 | <filename>open_publishing/context/bisac_subjects.py
from open_publishing.core.enums import BisacCode, VLBCategory
from open_publishing.bisac import BisacSubject
class BisacSubjects(object):
    """Accessor for BISAC subject records within an open_publishing context."""

    def __init__(self, context):
        # context provides the gjp gateway used for id resolution
        self._ctx = context

    def load(self, bisac_code=None, internal_id=None):
        """Load a single BISAC subject, by code or by internal id.

        Exactly one of the two keyword arguments must be given.
        """
        provided = [arg for arg in (bisac_code, internal_id) if arg is not None]
        if len(provided) != 1:
            raise ValueError('Only one of bisac_code/internal_id should be specified')

        if bisac_code is not None:
            # The code may already be a BisacCode enum member, or a raw code
            # string that the gateway must resolve.
            if bisac_code in BisacCode:
                subject_id = self._ctx.gjp.resolve_enum(BisacCode,
                                                        enum=bisac_code).internal_id
            else:
                subject_id = self._ctx.gjp.resolve_enum(BisacCode,
                                                        code=bisac_code).internal_id
        else:
            subject_id = internal_id

        return BisacSubject(self._ctx, subject_id)

    def search(self, vlb_category):
        """Return all BISAC subjects mapped to the given VLB category."""
        if vlb_category is None:
            raise ValueError("vlb_category should not be None")

        # Accept either a VLBCategory enum member or a raw category code.
        if vlb_category in VLBCategory:
            category_id = self._ctx.gjp.resolve_enum(VLBCategory,
                                                     enum=vlb_category).internal_id
        else:
            category_id = self._ctx.gjp.resolve_enum(VLBCategory,
                                                     code=vlb_category).internal_id

        return [BisacSubject(self._ctx, subject_id)
                for subject_id in self._ctx.gjp.vlb_to_bisac(category_id)]
| StarcoderdataPython |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 8 22:22:26 2016

configuration for the echo server.

@author: eikes
"""

# (host, port) pair the echo server binds to; localhost-only by default.
# (Previous revision had a dataset artifact fused onto this line, making
# the module invalid Python.)
address = ('localhost', 12345)
81480 | import json
import numbers
import fhirpathpy.engine.util as util
from fhirpathpy.engine.evaluators import evaluators
from fhirpathpy.engine.invocations import invocations
def check_integer_param(val):
    """Validate that *val* holds an integral value and return it."""
    data = util.get_data(val)
    if data == int(data):
        return data
    raise Exception("Expected integer, got: " + json.dumps(data))
def check_number_param(val):
    """Validate that *val* holds a numeric value and return it."""
    data = util.get_data(val)
    if isinstance(data, numbers.Number):
        return data
    raise Exception("Expected number, got: " + json.dumps(data))
def check_boolean_param(val):
    """Validate that *val* holds a boolean value and return it.

    Equality (not isinstance) is used deliberately, so values comparing
    equal to True/False are accepted, matching the original contract.
    """
    data = util.get_data(val)
    if data in (True, False):
        return data
    raise Exception("Expected boolean, got: " + json.dumps(data))
def check_string_param(val):
    """Validate that *val* holds a string value and return it."""
    data = util.get_data(val)
    if isinstance(data, str):
        return data
    raise Exception("Expected string, got: " + json.dumps(data))
def do_eval(ctx, parentData, node):
    """Dispatch *node* to the evaluator registered for its type."""
    node_type = node["type"]
    if node_type not in evaluators:
        raise Exception("No " + node_type + " evaluator ")
    return evaluators[node_type](ctx, parentData, node)
def doInvoke(ctx, fn_name, data, raw_params):
    """Invoke the named FHIRPath function on *data*.

    ctx: evaluation context; fn_name: function name (the parser may deliver
    it wrapped in a one-element list); data: input collection;
    raw_params: raw AST parameter nodes, or None.

    Returns the invocation result coerced to a list; raises on unknown
    functions or arity mismatches.
    """
    # The grammar may deliver the function name as a one-element list.
    if isinstance(fn_name, list) and len(fn_name) == 1:
        fn_name = fn_name[0]

    # isinstance (not type(...) ==) is the idiomatic type check and also
    # accepts str subclasses.
    if not isinstance(fn_name, str) or fn_name not in invocations:
        raise Exception("Not implemented: " + str(fn_name))

    invocation = invocations[fn_name]

    # Zero-arity function: reject any supplied parameters.
    if "arity" not in invocation:
        if raw_params is None or util.is_empty(raw_params):
            res = invocation["fn"](ctx, util.arraify(data))
            return util.arraify(res)
        raise Exception(fn_name + " expects no params")

    paramsNumber = len(raw_params) if isinstance(raw_params, list) else 0
    if paramsNumber not in invocation["arity"]:
        raise Exception(fn_name + " wrong arity: got " + str(paramsNumber))

    # Coerce each raw parameter according to its declared type, then prepend
    # the context and the input data (the calling convention of "fn").
    argTypes = invocation["arity"][paramsNumber]
    params = [make_param(ctx, data, argTypes[i], raw_params[i])
              for i in range(paramsNumber)]
    params.insert(0, data)
    params.insert(0, ctx)

    # Nullable invocations propagate emptiness: any empty argument yields [].
    if "nullable" in invocation:
        if any(util.is_nullable(x) for x in params):
            return []

    res = invocation["fn"](*params)
    return util.arraify(res)
# Dispatch table mapping a declared parameter type name to its validator;
# used by make_param to check singleton arguments.
param_check_table = {
    "Integer": check_integer_param,
    "Number": check_number_param,
    "Boolean": check_boolean_param,
    "String": check_string_param,
}
def make_param(ctx, parentData, node_type, param):
    """Coerce a raw parse-tree parameter into the form an invocation expects.

    ctx: evaluation context (mutated: *parentData* is recorded as
        "currentData" so nested evaluations can refer back to the focus)
    parentData: the current input collection
    node_type: declared parameter type -- "Expr", "AnyAtRoot", "Identifier",
        "Any", a key of param_check_table, or a one-element list of such a
        key (meaning: optional singleton of that type)
    param: the raw AST node for the argument
    """
    ctx["currentData"] = parentData

    # Lazy parameter: return a closure that evaluates on demand.
    if node_type == "Expr":

        def func(data):
            return do_eval(ctx, util.arraify(data), param)

        return func

    # Evaluate against the resource root instead of the current focus.
    if node_type == "AnyAtRoot":
        return do_eval(ctx, ctx["dataRoot"], param)

    # Bare identifier: return its literal text rather than evaluating it.
    if node_type == "Identifier":
        if param["type"] == "TermExpression":
            return param["text"]

        raise Exception("Expected identifier node, got " + json.dumps(param))

    res = do_eval(ctx, parentData, param)
    if node_type == "Any":
        return res

    # A list type means the value is optional: empty input is allowed;
    # otherwise it must be a singleton of the wrapped type.
    if isinstance(node_type, list):
        if len(res) == 0:
            return []
        else:
            node_type = node_type[0]

    if len(res) > 1:
        raise Exception(
            "Unexpected collection"
            + json.dumps(res)
            + "; expected singleton of type "
            + node_type
        )
    if len(res) == 0:
        return []

    # Validate the singleton against its declared primitive type.
    if node_type not in param_check_table:
        raise Exception("Implement me for " + node_type)

    check = param_check_table[node_type]
    return check(res[0])
def infix_invoke(ctx, fn_name, data, raw_params):
    """Invoke a binary (infix) operator implementation by name.

    ctx: evaluation context; fn_name: operator name; data: input collection;
    raw_params: exactly two raw AST operand nodes.

    Returns the result coerced to a list; [] when a nullable operator
    receives an empty operand, or when the operator declares no argument
    types for arity 2.
    """
    if fn_name not in invocations or "fn" not in invocations[fn_name]:
        raise Exception("Not implemented " + fn_name)

    invocation = invocations[fn_name]
    paramsNumber = len(raw_params)
    if paramsNumber != 2:
        raise Exception("Infix invoke should have arity 2")

    argTypes = invocation["arity"][paramsNumber]
    if argTypes is not None:
        params = [ctx]
        for i in range(paramsNumber):
            params.append(make_param(ctx, data, argTypes[i], raw_params[i]))

        # Nullable operators propagate emptiness: any empty operand -> [].
        if "nullable" in invocation:
            if any(util.is_nullable(x) for x in params):
                return []

        res = invocation["fn"](*params)
        return util.arraify(res)

    # BUG FIX: the previous revision concatenated str + int here, which
    # raised TypeError instead of printing the diagnostic.
    print(fn_name + " wrong arity: got " + str(paramsNumber))
    return []
| StarcoderdataPython |
21773 | <reponame>cauabernardino/cabinet
import pathlib
import shutil
from typing import Dict, List, Union
from cabinet.consts import SUPPORTED_FILETYPES
def dir_parser(path_to_dir: str) -> Dict[str, Dict[str, str]]:
    """
    Parses the given directory, and returns the path, stem and suffix for files.

    Files are matched with the glob pattern "*.*" (i.e. names containing a
    dot). Keys are file stems, so two files sharing a stem but differing in
    suffix would overwrite each other -- the last one globbed wins.
    """
    parsed: Dict[str, Dict[str, str]] = {}
    for entry in pathlib.Path(path_to_dir).resolve().glob("*.*"):
        parsed[entry.stem] = {
            "suffix": entry.suffix,
            "path": entry.as_posix(),
        }
    return parsed
def bin_resolver(file_data: Dict[str, str]) -> Union[List[str], None]:
    """
    Resolves the right binary to run the script.

    Returns the command as a list (with the interpreter resolved to an
    absolute path via shutil.which), or None for unsupported suffixes.
    """
    suffix = file_data["suffix"]
    if suffix not in SUPPORTED_FILETYPES:
        return None
    command = SUPPORTED_FILETYPES[suffix].split(" ")
    # NOTE(review): shutil.which may return None if the interpreter is not
    # on PATH -- callers should be prepared for command[0] being None.
    command[0] = shutil.which(command[0])
    return command
| StarcoderdataPython |
3211293 | # Original algorithm was published by <NAME> and colleagues as EmptyDrops (Lun, A. et al. Distinguishing cells from empty droplets in droplet-based single-cell RNA sequencing data.)
# This implementation is based on the code in cellranger v3.0 by 10x Genomics
# Copyright 2018 10X Genomics, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import logging
import numpy as np
import scipy.sparse as sparse
import scipy.stats as sp_stats
# Simple Good-Turing estimator.
# Based on S implementation in
# <NAME> & <NAME> (1995) Good-turing frequency estimation without tears,
# Journal of Quantitative Linguistics, 2:3, 217-237, DOI: 10.1080/09296179508590051
class SimpleGoodTuringError(Exception):
    """Raised when the Simple Good-Turing estimator is not applicable to
    the supplied frequency data (e.g. too few distinct frequencies, or a
    log-log regression slope > -1)."""
    pass
def _averaging_transform(r, nr):
d = np.concatenate((np.ones(1, dtype=int), np.diff(r)))
dr = np.concatenate((
0.5 * (d[1:] + d[0:-1]),
np.array((d[-1],), dtype=float),
))
return nr.astype(float) / dr
def _rstest(r, coef):
return r * np.power(1 + 1 / r, 1 + coef)
def simple_good_turing(xr, xnr):
    """Make a Simple Good-Turing estimate of the frequencies.

    Args:
        xr (np.array(int)): Non-zero item frequencies
        xnr (np.array(int)): Non-zero frequencies of frequencies

    Returns:
        (rstar (np.array(float)), p0 (float)):
        rstar: The adjusted non-zero frequencies
        p0: The total probability of unobserved items
    """
    xr = xr.astype(float)
    xnr = xnr.astype(float)

    # total number of observed items
    xN = np.sum(xr * xnr)

    # Get Linear Good-Turing estimate: fit log(Z_r) ~ log(r) on the
    # gap-averaged frequency-of-frequency counts
    xnrz = _averaging_transform(xr, xnr)
    slope, intercept, _, _, _ = sp_stats.linregress(np.log(xr), np.log(xnrz))

    # SGT requires slope <= -1; otherwise the smoothed r* is not decreasing
    if slope > -1:
        raise SimpleGoodTuringError("The log-log slope is > -1 (%d); the SGT estimator is not applicable to these data." % slope)

    # LGT relative adjustment r*/r
    xrst = _rstest(xr, slope)
    xrstrel = xrst / xr

    # Get traditional (Turing) estimate r* = (r+1) * N_{r+1} / N_r, defined
    # only where the next frequency r+1 was actually observed
    xrtry = xr == np.concatenate((xr[1:] - 1, np.zeros(1)))
    xrstarel = np.zeros(len(xr))
    xrstarel[xrtry] = (xr[xrtry] + 1) / xr[xrtry] * np.concatenate((xnr[1:], np.zeros(1)))[xrtry] / xnr[xrtry]

    # Determine when to switch from GT to LGT estimates: approximate
    # standard deviation of the Turing estimate at each r
    tursd = np.ones(len(xr))
    for i in range(len(xr)):
        if xrtry[i]:
            tursd[i] = float(i + 2) / xnr[i] * np.sqrt(xnr[i + 1] * (1 + xnr[i + 1] / xnr[i]))

    # Use the Turing estimate while it differs significantly (> 1.65 sd)
    # from the LGT estimate; once they agree, switch to LGT for all larger r
    xrstcmbrel = np.zeros(len(xr))
    useturing = True
    for r in range(len(xr)):
        if not useturing:
            xrstcmbrel[r] = xrstrel[r]
        else:
            if np.abs(xrstrel[r] - xrstarel[r]) * (1 + r) / tursd[r] > 1.65:
                xrstcmbrel[r] = xrstarel[r]
            else:
                useturing = False
                xrstcmbrel[r] = xrstrel[r]

    # Renormalize the probabilities for observed objects so that, together
    # with p0 (mass of unseen items), they sum to one
    sumpraw = np.sum(xrstcmbrel * xr * xnr / xN)

    xrstcmbrel = xrstcmbrel * (1 - xnr[0] / xN) / sumpraw
    p0 = xnr[0] / xN

    return (xr * xrstcmbrel, p0)
def sgt_proportions(frequencies):
    """Use Simple Good-Turing estimate to adjust for unobserved items

    Args:
        frequencies (np.array(int)): Nonzero frequencies of items

    Returns:
        (pstar (np.array(float)), p0 (float)):
        pstar: The adjusted non-zero proportions
        p0: The total probability of unobserved items

    Raises:
        ValueError: on empty input or zero-valued frequencies
        SimpleGoodTuringError: when fewer than 10 distinct frequencies exist
    """
    if len(frequencies) == 0:
        raise ValueError("Input frequency vector is empty")
    if np.count_nonzero(frequencies) != len(frequencies):
        raise ValueError("Frequencies must be greater than zero")

    # frequency-of-frequency counts; bin 0 must stay empty because every
    # input frequency is non-zero
    freqfreqs = np.bincount(frequencies.astype(np.int64))
    assert freqfreqs[0] == 0
    use_freqs = np.flatnonzero(freqfreqs)

    if len(use_freqs) < 10:
        raise SimpleGoodTuringError("Too few non-zero frequency items (%d). Aborting SGT." % len(use_freqs))

    rstar, p0 = simple_good_turing(use_freqs, freqfreqs[use_freqs])

    # rstar contains the smoothed frequencies.
    # Map each original frequency r to its smoothed rstar.
    rstar_dict = dict(zip(use_freqs, rstar))

    rstar_sum = np.sum(freqfreqs[use_freqs] * rstar)
    rstar_i = np.fromiter((rstar_dict[f] for f in frequencies), dtype=float, count=len(frequencies))
    # scale smoothed frequencies into proportions that, with p0, sum to one
    pstar = (1 - p0) * (rstar_i / rstar_sum)

    assert np.isclose(p0 + np.sum(pstar), 1)
    return (pstar, p0)
def adjust_pvalue_bh(p):
    """Multiple testing correction of p-values using the Benjamini-Hochberg
    procedure. Returns the adjusted q-values in the original order."""
    order = np.argsort(p)[::-1]
    n = len(p)
    # q = p * N / k where p = p-value, N = # tests, k = p-value rank;
    # enforce monotonicity from the largest p downwards, capped at 1
    ranks = np.arange(n, 0, -1)
    q = np.minimum(1, np.minimum.accumulate(p[order] * (float(n) / ranks)))
    # Scatter back to the original order (inverse permutation of `order`)
    out = np.empty_like(q)
    out[order] = q
    return out
def eval_multinomial_loglikelihoods(matrix, profile_p, max_mem_gb=0.1):
    """Compute the multinomial log PMF for many barcodes

    Args:
        matrix (scipy.sparse.csc_matrix): Matrix of UMI counts (feature x barcode)
        profile_p (np.ndarray(float)): Multinomial probability vector
        max_mem_gb (float): Try to bound memory usage.

    Returns:
        log_likelihoods (np.ndarray(float)): Log-likelihood for each barcode
    """
    # Densifying all barcodes at once would blow up memory, so process in
    # column chunks sized to roughly respect max_mem_gb.
    bytes_per_bc = float(matrix.shape[0] * matrix.dtype.itemsize)
    gb_per_bc = bytes_per_bc / (1024 ** 3)
    bcs_per_chunk = max(1, int(round(max_mem_gb / gb_per_bc)))

    num_bcs = matrix.shape[1]
    loglk = np.zeros(num_bcs)

    for chunk_start in range(0, num_bcs, bcs_per_chunk):
        sel = slice(chunk_start, chunk_start + bcs_per_chunk)
        dense = matrix[:, sel].transpose().toarray()
        totals = dense.sum(1)  # multinomial N per barcode
        loglk[sel] = sp_stats.multinomial.logpmf(dense, totals, p=profile_p)
    return loglk
def simulate_multinomial_loglikelihoods(profile_p, umis_per_bc, num_sims=1000, jump=1000, n_sample_feature_block=1000000, verbose=False):
    """Simulate draws from a multinomial distribution for various values of N.

    Uses the approximation from Lun et al. ( https://www.biorxiv.org/content/biorxiv/early/2018/04/04/234872.full.pdf )

    Args:
        profile_p (np.ndarray(float)): Probability of observing each feature.
        umis_per_bc (np.ndarray(int)): UMI counts per barcode (multinomial N).
        num_sims (int): Number of simulations per distinct N value.
        jump (int): Vectorize the sampling if the gap between two distinct Ns exceeds this.
        n_sample_feature_block (int): Vectorize this many feature samplings at a time.
        verbose (bool): Unused; kept for API compatibility.

    Returns:
        (distinct_ns (np.ndarray(int)), log_likelihoods (np.ndarray(float)):
        distinct_ns is an array containing the distinct N values that were simulated.
        log_likelihoods is a len(distinct_ns) x num_sims matrix containing the
        simulated log likelihoods.
    """
    # distinct N values present in the data, in ascending order
    distinct_n = np.flatnonzero(np.bincount(umis_per_bc.astype(np.int64)))

    loglk = np.zeros((len(distinct_n), num_sims), dtype=float)

    # pre-drawn pool of feature indices, refilled when exhausted, to
    # amortize the cost of np.random.choice
    sampled_features = np.random.choice(len(profile_p), size=n_sample_feature_block, p=profile_p, replace=True)
    k = 0

    log_profile_p = np.log(profile_p)

    for sim_idx in range(num_sims):
        # start each simulation from a full draw at the smallest N
        curr_counts = np.ravel(sp_stats.multinomial.rvs(distinct_n[0], profile_p, size=1))

        curr_loglk = sp_stats.multinomial.logpmf(curr_counts, distinct_n[0], p=profile_p)

        loglk[0, sim_idx] = curr_loglk

        for i in range(1, len(distinct_n)):
            step = distinct_n[i] - distinct_n[i - 1]

            if step >= jump:
                # Instead of iterating for each n, sample the intermediate ns all at once
                curr_counts += np.ravel(sp_stats.multinomial.rvs(step, profile_p, size=1))
                curr_loglk = sp_stats.multinomial.logpmf(curr_counts, distinct_n[i], p=profile_p)
                assert not np.isnan(curr_loglk)
            else:
                # Iteratively sample between the two distinct values of n,
                # updating the log-likelihood incrementally per added count
                for n in range(distinct_n[i - 1] + 1, distinct_n[i] + 1):
                    j = sampled_features[k]
                    k += 1
                    if k >= n_sample_feature_block:
                        # Amortize this operation
                        sampled_features = np.random.choice(len(profile_p), size=n_sample_feature_block, p=profile_p, replace=True)
                        k = 0
                    curr_counts[j] += 1
                    curr_loglk += log_profile_p[j] + np.log(float(n) / curr_counts[j])

            loglk[i, sim_idx] = curr_loglk

    return distinct_n, loglk
def compute_ambient_pvalues(umis_per_bc, obs_loglk, sim_n, sim_loglk):
    """Compute p-values for observed multinomial log-likelihoods

    Args:
        umis_per_bc (nd.array(int)): UMI counts per barcode
        obs_loglk (nd.array(float)): Observed log-likelihoods of each barcode deriving from an ambient profile
        sim_n (nd.array(int)): Multinomial N for simulated log-likelihoods
        sim_loglk (nd.array(float)): Simulated log-likelihoods of shape (len(sim_n), num_simulations)

    Returns:
        pvalues (nd.array(float)): p-values
    """
    assert len(umis_per_bc) == len(obs_loglk)
    assert sim_loglk.shape[0] == len(sim_n)

    # For each barcode, locate the simulated N closest from below
    sim_idx = np.searchsorted(sim_n, umis_per_bc)
    num_sims = sim_loglk.shape[1]
    num_barcodes = len(umis_per_bc)

    # Empirical p-value with add-one smoothing:
    # (1 + #{simulated loglk < observed}) / (1 + #simulations)
    pvalues = np.zeros(num_barcodes)
    for bc in range(num_barcodes):
        num_lower = np.sum(sim_loglk[sim_idx[bc], :] < obs_loglk[bc])
        pvalues[bc] = float(1 + num_lower) / (1 + num_sims)
    return pvalues
def estimate_profile_sgt(matrix, barcode_indices, nz_feat):
    """ Estimate a gene expression profile by Simple Good Turing.

    Args:
        raw_mat (sparse matrix): Sparse matrix of all counts
        barcode_indices (np.array(int)): Barcode indices to use
        nz_feat (np.array(int)): Indices of features that are non-zero at least once

    Returns:
        profile (np.array(float)): Estimated probabilities of length len(nz_feat).
    """
    # Initial profile estimate: summed counts over the selected barcodes
    prof_mat = matrix[:, barcode_indices]
    profile = np.ravel(prof_mat[nz_feat, :].sum(axis=1))
    zero_feat = np.flatnonzero(profile == 0)

    # Simple Good-Turing estimate on the observed (non-zero) counts
    p_smoothed, p0 = sgt_proportions(profile[np.flatnonzero(profile)])

    # Distribute p0 equally among the zero elements.
    # BUG FIX: guard against division by zero when every feature was
    # observed (the p0 mass is irrelevant then, since every entry gets
    # overwritten with a smoothed probability below).
    p0_i = p0 / len(zero_feat) if len(zero_feat) > 0 else 0.0

    profile_p = np.repeat(p0_i, len(nz_feat))
    profile_p[np.flatnonzero(profile)] = p_smoothed

    assert np.isclose(profile_p.sum(), 1.0)
    return profile_p
# Construct a background expression profile from barcodes with <= T UMIs
def est_background_profile_sgt(matrix, use_bcs):
    """ Estimate a gene expression profile on a given subset of barcodes.
    Use Good-Turing to smooth the estimated profile.

    Args:
        matrix (scipy.sparse.csc_matrix): Sparse matrix of all counts
        use_bcs (np.array(int)): Indices of barcodes to use (col indices into matrix)

    Returns:
        profile (use_features, np.array(float)): Estimated probabilities of length use_features.
    """
    # Restrict to features that are nonzero anywhere in the data
    nonzero_features = np.flatnonzero(np.asarray(matrix.sum(1)))
    # Smoothed background profile over the chosen barcodes
    background_p = estimate_profile_sgt(matrix, use_bcs, nonzero_features)
    return (nonzero_features, background_p)
# <NAME>'s version (Aug 2019)
def call_cells(matrix: sparse.csr_matrix, expected_n_cells: int = 5000) -> np.ndarray:
    """
    Determine likely true cells among the barcodes by contrasting with the ambient RNA profile

    Args:
        matrix: expression matrix (features x barcodes)
        expected_n_cells: expected number of true cells in the sample

    Returns:
        (max_ambient_umis, pvalues): the ambient-UMI cutoff used, and a
        vector of BH-adjusted p-values per barcode (1.0 for barcodes that
        were not evaluated; small values indicate the barcode is unlikely
        to derive from ambient RNA alone)
    """
    n_barcodes = matrix.shape[1]
    # never assume more than 20% of the barcodes are cells
    expected_n_cells = min(expected_n_cells, n_barcodes // 5)
    total_umis = np.array(matrix.sum(axis=0))[0]  # total UMIs per barcode

    # upper limit of UMIs for barcodes considered ambient, calculated as greatest UMI count after removing twice the expected number of cells
    max_ambient_umis = np.percentile(total_umis, 100 * (n_barcodes - expected_n_cells * 2) / n_barcodes)

    # median number of UMIs among the top expected_n_cells barcodes
    median_initial_umis = np.median(total_umis[total_umis > np.percentile(total_umis, 100 * (n_barcodes - expected_n_cells) / n_barcodes)])
    min_cell_umis = int(max(500, median_initial_umis * 0.1))  # 10% of median, but at least 500 UMIs

    # Ambient RNA beads, covering the range 20 to max_ambient_umis
    ambient_bcs = (total_umis < max_ambient_umis) & (total_umis > 20)
    if ambient_bcs.sum() == 0:
        # No beads were ambient, because cells had very low UMIs
        logging.warning("No ambient RNA beads were found; maybe sample had too few cells?")
        return max_ambient_umis, np.ones_like(total_umis)
    try:
        eval_features, ambient_profile_p = est_background_profile_sgt(matrix, ambient_bcs)
    except SimpleGoodTuringError as e:
        # SGT not applicable: fall back to calling nothing (all p-values 1)
        logging.error(e)
        return max_ambient_umis, np.ones_like(total_umis)

    # Evaluate only candidate barcodes with enough UMIs to plausibly be cells
    eval_bcs = total_umis > min_cell_umis
    eval_mat = matrix[eval_features, :][:, eval_bcs]

    # Compute observed log-likelihood of barcodes being generated from ambient RNA
    obs_loglk = eval_multinomial_loglikelihoods(eval_mat, ambient_profile_p)

    # Simulate log likelihoods under the ambient multinomial model
    distinct_ns, sim_loglk = simulate_multinomial_loglikelihoods(ambient_profile_p, total_umis[eval_bcs], num_sims=1000, verbose=True)

    # Compute p-values and correct for multiple testing (Benjamini-Hochberg)
    pvalues = compute_ambient_pvalues(total_umis[eval_bcs], obs_loglk, distinct_ns, sim_loglk)

    pvalues_adj = adjust_pvalue_bh(pvalues)
    # Barcodes below the evaluation threshold keep p-value 1.0
    pvalues_adj_all = np.ones_like(total_umis)
    pvalues_adj_all[eval_bcs] = pvalues_adj
    return max_ambient_umis, pvalues_adj_all
| StarcoderdataPython |
137642 | # -*- coding: utf-8 -*-
#
# Copyright 2015-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Mimic World lettuce object
"""
import os
import shutil
import time
import pkg_resources
import datetime
import pprint
from bigml.api import BigML
from bigml.api import HTTP_OK, HTTP_NO_CONTENT, HTTP_UNAUTHORIZED
from bigml.constants import IRREGULAR_PLURALS, RENAMED_RESOURCES
from bigml.api_handlers.externalconnectorhandler import get_env_connection_info
from bigml.util import get_exponential_wait
from nose.tools import assert_less
# Maximum number of retries used by polling/consistency checks.
MAX_RETRIES = 10

# Every BigML resource type handled by the test world; used to build
# per-type counters and resource-id lists.
RESOURCE_TYPES = [
    'cluster',
    'fusion',
    'optiml',
    'source',
    'dataset',
    'prediction',
    'evaluation',
    'ensemble',
    'batchprediction',
    'centroid',
    'batchcentroid',
    'anomaly',
    'anomalyscore',
    'batchanomalyscore',
    'project',
    'sample',
    'correlation',
    'statisticaltest',
    'logisticregression',
    'model',
    'deepnet',
    'association',
    'associationset',
    'configuration',
    'topicmodel',
    'topicdistribution',
    'timeseries',
    'forecast',
    'pca',
    'projection',
    'batchprojection',
    'linearregression',
    'script',
    'execution',
    'library',
    'externalconnector'
]
# Irregular plural forms: the bindings' table, extended with the form
# used locally for time series.
irregular_plurals = dict(IRREGULAR_PLURALS)
irregular_plurals["timeseries"] = "time_series_set"


def plural(resource_type):
    """Creates the plural form of a resource type
    """
    try:
        return irregular_plurals[resource_type]
    except KeyError:
        # regular plural: just append an "s"
        return "%ss" % resource_type
def show_doc(self, examples=None):
    """Print the name and docstring of the callable passed as argument.

    When *examples* is given, each example row is printed on its own
    line, pipe-separated and prefixed with " |".
    """
    print("%s:\n%s" % (self.__name__, self.__doc__))
    if examples:
        rows = ["|".join(str(item) for item in example)
                for example in examples]
        print(" |%s" % "\n |".join(rows))
class World(object):
    """Global fixture shared by the bindings tests.

    Holds the authenticated BigML API connection, the ids of every
    resource created during a run (so they can be deleted afterwards),
    and the test project under which resources are grouped.
    """

    def __init__(self):
        self.USERNAME = None
        self.API_KEY = None
        self.api = None
        self.debug = False
        try:
            # Parse as int first: bool("0") is True, which would silently
            # enable debug mode for BIGML_DEBUG=0. int() also makes the
            # ValueError guard below meaningful for malformed values.
            self.debug = bool(int(os.environ.get('BIGML_DEBUG', 0)))
        except ValueError:
            pass
        self.short_debug = False
        try:
            self.short_debug = bool(int(os.environ.get('BIGML_SHORT_DEBUG', 0)))
        except ValueError:
            pass
        self.clear()
        self.dataset_ids = []
        self.fields_properties_dict = {}
        self.counters = {}
        self.test_project_name = "Test: python bindings %s" % \
            datetime.datetime.now()
        self.project_id = None
        self.print_connection_info()
        # Multiplier applied to all waits/retries (seconds).
        self.delta = int(os.environ.get('BIGML_DELTA', '1'))

    def print_connection_info(self):
        """Build the API connection from env vars and print its config."""
        self.USERNAME = os.environ.get('BIGML_USERNAME')
        self.API_KEY = os.environ.get('BIGML_API_KEY')
        self.EXTERNAL_CONN = get_env_connection_info()
        if self.USERNAME is None or self.API_KEY is None:
            assert False, ("Tests use the BIGML_USERNAME and BIGML_API_KEY"
                           " environment variables to authenticate the"
                           " connection, but they seem to be unset. Please,"
                           "set them before testing.")
        # Reuse any organization set on a previous connection. The original
        # code referenced an undefined name `organization` here, raising
        # NameError whenever self.api already had that attribute;
        # getattr(None, ..., None) also covers the first call (api is None).
        self.api = BigML(self.USERNAME, self.API_KEY, debug=self.debug,
                         short_debug=self.short_debug,
                         organization=getattr(self.api, "organization", None),
                         storage=(None if not (self.debug or self.short_debug)
                                  else "./debug_storage"))
        print("----------------------------------------------------------")
        print(self.api.connection_info())
        print(self.external_connection_info())
        print("----------------------------------------------------------")

    def external_connection_info(self):
        """Printable string: the configuration used to connect to an
        external data source.
        """
        info = "External data connection config:\n%s" % \
            pprint.pformat(self.EXTERNAL_CONN, indent=4)
        return info

    def clear(self):
        """Clears the stored resources' ids for every resource type."""
        for resource_type in RESOURCE_TYPES:
            setattr(self, plural(resource_type), [])
            setattr(self, RENAMED_RESOURCES.get(resource_type,
                                                resource_type), None)

    def delete_resources(self):
        """Deletes the created objects, retrying up to MAX_RETRIES times."""
        for resource_type in RESOURCE_TYPES:
            object_list = set(getattr(self, plural(resource_type)))
            if object_list:
                print("Deleting %s %s" % (len(object_list),
                                          plural(resource_type)))
                delete_method = self.api.deleters[resource_type]
                for obj_id in object_list:
                    counter = 0
                    result = delete_method(obj_id)
                    while (result['code'] != HTTP_NO_CONTENT and
                           counter < MAX_RETRIES):
                        print("Delete failed for %s. Retrying" % obj_id)
                        time.sleep(3 * self.delta)
                        counter += 1
                        result = delete_method(obj_id)
                    if counter == MAX_RETRIES:
                        print ("Retries to delete the created resources are"
                               " exhausted. Failed to delete.")

    def store_resources(self):
        """Fetches every created object so it is kept in local storage."""
        for resource_type in RESOURCE_TYPES:
            object_list = set(getattr(self, plural(resource_type)))
            if object_list:
                # The original message said "Deleting", which was misleading
                # for a storing pass.
                print("Storing %s %s" % (len(object_list),
                                         plural(resource_type)))
                store_method = self.api.getters[resource_type]
                for obj_id in object_list:
                    result = store_method(obj_id)
                    self.api.ok(result)
world = World()
def res_filename(file):
    # Resolve *file* relative to the installed bigml package directory
    # (one level up from the package itself).
    # NOTE: the parameter name shadows the `file` builtin; kept for
    # backward compatibility with keyword callers.
    return pkg_resources.resource_filename('bigml', "../%s" % file)
def setup_module():
    """Operations to be performed before each module.

    Lazily creates the shared test project used to group all resources
    created by the tests, then resets the stored resource id lists.
    """
    if world.project_id is None:
        world.project_id = world.api.create_project( \
            {"name": world.test_project_name})['resource']
    world.clear()
def teardown_module():
    """Operations to be performed after each module.

    In normal runs, removes temporary files, deletes every created
    resource and finally the test project; warns if the project still
    reports leftover resources. In debug runs, resources are stored
    locally instead of deleted so they can be inspected.
    """
    print("Teardown module ---------------------------")
    if not world.debug and not world.short_debug:
        if os.path.exists('./tmp'):
            shutil.rmtree('./tmp')
        world.delete_resources()
        project_stats = world.api.get_project( \
            world.project_id)['object']['stats']
        for resource_type, value in list(project_stats.items()):
            if value['count'] != 0:
                # assert False, ("Increment in %s: %s" % (resource_type, value))
                print("WARNING: Increment in %s: %s" % (resource_type, value))
        world.api.delete_project(world.project_id)
        world.project_id = None
    else:
        world.store_resources()
def teardown_class():
    """Operations to be performed after each class.

    Drops dataset ids and cached local model objects held on the shared
    ``world`` fixture.
    """
    world.dataset_ids = []
    for attribute in ("local_ensemble", "local_model", "local_deepnet"):
        setattr(world, attribute, None)
def logged_wait(start, delta, count, res_description):
    """Sleep for the next exponential-backoff period and log progress.

    *start* is when waiting began, *delta* the total seconds allowed,
    *count* the retry number. Asserts (via nose) that the elapsed time
    has not exceeded *delta*.
    """
    # Exponential backoff seeded at delta/100, capped at delta seconds.
    wait_time = min(get_exponential_wait(delta / 100.0, count), delta)
    print("Sleeping %s" % wait_time)
    time.sleep(wait_time)
    elapsed = (datetime.datetime.utcnow() - start).seconds
    # Warn (by logging) once more than half the allotted time is spent.
    if elapsed > delta / 2.0:
        print("%s seconds waiting for %s" % \
            (elapsed, res_description))
    assert_less(elapsed, delta)
| StarcoderdataPython |
1647113 | <gh_stars>1-10
import math
import torch
def get_adam_delta(grads, optimizer):
    """Compute the Adam update for each named parameter, without mutating
    the optimizer's state.

    Parameters
    ----------
    grads : dict mapping parameter name -> gradient tensor
    optimizer : an Adam-style optimizer exposing ``param_groups`` (each
        group carrying 'names', 'params', 'betas', 'lr', 'eps',
        'weight_decay') and per-parameter ``state`` with 'exp_avg',
        'exp_avg_sq' and 'step'.

    Returns
    -------
    dict mapping parameter name -> the delta Adam would apply next step.
    """
    deltas = {}
    for group in optimizer.param_groups:
        for n, p in zip(group['names'], group['params']):
            grad = grads[n]
            state = optimizer.state[p]
            exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
            beta1, beta2 = group['betas']
            # Step count this delta corresponds to (state is not advanced).
            step = state['step'] + 1
            if group['weight_decay'] != 0:
                grad = grad + group['weight_decay'] * p.data
            bias_correction1 = 1. - beta1 ** step
            bias_correction2 = 1. - beta2 ** step
            step_size = group['lr'] / bias_correction1
            # Out-of-place moment updates so optimizer state is untouched.
            _exp_avg = exp_avg * beta1 + (1. - beta1) * grad
            _exp_avg_sq = exp_avg_sq * beta2 + (1. - beta2) * grad * grad
            # Canonical Adam denominator: sqrt(v)/sqrt(bias_correction2) + eps.
            # The previous version added eps both inside the sqrt and after
            # it, deviating from torch.optim.Adam.
            denom = (torch.sqrt(_exp_avg_sq) /
                     math.sqrt(bias_correction2)).add_(group['eps'])
            deltas[n] = -step_size * _exp_avg / denom
    return deltas
1666916 | <reponame>kissmikijr/hammurabi
"""
Files preconditions module contains simple preconditions used for checking
file existence.
"""
from pathlib import Path
from hammurabi.preconditions.base import Precondition
class IsFileExist(Precondition):
    """
    Precondition that holds when the given path is an existing regular file.

    Typical usage attaches it to a rule so the rule only runs when the
    file is present:

    .. code-block:: python

        >>> from pathlib import Path
        >>> from hammurabi import Renamed, IsFileExist
        >>>
        >>> rule = Renamed(
        >>>     name="Rename the file if an other one exists",
        >>>     path=Path("old-name"),
        >>>     new_name="new-name",
        >>>     preconditions=[IsFileExist(path=Path("other-file"))],
        >>> )

    :param path: Input file's path
    :type path: Path
    """

    def __init__(self, path: Path, **kwargs) -> None:
        super().__init__(param=path, **kwargs)

    def task(self) -> bool:
        """
        Check if the given file exists.

        :return: Returns True if the file exists
        :rtype: bool
        """
        self.param: Path
        target = self.param
        return target.exists() and target.is_file()
class IsFileNotExist(IsFileExist):
    """
    Inverse of :class:`hammurabi.preconditions.files.IsFileExist`.
    """

    def task(self) -> bool:
        """
        Check that the given file does not exist.

        :return: Returns True if the file does not exist
        :rtype: bool
        """
        return not IsFileExist.task(self)
| StarcoderdataPython |
3331388 | from pathlib import Path
from django.db.models.signals import post_save
from django.dispatch import receiver
from main.models import ContextPhoto, BagPhoto
from main.tasks import cp_thumbnail, bp_thumbnail
def tn_is_same(cp):
    """
    Return True when the thumbnail filename already matches the photo:
    thumbnails are named "tn_" followed by the full-size photo's stem.
    """
    expected_prefix = "tn_%s" % Path(cp.photo.file.name).stem
    return Path(cp.thumbnail.file.name).stem.startswith(expected_prefix)
@receiver(post_save, sender=ContextPhoto)
def start_cp_thumbnail(sender, **kwargs):
    """Queue thumbnail (re)generation after a ContextPhoto is saved,
    unless an up-to-date thumbnail is already present."""
    photo = kwargs["instance"]
    up_to_date = photo.thumbnail and tn_is_same(photo)
    if not up_to_date:
        cp_thumbnail.delay(photo.id)
@receiver(post_save, sender=BagPhoto)
def start_bp_thumbnail(sender, **kwargs):
    """Queue thumbnail (re)generation after a BagPhoto is saved,
    unless an up-to-date thumbnail is already present."""
    photo = kwargs["instance"]
    up_to_date = photo.thumbnail and tn_is_same(photo)
    if not up_to_date:
        bp_thumbnail.delay(photo.id)
1669438 | from checks.worker import WorkerCheck
from polyaxon.config_settings import SchedulerCeleryTasks
class SchedulerCheck(WorkerCheck):
    """Health check for the scheduler worker."""
    # Celery task used to ping the scheduler for liveness.
    WORKER_HEALTH_TASK = SchedulerCeleryTasks.SCHEDULER_HEALTH
    # Label for this worker in health reporting.
    # NOTE(review): exact usage depends on WorkerCheck; confirm there.
    WORKER_NAME = 'SCHEDULER'
| StarcoderdataPython |
3264100 | <reponame>manuelsousa7/ia-labs<gh_stars>0
class p_anel_ex_6:
    """Percept for exercise 6: wraps a sensed number."""

    def __init__(self, n):
        # Kept under the original attribute name .n for compatibility.
        self.n = n

    def num(self):
        """Return the wrapped number."""
        return self.n
# Three sample percepts: ring value 0, 5 and 50 (Python 2 script).
p51 = p_anel_ex_6(0)
p52 = p_anel_ex_6(5)
p53 = p_anel_ex_6(50)
def agente_anel_ex_6(p):
    # Simple reflex agent: wait ("esperar") on 0, walk ("andar") otherwise.
    n = p.num()
    if(n == 0):
        print "esperar"
    else:
        print "andar"
agente_anel_ex_6(p51)
agente_anel_ex_6(p52)
agente_anel_ex_6(p53)
30697 | import dash
from dash import Output, Input, dcc
from dash import html
from tabs import tab1, tab2
# from tab2_callbacks import tab2_out, upload_prediction, render_graph2
import flask
server = flask.Flask(__name__) # define flask app.server
# External fonts; Bootstrap entries kept commented out by the author.
external_stylesheets = [
    {
        "href": "https://fonts.googleapis.com/css2?"
        "family=Lato:wght@400;700&display=swap",
        "rel": "stylesheet",
    },
    # dbc.themes.BOOTSTRAP,
    # "https://cdn.jsdelivr.net/npm/bootstrap@5.1.0/dist/css/bootstrap.min.css"
]
app = dash.Dash(__name__,
                server=server,
                external_stylesheets=external_stylesheets)
app.title = "Tweet the Stocks"
# Page layout: header banner, a two-tab selector, then both tab bodies
# (visibility is toggled by the show_hide_tab callback below).
app.layout = html.Div(
    children=[
        html.Div(
            children=[
                html.H1(children="Tweet the Stocks", className="header-title"),
                html.P(
                    children="Explore the correlation of stock prices and the related tagged tweets in 2019",
                    className="header-description",
                ),
            ],
            className="header",
        ),
        html.Div(
            children=[
                html.Div(
                    dcc.Tabs(id="tabs", value='tab1', children=[
                        dcc.Tab(label='Historical records', value='tab1', ),
                        dcc.Tab(label='Prediction', value='tab2'),
                    ], colors={
                        "border": "white",
                        "primary": "#e36209",
                        "background": "#fafbfc"
                    })),
            ],
            className="tabs",
        ),
        tab1.layout,
        tab2.layout,
    ]
)
@app.callback(
Output('tab1', 'style'), Output('tab2', 'style'),
[Input('tabs', 'value')])
def show_hide_tab(tab):
    """Toggle which tab body is visible.

    Returns (tab1 style, tab2 style); None for unknown tab values,
    matching the original behavior.
    """
    visible = {'display': 'block'}
    hidden = {'display': 'none'}
    if tab == 'tab1':
        return visible, hidden
    if tab == 'tab2':
        return hidden, visible
@app.callback(Output('popover1', 'children'), Input('import', 'n_clicks'), Input('upload-data', 'contents'),Input('tabs', 'value'))
def hint(clicks, file_content, tab):
    """Show a progress message on tab1 once the import button was clicked
    with an uploaded file; otherwise show nothing."""
    ready = tab == "tab1" and clicks > 0 and bool(file_content)
    return "Calculating tweet sentiment scores..." if ready else ""
| StarcoderdataPython |
3381177 | # -*- coding: iso-8859-15 -*-
import spanishconjugator
from spanishconjugator.SpanishConjugator import Conjugator
# ------------------------------ Simple Conditional Conditional Tense ------------------------------- #
# Simple conditional of the regular -ar verb "hablar", all six persons.
def test_simple_conditional_conditional_yo_ar():
    expected = "hablaría"
    assert Conjugator().conjugate('hablar','simple_conditional','conditional','yo') == expected
def test_simple_conditional_conditional_tu_ar():
    expected = "hablarías"
    assert Conjugator().conjugate('hablar','simple_conditional','conditional','tu') == expected
def test_simple_conditional_conditional_usted_ar():
    expected = "hablaría"
    assert Conjugator().conjugate('hablar','simple_conditional','conditional','usted') == expected
def test_simple_conditional_conditional_nosotros_ar():
    expected = "hablaríamos"
    assert Conjugator().conjugate('hablar','simple_conditional','conditional','nosotros') == expected
def test_simple_conditional_conditional_vosotros_ar():
    expected = "hablaríais"
    assert Conjugator().conjugate('hablar','simple_conditional','conditional','vosotros') == expected
def test_simple_conditional_conditional_ustedes_ar():
    expected = "hablarían"
    assert Conjugator().conjugate('hablar','simple_conditional','conditional','ustedes') == expected
# Simple conditional of regular -er ("beber") and -ir ("vivir") verbs,
# alternating persons across the two conjugation classes.
def test_simple_conditional_conditional_yo_er():
    expected = "bebería"
    assert Conjugator().conjugate('beber','simple_conditional','conditional','yo') == expected
def test_simple_conditional_conditional_tu_ir():
    expected = "vivirías"
    assert Conjugator().conjugate('vivir','simple_conditional','conditional','tu') == expected
def test_simple_conditional_conditional_usted_er():
    expected = "bebería"
    assert Conjugator().conjugate('beber','simple_conditional','conditional','usted') == expected
def test_simple_conditional_conditional_nosotros_ir():
    expected = "viviríamos"
    assert Conjugator().conjugate('vivir','simple_conditional','conditional','nosotros') == expected
def test_simple_conditional_conditional_vosotros_er():
    expected = "beberíais"
    assert Conjugator().conjugate('beber','simple_conditional','conditional','vosotros') == expected
def test_simple_conditional_conditional_ustedes_ir():
    expected = "vivirían"
    assert Conjugator().conjugate('vivir','simple_conditional','conditional','ustedes') == expected
| StarcoderdataPython |
1670347 | <filename>data_preparation/data_preparation.py
import numpy as np
'''This is the ordering assumed in the feature channel'''
AX_INDEXES = {'t': 0, 'x': 1, 'y': 2, 'z': 3}
def prepare_image_data(fpath):
    """Load a pickled dict of scans from *fpath* and collect labelled images.

    Returns a dict with 'images' (stacked height maps) and 'stl_z'
    (ground-truth z values), keeping only entries that carry a label.
    """
    # allow_pickle=True is required to load dict payloads on numpy >= 1.16.4.
    # SECURITY: pickle can execute arbitrary code; only load trusted files.
    orig_data = np.load(fpath, allow_pickle=True).tolist()
    images = list()
    true_z = list()
    # .values() works on both Python 2 and 3; the original .itervalues()
    # raised AttributeError under Python 3.
    for val in orig_data.values():
        append_image(val, images, true_z)
    return {'images': np.array(images), 'stl_z': np.array(true_z)}


def append_image(val, images, true_z):
    '''
    Append one scan's height map and ground-truth z.
    Entries without an 'stl_z' label are skipped.
    '''
    if 'stl_z' not in val:
        return
    images.append(val['height_mat_mm'])
    true_z.append(val['stl_z'])
def prepare_scan_data(fpath):
    """Load a pickled dict of scans and stack their (x, y, channel) maps.

    Returns a dict with 'image_4d_xyzt' of shape (N, rows, cols, 4) —
    channels ordered as in AX_INDEXES (t, x, y, z) — and 'stl_z' labels.
    """
    # allow_pickle=True is required to load dict payloads on numpy >= 1.16.4.
    # SECURITY: pickle can execute arbitrary code; only load trusted files.
    orig_data = np.load(fpath, allow_pickle=True).tolist()
    txyz = list()
    true_z = list()
    # .values() works on both Python 2 and 3; the original .itervalues()
    # raised AttributeError under Python 3.
    for val in orig_data.values():
        append_z_map(val, txyz, true_z)
    return {'image_4d_xyzt': np.array(txyz), 'stl_z': np.array(true_z)}


def append_z_map(val, txyz, true_z):
    '''
    Append one scan as a (rows, cols, 4) array with the channel axis last.
    It is assumed but not enforced that the ordering matches AX_INDEXES.
    Entries without an 'stl_z' label are skipped.
    '''
    if 'stl_z' not in val:
        return
    new_txyz = np.array([val['t'], val['x'], val['y'], val['z']])
    # Move the leading channel axis to the end: (4, r, c) -> (r, c, 4).
    new_txyz = np.moveaxis(new_txyz, [0, 1, 2], [2, 0, 1])
    txyz.append(new_txyz)
    true_z.append(val['stl_z'])
def get_z_data_for_learning(fpath):
    """Load scan data from *fpath* and return (z maps, ground-truth z)."""
    scan_data = prepare_scan_data(fpath)
    z_maps = scan_data['image_4d_xyzt'][:, :, :, AX_INDEXES['z']]
    return z_maps, scan_data['stl_z']
1617093 | import abc
class BaseMod(abc.ABC):
    """Abstract interface for a mod with a name and string-serializable
    parameters."""
    def __init__(self):
        pass
    @abc.abstractmethod
    def get_mod_name(self):
        """Return this mod's name."""
        raise NotImplementedError
    @abc.abstractmethod
    def dump_parameters(self) -> str:
        """Serialize the mod's parameters to a string."""
        raise NotImplementedError
    @abc.abstractmethod
    def load_parameters(self, params: str):
        """Restore parameters from a string produced by dump_parameters."""
        raise NotImplementedError
4823860 | # -*- encoding: utf-8 -*-
'''
Current module: pyrunner.ext.idleshell.diyrun
Rough version history:
v1.0 Original version to use
********************************************************************
@AUTHOR: Administrator-<NAME>(罗科峰)
MAIL: <EMAIL>
RCS: rock4.common.dev.idleshell.diyrun,v 2.0 2017年2月7日
FROM: 2016年8月16日
********************************************************************
======================================================================
UI and Web Http automation frame for python.
'''
import time,socket,sys,thread,threading,Queue,traceback
from diyoid import oidmap
from idlelib import run,rpc
from SimpleAutoComplete import SimpleAutoComplete
class MyExecuteve(run.Executive):
    """IDLE Executive extended with auto-completion and variable polling."""
    def __init__(self,rpchandler):
        run.Executive.__init__(self, rpchandler)
        self.autocomplete = SimpleAutoComplete(debug = False)
    def poll_var(self, item, subobj=""):
        """Return the value named *item* from the executive's locals.

        When *subobj* is given (e.g. "getValue()"), evaluate that
        attribute/call on the variable instead of returning it directly.
        """
        if not item in self.locals:
            return
        var = self.locals.get(item)
        if not subobj:
            return var
        # poll_var("obj", "getValue()")
        # SECURITY: eval() of a caller-supplied string; acceptable only
        # because requests come from the trusted IDLE GUI process.
        return eval("var.%s" %subobj)
class MyHandler(rpc.RPCHandler):
    """RPC handler for the subprocess side of the DIY IDLE shell (Python 2)."""
    def handle(self):
        """Override base method: register local objects, wire std streams
        to the GUI process, then wait for requests."""
        oid_map_class = oidmap.get_oid_map()
        oid_map_class["exec"] = MyExecuteve(self)
        for k,v in oid_map_class.items():
            self.register(k, v)
        # reload sys module. and set encoding to utf-8
        reload(sys);getattr(sys,"setdefaultencoding")("utf-8")
        # (translated) Request proxies for objects registered on the remote
        # end; here the GUI process's stdin, stdout and stderr.
        sys.stdin = self.console = self.get_remote_proxy("stdin")
        sys.stdout = self.get_remote_proxy("stdout")
        sys.stderr = self.get_remote_proxy("stderr")
        from idlelib import IOBinding
        sys.stdin.encoding = sys.stdout.encoding = \
            sys.stderr.encoding = IOBinding.encoding
        self.interp = self.get_remote_proxy("interp")
        rpc.RPCHandler.getresponse(self, myseq=None, wait=0.05)
    def exithook(self):
        "override SocketIO method - wait for MainThread to shut us down"
        time.sleep(10)
    def EOFhook(self):
        "Override SocketIO method - terminate wait on callback and exit thread"
        global quitting
        quitting = True
        thread.interrupt_main()
    def decode_interrupthook(self):
        "interrupt awakened thread"
        global quitting
        quitting = True
        thread.interrupt_main()
def manage_socket(address):
    """Try up to three times to bind the RPC server at *address* and
    serve a single request; on repeated failure signal the main loop to
    exit (Python 2 code)."""
    for i in range(3):
        time.sleep(i)
        try:
            server = run.MyRPCServer(address, MyHandler)
            break
        except socket.error, err:
            print>>sys.__stderr__,"IDLE Subprocess: socket error: "\
                                        + err.args[1] + ", retrying...."
    else:
        # for-else: all three attempts failed.
        print>>sys.__stderr__, "IDLE Subprocess: Connection to "\
                               "IDLE GUI failed, exiting."
        run.show_socket_error(err, address)
        global exit_now
        exit_now = True
        return
    server.handle_request() # A single request only
quitting = False
interruptable = False
def main(del_exitfunc=False):
    """Subprocess entry point: start the socket thread, then service the
    RPC request queue until told to exit (Python 2 code)."""
    global exit_now
    global quitting
    global no_exitfunc
    no_exitfunc = del_exitfunc
    #time.sleep(15)                                 # test subprocess not responding
    try:
        assert(len(sys.argv) > 1)
        port = int(sys.argv[-1])
    except:
        print>>sys.stderr, "IDLE Subprocess: no IP port passed in sys.argv."
        return
    sys.argv[:] = [""]
    sockthread = threading.Thread(target=manage_socket,
                                  name='SockThread',
                                  args=((run.LOCALHOST, port),))
    sockthread.setDaemon(True)
    sockthread.start()
    # Main dispatch loop: pop (seq, request) pairs, execute, push results.
    while 1:
        try:
            if exit_now:
                try:
                    exit()
                except KeyboardInterrupt:
                    # exiting but got an extra KBI?  Try again!
                    continue
            try:
                seq, request = rpc.request_queue.get(block=True, timeout=0.05)
                # print "request:",request
            except Queue.Empty:
                continue
            method, args, kwargs = request
            ret = method(*args, **kwargs)
            # print "respone:",ret
            rpc.response_queue.put((seq, ret))
        except KeyboardInterrupt:
            if quitting:
                exit_now = True
            continue
        except SystemExit:
            raise
        except:
            # NOTE: shadows the `type` builtin; kept as in the original.
            type, value, tb = sys.exc_info()
            try:
                run.print_exception()
                rpc.response_queue.put((seq, None))
            except:
                # Link didn't work, print same exception to __stderr__
                traceback.print_exception(type, value, tb, file=sys.__stderr__)
                exit()
        else:
            continue
92877 | <filename>bnpy/allocmodel/topics/HDPTopicUtil.py
import numpy as np
import OptimizerRhoOmega
from bnpy.util import NumericUtil
from bnpy.util import digamma, gammaln
from bnpy.util.StickBreakUtil import rho2beta
from bnpy.util.NumericUtil import calcRlogRdotv_allpairs
from bnpy.util.NumericUtil import calcRlogRdotv_specificpairs
from bnpy.util.NumericUtil import calcRlogR_allpairs, calcRlogR_specificpairs
ELBOTermDimMap = dict(
slackTheta='K',
slackThetaRem=None,
gammalnTheta='K',
gammalnThetaRem=None,
gammalnSumTheta=None,
Hresp=None,
)
def calcELBO(**kwargs):
    """ Calculate ELBO objective for provided model state.

    Returns
    -------
    L : scalar float
        L is the value of the objective function at provided state.
    """
    linear_part = calcELBO_LinearTerms(**kwargs)
    nonlinear_part = calcELBO_NonlinearTerms(**kwargs)
    return linear_part + nonlinear_part
def calcELBO_LinearTerms(SS=None,
                         nDoc=None,
                         rho=None, omega=None, Ebeta=None,
                         alpha=0, gamma=None,
                         afterGlobalStep=0, todict=0, **kwargs):
    """ Calculate ELBO objective terms that are linear in suff stats.

    Returns
    -------
    L : scalar float
        Sum of all ELBO terms that are constant or linear wrt suff stats.
    """
    # Prefer the document count stored in the suff-stats bag when given.
    if SS is not None:
        nDoc = SS.nDoc
    return L_top(nDoc=nDoc, rho=rho, omega=omega, Ebeta=Ebeta,
                 alpha=alpha, gamma=gamma)
def calcELBO_NonlinearTerms(Data=None, SS=None, LP=None,
                            rho=None, Ebeta=None, alpha=None,
                            resp=None, DocTopicCount=None, theta=None,
                            ElogPi=None,
                            nDoc=None, sumLogPi=None, sumLogPiRem=None,
                            Hresp=None, slackTheta=None, slackThetaRem=None,
                            gammalnTheta=None, gammalnSumTheta=None,
                            gammalnThetaRem=None,
                            returnMemoizedDict=0, **kwargs):
    """ Calculate ELBO objective terms non-linear in suff stats.

    Each quantity may be passed directly, pulled from the SS/LP
    containers, or recomputed from more primitive inputs; the cascade of
    ``is None`` checks below fills in whatever is missing. When
    returnMemoizedDict is true, the memoizable per-term values are
    returned as a dict instead of the scalar objective.
    """
    if Ebeta is None:
        Ebeta = rho2beta(rho, returnSize='K+1')
    if SS is not None:
        sumLogPi = SS.sumLogPi
        sumLogPiRem = SS.sumLogPiRem
    if LP is not None:
        resp = LP['resp']
        DocTopicCount = LP['DocTopicCount']
        theta = LP['theta']
        thetaRem = LP['thetaRem']
        ElogPi = LP['ElogPi']
        ElogPiRem = LP['ElogPiRem']
    if DocTopicCount is not None and theta is None:
        # theta_dk = N_dk + alpha * E[beta_k]
        theta = DocTopicCount + alpha * Ebeta[:-1]
        thetaRem = alpha * Ebeta[-1]
    if theta is not None and ElogPi is None:
        digammasumtheta = digamma(theta.sum(axis=1) + thetaRem)
        ElogPi = digamma(theta) - digammasumtheta[:, np.newaxis]
        ElogPiRem = digamma(thetaRem) - digammasumtheta[:, np.newaxis]
    if sumLogPi is None and ElogPi is not None:
        sumLogPi = np.sum(ElogPi, axis=0)
        sumLogPiRem = np.sum(ElogPiRem)
    if Hresp is None:
        if SS is not None and SS.hasELBOTerm('Hresp'):
            Hresp = SS.getELBOTerm('Hresp')
        else:
            # Word-count weighting applies for bag-of-words datasets.
            if hasattr(Data, 'word_count'):
                Hresp = -1 * NumericUtil.calcRlogRdotv(resp, Data.word_count)
            else:
                Hresp = -1 * NumericUtil.calcRlogR(resp)
    if slackTheta is None:
        if SS is not None and SS.hasELBOTerm('slackTheta'):
            slackTheta = SS.getELBOTerm('slackTheta')
            slackThetaRem = SS.getELBOTerm('slackThetaRem')
        else:
            slackTheta = DocTopicCount - theta
            slackTheta *= ElogPi
            slackTheta = np.sum(slackTheta, axis=0)
            slackThetaRem = -1 * np.sum(thetaRem * ElogPiRem)
    if gammalnTheta is None:
        if SS is not None and SS.hasELBOTerm('gammalnTheta'):
            gammalnSumTheta = SS.getELBOTerm('gammalnSumTheta')
            gammalnTheta = SS.getELBOTerm('gammalnTheta')
            gammalnThetaRem = SS.getELBOTerm('gammalnThetaRem')
        else:
            sumTheta = np.sum(theta, axis=1) + thetaRem
            gammalnSumTheta = np.sum(gammaln(sumTheta))
            gammalnTheta = np.sum(gammaln(theta), axis=0)
            gammalnThetaRem = theta.shape[0] * gammaln(thetaRem)
    if returnMemoizedDict:
        return dict(Hresp=Hresp,
                    slackTheta=slackTheta,
                    slackThetaRem=slackThetaRem,
                    gammalnTheta=gammalnTheta,
                    gammalnThetaRem=gammalnThetaRem,
                    gammalnSumTheta=gammalnSumTheta)
    # First, compute all local-only terms
    Lentropy = Hresp.sum()
    Lslack = slackTheta.sum() + slackThetaRem
    LcDtheta = -1 * (gammalnSumTheta - gammalnTheta.sum() - gammalnThetaRem)
    # For stochastic (soVB), we need to scale up these terms
    # Only used when --doMemoELBO is set to 0 (not recommended)
    if SS is not None and SS.hasAmpFactor():
        Lentropy *= SS.ampF
        Lslack *= SS.ampF
        LcDtheta *= SS.ampF
    # Next, compute the slack term
    alphaEbeta = alpha * Ebeta
    Lslack_alphaEbeta = np.sum(alphaEbeta[:-1] * sumLogPi) \
        + alphaEbeta[-1] * sumLogPiRem
    Lslack += Lslack_alphaEbeta
    return LcDtheta + Lslack + Lentropy
def L_top(nDoc=None, rho=None, omega=None,
          alpha=None, gamma=None, **kwargs):
    ''' Evaluate the top-level term of the surrogate objective.

    Combines the Beta cumulant difference and the expected log
    stick-breaking weights for the q(u) factors parameterized by
    (rho, omega).
    '''
    K = rho.size
    # Beta pseudo-counts implied by the (rho, omega) parameterization.
    eta1 = rho * omega
    eta0 = (1 - rho) * omega
    digammaBoth = digamma(eta1 + eta0)
    ElogU = digamma(eta1) - digammaBoth
    Elog1mU = digamma(eta0) - digammaBoth
    ONcoef = nDoc + 1.0 - eta1
    OFFcoef = nDoc * OptimizerRhoOmega.kvec(K) + gamma - eta0
    calpha = nDoc * K * np.log(alpha)
    cDiff = K * c_Beta(1, gamma) - c_Beta(eta1, eta0)
    return calpha + \
        cDiff + \
        np.inner(ONcoef, ElogU) + np.inner(OFFcoef, Elog1mU)
def calcHrespForMergePairs(resp, Data, mPairIDs):
    ''' Calculate resp entropy terms for candidate merge pairs.

    All pairs are considered when mPairIDs is None; otherwise only the
    listed pairs. Word-count weighting is applied when the dataset
    carries a word_count attribute.

    Returns
    ---------
    Hresp : 2D array, size K x K
    '''
    allPairs = mPairIDs is None
    if hasattr(Data, 'word_count'):
        if allPairs:
            Hmat = calcRlogRdotv_allpairs(resp, Data.word_count)
        else:
            Hmat = calcRlogRdotv_specificpairs(resp, Data.word_count, mPairIDs)
    else:
        Hmat = (calcRlogR_allpairs(resp) if allPairs
                else calcRlogR_specificpairs(resp, mPairIDs))
    return -1 * Hmat
def c_Beta(a1, a0):
    ''' Evaluate the cumulant function of the Beta distribution.

    Vector inputs are summed over all entries.

    Returns
    -------
    c : scalar real
    '''
    log_norm = np.sum(gammaln(a1 + a0))
    log_parts = np.sum(gammaln(a1)) + np.sum(gammaln(a0))
    return log_norm - log_parts
def c_Dir(AMat, arem=None):
    ''' Evaluate the cumulant function of the Dirichlet distribution.

    For 2D input, rows are treated as separate Dirichlets and summed.
    When *arem* is given, it is appended (conceptually) as a shared
    remainder pseudo-count for every row.

    Returns
    -------
    c : scalar real
    '''
    AMat = np.asarray(AMat)
    D = AMat.shape[0]
    if arem is None:
        if AMat.ndim == 1:
            return gammaln(np.sum(AMat)) - np.sum(gammaln(AMat))
        return np.sum(gammaln(np.sum(AMat, axis=1))) - np.sum(gammaln(AMat))
    rowTotals = np.sum(AMat, axis=1) + arem
    return (np.sum(gammaln(rowTotals))
            - np.sum(gammaln(AMat))
            - D * np.sum(gammaln(arem)))
def E_cDalphabeta_surrogate(alpha, rho, omega):
    ''' Compute expected value of cumulant function of alpha * beta.

    Returns
    -------
    csur : scalar float
    '''
    K = rho.size
    # Beta pseudo-counts implied by the (rho, omega) parameterization.
    eta1 = rho * omega
    eta0 = (1 - rho) * omega
    digammaBoth = digamma(eta1 + eta0)
    ElogU = digamma(eta1) - digammaBoth
    Elog1mU = digamma(eta0) - digammaBoth
    OFFcoef = OptimizerRhoOmega.kvec(K)
    calpha = gammaln(alpha) + (K + 1) * np.log(alpha)
    return calpha + np.sum(ElogU) + np.inner(OFFcoef, Elog1mU)
def calcELBO_FixedDocTopicCountIgnoreEntropy(
        alpha=None, gamma=None,
        rho=None, omega=None,
        DocTopicCount=None):
    """Evaluate the ELBO at fixed doc-topic counts with the entropy
    term zeroed out (Hresp = 0 for every topic)."""
    nDoc = DocTopicCount.shape[0]
    zeroHresp = np.zeros(rho.size)
    nonlinear = calcELBO_NonlinearTerms(
        nDoc=nDoc,
        DocTopicCount=DocTopicCount, alpha=alpha,
        rho=rho, omega=omega, Hresp=zeroHresp)
    linear = calcELBO_LinearTerms(alpha=alpha, gamma=gamma,
                                  rho=rho, omega=omega,
                                  nDoc=nDoc)
    return nonlinear + linear
"""
def E_cDir_alphabeta__Numeric(self):
''' Numeric integration of the expectation
'''
g1 = self.rho * self.omega
g0 = (1 - self.rho) * self.omega
assert self.K <= 2
if self.K == 1:
us = np.linspace(1e-14, 1 - 1e-14, 1000)
logpdf = gammaln(g1 + g0) - gammaln(g1) - gammaln(g0) \
+ (g1 - 1) * np.log(us) + (g0 - 1) * np.log(1 - us)
pdf = np.exp(logpdf)
b1 = us
bRem = 1 - us
Egb1 = np.trapz(gammaln(self.alpha * b1) * pdf, us)
EgbRem = np.trapz(gammaln(self.alpha * bRem) * pdf, us)
EcD = gammaln(self.alpha) - Egb1 - EgbRem
return EcD
def E_cDir_alphabeta__MonteCarlo(self, S=1000, seed=123):
''' Monte Carlo approximation to the expectation
'''
PRNG = np.random.RandomState(seed)
g1 = self.rho * self.omega
g0 = (1 - self.rho) * self.omega
cD_abeta = np.zeros(S)
for s in range(S):
u = PRNG.beta(g1, g0)
u = np.minimum(np.maximum(u, 1e-14), 1 - 1e-14)
beta = np.hstack([u, 1.0])
beta[1:] *= np.cumprod(1.0 - u)
cD_abeta[s] = gammaln(
self.alpha) - gammaln(self.alpha * beta).sum()
return np.mean(cD_abeta)
def E_cDir_alphabeta__Surrogate(self):
calpha = gammaln(self.alpha) + (self.K + 1) * np.log(self.alpha)
g1 = self.rho * self.omega
g0 = (1 - self.rho) * self.omega
digammaBoth = digamma(g1 + g0)
ElogU = digamma(g1) - digammaBoth
Elog1mU = digamma(g0) - digammaBoth
OFFcoef = OptimizerRhoOmega.kvec(self.K)
cRest = np.sum(ElogU) + np.inner(OFFcoef, Elog1mU)
return calpha + cRest
"""
| StarcoderdataPython |
63517 | <reponame>MiracleWong/MoocStudy<filename>python_data_analysis/pandas/demo1.py
#!/usr/bin/python
#-*- coding:utf8 -*-
import pandas as pd
# Small pandas Series demo (Python 2 print statements).
b = pd.Series([9,8,7,6],index=['a','b','c','d'])
s = pd.Series(25,index=['a','b','c'])
d = pd.Series({'a':9, 'b':8, 'c':7})
# NOTE(review): `e` is built but never used below.
e = pd.Series({'a':9, 'b':8, 'c':7}, index=['c', 'a', 'b','d'])
print b
# Addition aligns on the index labels of s and d.
a = s + d
print a
70922 | <reponame>DanSchum/NMTGMinor<gh_stars>1-10
from onmt.modules.GlobalAttention import GlobalAttention
from onmt.modules.ImageEncoder import ImageEncoder
from onmt.modules.BaseModel import Generator, NMTModel
from onmt.modules.StaticDropout import StaticDropout
# For flake8 compatibility.
__all__ = [GlobalAttention, ImageEncoder, Generator, NMTModel, StaticDropout]
| StarcoderdataPython |
3274186 | """
netvisor.services.sales_payment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2013-2016 by <NAME> | 2019- by <NAME>
:license: MIT, see LICENSE for more details.
"""
from .base import Service
from ..requests.sales_payment import SalesPaymentListRequest
class SalesPaymentService(Service):
    """Service exposing Netvisor sales-payment operations."""

    def list(self):
        """Fetch the list of sales payments via SalesPaymentListRequest."""
        return SalesPaymentListRequest(self.client).make_request()
| StarcoderdataPython |
1744720 | # Generated by Django 4.0.1 on 2022-01-24 15:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 4.0.1 (see file header); prefer new
    # migrations over editing this one.
    initial = True
    dependencies = [
        ('model_location', '0001_initial'),
    ]
    operations = [
        # Creates the Embassy table; all descriptive fields are optional.
        migrations.CreateModel(
            name='Embassy',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('address', models.TextField(blank=True, null=True)),
                ('embassy_title', models.CharField(blank=True, max_length=255, null=True)),
                ('map_url', models.CharField(blank=True, max_length=1000, null=True)),
                ('site_url', models.CharField(blank=True, max_length=255, null=True)),
                ('phone_number', models.CharField(blank=True, max_length=255, null=True)),
                ('email', models.CharField(blank=True, max_length=255, null=True)),
                ('telegram', models.CharField(blank=True, max_length=255, null=True)),
                ('facebook', models.CharField(blank=True, max_length=255, null=True)),
                ('city', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='embassies', to='model_location.city')),
            ],
            options={
                'verbose_name': 'embassy',
                'verbose_name_plural': 'embassies',
            },
        ),
    ]
| StarcoderdataPython |
3387066 | <reponame>toumorokoshi/surgen<filename>surgen/target/local_target.py
from .base import TargetBase
class LocalTarget(TargetBase):
    """Target backed by a local path: the workspace is the target itself."""
    @property
    def workspace(self):
        # No checkout/copy is needed for local targets.
        return self._target
| StarcoderdataPython |
1783568 | <reponame>mattbriggs/edge-modules<filename>scripts/val_ki_dockeraction.py
'''
This script will parse and validate the current known issues includes from Azure Stack Hub
(6/6/2021).
The function runs validates the includes in the repository.
1. Configure the script by updating the global variables.
MODULES points to the module folder.
SCHEMA points to the module schema rule set.
VALIDATIONREPORT points to the location of the validation report.
Note: currently keyed to only find known issues by filtering by the key in
the file name 'issue_azs'
2. Run the script.
3. Open the validation report.
'''
import os
import json
import val_ki_functions as VAL
import mod_utilities as MU
from prettytable import PrettyTable
MODULES = r"/usr/local/edge-modules/azure-stack-hub"
SCHEMAS = r"/usr/local/edge-modules/models/schemas"
def repo_logic(indict):
    '''Insert the logic to process the return from the function.'''
    # Placeholder implementation: currently just echoes the result dict.
    print(indict)
def output_table(inmatrix):
    '''Pretty-print a matrix whose first row holds the column headers.'''
    table = PrettyTable()
    table.field_names = inmatrix[0]
    for row in inmatrix[1:]:
        table.add_row(row)
    table.align = "l"
    print(table)
def fix_path(inlist):
    """Normalize Windows-style backslash separators to forward slashes."""
    return [path.replace("\\", "/") for path in inlist]
def main():
    '''
    Validate includes in the repo. (Main Logic for repo parsing)

    Matches each markdown include against a schema keyed by the first
    two dash-separated tokens of its filename, validates it, prints a
    table of results and writes the overall True/False state to
    state.txt.
    '''
    include_paths = fix_path(MU.get_files(MODULES, "md"))
    schema_paths = VAL.get_schemas_linux(SCHEMAS)
    schema_set = set(schema_paths.keys())
    report = []
    report.append(["ID", "Valid", "Issue"])
    # NOTE(review): variable name has a typo ("validatation"); kept as-is.
    validatation_state = True
    for p in include_paths:
        # Slug = first two dash-separated tokens of the filename,
        # e.g. "issue_azs-123-....md" -> "issue_azs-123".
        split_path = p.split("/")[-1].split("-")
        path_slug = "{}-{}".format(split_path[0],split_path[1])
        if path_slug in schema_set:
            in_body = MU.get_textfromMD(p)
            # Filename without the ".md" extension.
            valid_id = p.split("/")[-1][:-3]
            try:
                if VAL.validate_base_file(in_body):
                    body_parse = VAL.parse_module(in_body)
                    v_line = VAL.validate_module_ki(schema_paths[path_slug], body_parse)
                    if v_line["summary"]:
                        report.append([valid_id, v_line["summary"], "No error."])
                    else:
                        validatation_state = False
                        fields = list(v_line["details"].keys())
                        for f in fields:
                            error_message = "{}: {}".format(v_line["details"][f][0], f)
                            report.append([valid_id, v_line["summary"],error_message ])
                else:
                    report.append([valid_id, False, "Not a valid include file."])
                    validatation_state = False
            except Exception as e:
                # Any parsing/validation crash counts as an invalid file.
                report.append([valid_id, False, "Not a valid include file. {}".format(e)])
                validatation_state = False
    output_table(report)
    print("The repository is valid: {}".format(validatation_state))
    MU.write_text("{}".format(validatation_state), "state.txt")
if __name__ == "__main__":
main()
| StarcoderdataPython |
1664094 | <reponame>Zeing/CaptchaReader<filename>src/Control/CourseSelection.py
# -*- coding: UTF-8 -*-
'''
Created on Jul 13, 2014
Modified on Aug 28, 2014
@author: <NAME> E-mail: <EMAIL>
'''
import urllib
import urllib2
import cookielib
import datetime
from sklearn.externals import joblib
import config
from Identify import Identify
class CourseSelection():
    '''
    The class is created for select course.

    Logs into the Fudan course-selection site by repeatedly downloading the
    login CAPTCHA, recognising it with a pre-trained scikit-learn model, and
    posting the login form.  Written for Python 2 (urllib2/cookielib, print
    statements).
    '''
    def __init__(self, username, password, lesson_num):
        # Credentials and the course number to (eventually) select.
        self.username = username
        self.password = password
        self.lesson_num = lesson_num
        self.is_login = False
        # Browser-like User-Agent so the server accepts the requests.
        self.headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36"}
        # Raw bytes of the most recently downloaded CAPTCHA image.
        self.captcha = ""
        # Cookie jar shared by all requests so the session survives login.
        self.cj = cookielib.CookieJar()
        self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
        urllib2.install_opener(self.opener)
        # Get the module
        self.clf = joblib.load(config.DATA_FILE_NAME)
    def login(self):
        '''Attempt to log in, timing each step and saving failed CAPTCHAs.'''
        count = 0
        success_num = 0
        print("编号,下载验证码,二值化,分割,识别,发送登录请求,状态")
        while not self.is_login:
            print "%3d," % (count + 1),
            # Get the login CAPTCHA
            req = urllib2.Request(config.LOGINCAPTCHAURL, headers = self.headers)
            starttime = datetime.datetime.now()
            image_response = self.opener.open(req)
            self.captcha = image_response.read()
            endtime = datetime.datetime.now()
            interval = endtime - starttime
            # Download time in seconds, printed as one CSV field.
            print "%.5f," % (interval.seconds + interval.microseconds / 1000000.0),
            identify = Identify(self.captcha)
            captcha_content = identify.captcha_reader()
            #captcha_content = self.captcha_reader()
            if len(captcha_content) != 4:
                # Recognition failed (CAPTCHA is always 4 chars); keep the
                # image for later training and retry.
                file(config.FAIL_IMAGE + "%02d.png" % count, "wb").write(self.captcha);
                count += 1
                print("fail")
                continue
            # Start to login
            # NOTE(review): the password value was redacted by the dataset
            # (<PASSWORD> placeholder); originally presumably self.password.
            data = {
                "studentId": self.username,
                "password": <PASSWORD>,
                "rand": captcha_content,
                "Submit2": "提交"
            }
            req = urllib2.Request(config.LOGINURL, urllib.urlencode(data), headers = self.headers)
            starttime = datetime.datetime.now()
            login_response = self.opener.open(req)
            endtime = datetime.datetime.now()
            interval = endtime - starttime
            print "%.5f," % (interval.seconds + interval.microseconds / 1000000.0),
            # A redirect to the home page means the login succeeded.
            if login_response.geturl() == "http://xk.fudan.edu.cn/xk/home.html":
                success_num += 1
                count += 1
                print("success")
            else:
                # Reset the session cookies before retrying.
                self.cj = cookielib.CookieJar()
                self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
                urllib2.install_opener(self.opener)
                file(config.FAIL_IMAGE + "%02d.png" % count, "wb").write(self.captcha);
                count += 1
                print("fail")
            # NOTE(review): stops after 2 attempts regardless of success;
            # looks like a benchmarking cap rather than login logic — confirm.
            if count > 1:
                break
        print("total: %d, success: %d" % (count, success_num))
| StarcoderdataPython |
1769608 | <reponame>Holstrup/ObjectRecognition<gh_stars>1-10
import math, os
from keras.layers import Dense, UpSampling1D
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.resnet50 import ResNet50
# Fine-tune an ImageNet-pretrained ResNet50 on the images under Dataset/train.
DATA_DIR = 'Dataset'
TRAIN_DIR = os.path.join(DATA_DIR, 'train')
SIZE = (224, 224)  # input resolution expected by ResNet50
BATCH_SIZE = 16
EPOCHS = 1
print("-----INITIALIZING-----")
num_train_samples = sum([len(files) for r, d, files in os.walk(TRAIN_DIR)])
num_train_steps = math.floor(num_train_samples / BATCH_SIZE)
gen = ImageDataGenerator()
# NOTE(review): val_gen is never used below — either wire it into a
# validation generator or remove it.
val_gen = ImageDataGenerator(horizontal_flip=True, vertical_flip=True)
batches = gen.flow_from_directory(TRAIN_DIR, target_size=SIZE, class_mode='categorical',
                                  shuffle=True, batch_size=BATCH_SIZE)
model = ResNet50(weights='imagenet')
classes = list(iter(batches.class_indices))
model.layers.pop()
# Phase 1: freeze the pretrained backbone and train only the new head.
for layer in model.layers:
    layer.trainable = False
last = model.layers[-1].output
x = Dense(128, activation="relu")(last)
x = UpSampling1D(size=4)(x)
x = Dense(len(classes), activation="softmax")(x)
finetuned_model = Model(model.input, x)
finetuned_model.compile(optimizer=Adam(lr=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
finetuned_model.summary()
# Invert class_indices so classes[i] is the label name for output index i.
for c in batches.class_indices:
    classes[batches.class_indices[c]] = c
finetuned_model.classes = classes
print("-----STARTING FIRST TRAINING-----")
finetuned_model.fit_generator(batches, steps_per_epoch=num_train_steps, epochs=EPOCHS)
print("-----DONE WITH FIRST TRAINING-----")
# Phase 2: unfreeze every layer for full fine-tuning.
# BUG FIX: the original loop assigned `finetuned_model.trainable = True`
# on every iteration, leaving the individual layers frozen; set each
# layer's trainable flag instead.
# NOTE(review): Keras generally requires re-compiling the model after
# changing `trainable` for the change to take effect — confirm.
for layer in finetuned_model.layers:
    layer.trainable = True
print("-----STARTING SECOND TRAINING-----")
finetuned_model.fit_generator(batches, steps_per_epoch=num_train_steps, epochs=EPOCHS)
print("-----SAVING MODEL-----")
finetuned_model.save('models/resnet50-model')
print("-----DONE-----")
#plot_model(finetuned_model,to_file='demo.png',show_shapes=True)
| StarcoderdataPython |
3246761 | from bundestag import abgeordnetenwatch as aw
from bundestag import vote_prediction as vp
import unittest
import pandas as pd
from pathlib import Path
from fastai.tabular.all import *
class TestPredictions(unittest.TestCase):
    """Smoke tests for the bundestag vote-prediction helpers."""

    @classmethod
    def setUpClass(cls):
        # Load the pre-computed vote / mandate tables shipped with the repo
        # and build the shared fastai dataloaders once for all tests.
        data_dir = Path("./abgeordnetenwatch_data")
        cls.df_all_votes = pd.read_parquet(path=data_dir / "df_all_votes.parquet")
        cls.df_mandates = pd.read_parquet(path=data_dir / "df_mandates.parquet")

        target = "vote"
        split_idx = RandomSplitter(valid_pct=0.2)(cls.df_all_votes)
        tabular = TabularPandas(
            cls.df_all_votes,
            cat_names=["politician name", "poll_id"],
            y_names=[target],
            procs=[Categorify],
            y_block=CategoryBlock,
            splits=split_idx,
        )
        cls.dls = tabular.dataloaders(bs=512)

    def test_split(self):
        """The poll splitter must produce a valid train/valid split."""
        vp.test_poll_split(vp.poll_splitter(self.df_all_votes))

    def test_embeddings(self):
        """Embeddings extracted from a briefly trained learner are sane."""
        learner = tabular_learner(self.dls)
        learner.fit_one_cycle(1, 1e-3)
        vp.test_embeddings(vp.get_embeddings(learner))

    def test_proponents(self):
        """Poll-proponent aggregation passes the library's own checks."""
        vp.test_poll_proponents(
            vp.get_poll_proponents(self.df_all_votes, self.df_mandates))

    def test_learn_val_score(self):
        """A short training run must reach a validation loss below 0.5."""
        learner = tabular_learner(self.dls)
        lrs = learner.lr_find()
        learner.fit_one_cycle(5, lrs.valley)
        s = learner.validate()[0]
        thresh = 0.5
        assert s < thresh, f"Expected validation score ({s}) < {thresh}"
# Run the unittest suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
4815728 | <filename>woof_nf/log.py
import datetime
import pathlib
import re
from typing import Dict, List, Optional, Tuple, Union
# Formatting (ANSI SGR attribute codes)
BOLD = '\u001b[1m'
DIM = '\u001b[2m'
# BUG FIX: SGR code 3 is italic; the original used 4, duplicating UNDERLINE.
ITALIC = '\u001b[3m'
UNDERLINE = '\u001b[4m'
# Colours (bright foreground range, SGR 90-97)
BLACK = '\u001b[90m'
RED = '\u001b[91m'
GREEN = '\u001b[92m'
YELLOW = '\u001b[93m'
BLUE = '\u001b[94m'
MAGENTA = '\u001b[95m'
CYAN = '\u001b[96m'
WHITE = '\u001b[97m'
# Misc
END = '\u001b[0m'  # reset all attributes
VERBOSITY = 1
NO_ANSI = False
# Matches ANSI escape sequences so they can be stripped for plain-text output.
ANSI_ESCAPE = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
# In order to capture log messages prior to setting up the log file, we buffer them here
BUFFER_LOG_MESSAGES = True
LOG_BUFFER: List[Tuple[str, bool, Dict]] = list()
LOG_FH = None
def setup_log_file(log_fp: pathlib.Path) -> None:
    """Open the log file and flush any messages buffered before setup."""
    global BUFFER_LOG_MESSAGES
    global LOG_FH
    LOG_FH = log_fp.open('w')
    BUFFER_LOG_MESSAGES = False
    # Replay everything recorded before the file existed; file-only so the
    # console does not see the same lines a second time.
    for buffered_text, buffered_title, buffered_kargs in LOG_BUFFER:
        render(buffered_text, title=buffered_title, **buffered_kargs, log_file_only=True)
def ftext(text: str, c: str = None, f: Optional[Union[List[str], str]] = None) -> str:
    """Wrap *text* in ANSI colour/typeface escape codes, terminated by END.

    :param text: string to decorate
    :param c: colour name ('black', 'red', 'green', 'yellow', 'blue',
        'magenta', 'cyan', 'white'); None or '' applies no colour
    :param f: a typeface name or list of names
        ('bold', 'underline', 'italic', 'dim')
    :raises ValueError: if *c* is not a recognised colour name
    """
    colour_codes = {
        'black': BLACK,
        'red': RED,
        'green': GREEN,
        'yellow': YELLOW,
        'blue': BLUE,
        'magenta': MAGENTA,
        'cyan': CYAN,
        'white': WHITE,
    }
    ftext = f'{text}{END}'
    # Colour
    if c in colour_codes:
        ftext = colour_codes[c] + ftext
    elif c in {None, ''}:
        pass
    else:
        # Originally `assert False`; a real exception survives `python -O`.
        raise ValueError(f'unknown colour: {c!r}')
    # Typeface: normalise to a list of names.
    if isinstance(f, list):
        f_list = f
    elif isinstance(f, str):
        f_list = [f]
    else:
        f_list = list()
    # BUG FIX: the original compared against 'ITALIC' and 'DIM' (upper case)
    # while callers pass lower-case names (task_msg_body uses f='dim'), so
    # italic/dim were silently never applied.  All names are lower case now.
    if 'bold' in f_list:
        ftext = BOLD + ftext
    if 'underline' in f_list:
        ftext = UNDERLINE + ftext
    if 'italic' in f_list:
        ftext = ITALIC + ftext
    if 'dim' in f_list:
        ftext = DIM + ftext
    return ftext
def render(
    text: str,
    ts: bool = False,
    title: bool = False,
    log_file_only: bool = False,
    **kargs
) -> None:
    """Write *text* to the log file (or pre-setup buffer) and the console.

    :param ts: append a timestamp to the line
    :param title: underline the line with dashes in the log file
    :param log_file_only: skip the console output entirely
    """
    if ts:
        text = f'{text} {get_timestamp()}'
    # Log file (or buffer, before setup_log_file() has run)
    if BUFFER_LOG_MESSAGES:
        LOG_BUFFER.append((text, title, kargs))
    else:
        plain = ANSI_ESCAPE.sub('', text)
        # Drop any caller-supplied flush; the file handle always flushes.
        kargs.pop('flush', None)
        print(plain, **kargs, file=LOG_FH, flush=True)
        if title:
            print('-' * len(plain), file=LOG_FH, flush=True)
    if log_file_only:
        return
    # Console: strip ANSI codes when colour output is disabled.
    if NO_ANSI:
        print(ANSI_ESCAPE.sub('', text), **kargs)
    else:
        print(text, **kargs)
def task_msg_title(text: str) -> None:
    """Print *text* as a task heading: blue + underlined, with a timestamp,
    and dash-underlined in the log file (title=True)."""
    render(ftext(text, c='blue', f='underline'), ts=True, title=True)
def task_msg_body(text: str) -> None:
    """Print an indented body line under a task title, styled black/dim."""
    render(ftext(' ' + text, c='black', f='dim'))
def render_newline() -> None:
    """Emit a single blank line to both the console and the log file."""
    render('\n', end='')
def get_timestamp() -> str:
    """Return the current local time as a dimmed, parenthesised string."""
    now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    return ftext(f'({now})', c='black', f='dim')
| StarcoderdataPython |
116161 | """
CSCI-603: Trees (week 10)
Author: <NAME> @ RIT CS
This is an implementation of a binary tree node.
"""
class BTNode:
    """A single node of a binary tree.

    :slot val:   user-defined payload
    :slot left:  left child (BTNode or None)
    :slot right: right child (BTNode or None)
    """
    __slots__ = 'val', 'left', 'right'

    def __init__(self, val, left=None, right=None):
        """Create a node holding *val* with optional child nodes.

        :param val: the value to store in the node
        :param left: left child (BTNode or None)
        :param right: right child (BTNode or None)
        :return: None
        """
        self.val = val
        self.right = right
        self.left = left
def testBTNode():
    """
    A test function for BTNode.
    :return: None
    """
    parent = BTNode(10)
    left = BTNode(20)
    right = BTNode(30)
    parent.left = left
    parent.right = right
    # BUG FIX: the expected values printed in the labels were shuffled
    # (parent was annotated 30, left 10, right 20); they now match the
    # values actually constructed above.
    print('parent (10):', parent.val)
    print('left (20):', parent.left.val)
    print('right (30):', parent.right.val)
# Run the self-test when executed directly.
if __name__ == '__main__':
    testBTNode()
| StarcoderdataPython |
3353045 | import pygame
import scenes
from utils import Colors
from ..scene_fade import SceneFade
from pygame.locals import BLEND_MULT
from .screen_pause import ScreenPause
from .screen_options import ScreenOptions
from scripts import Keyboard, SoundManager
class Pause(SceneFade):
    """Pause-menu scene overlaid on a snapshot of the current display.

    Shows either the pause screen or the options screen, darkens the
    captured background, and pops itself off the director's stack when
    the player continues or presses the pause key again.
    """
    def __init__(self, undo=False):
        """:param undo: when quitting to the menu, also rewind the current
        cutscene/dialog counters by one."""
        super().__init__()
        self.undo = undo
        # Background
        self.background = pygame.display.get_surface().copy()
        # Screens
        self.screen_pause = ScreenPause(self)
        self.screen_options = ScreenOptions(self)
        # Set the current screen
        self.current_screen = self.screen_pause
        # Click sound
        self.snd_click = SoundManager.load_sound('snd_click.wav')
    def exec_continue(self):
        """Close the pause menu and resume the underlying scene."""
        self.director.pop_scene()
    def exec_options(self):
        """Switch to the options sub-screen."""
        self.current_screen = self.screen_options
    def exec_quit(self):
        """Abandon the game and return to the main menu."""
        self.director.change_scene(scenes.Menu())
        if self.undo:
            # Step the story progression back so it replays after quitting.
            scenes.Cutscene.current_cutscene -= 1
            scenes.ScreenDialog.current_dialog -= 1
    def exec_cancel(self):
        """Leave the options sub-screen without confirming."""
        self.current_screen = self.screen_pause
    def exec_confirm(self):
        """Accept the options and return to the pause screen."""
        self.current_screen = self.screen_pause
    def pre_update(self, events):
        """Handle input events before the per-frame update."""
        super().pre_update(events)
        # Closes the pause menu
        if Keyboard.check_pressed('pause', events):
            self.snd_click.play()
            self.director.pop_scene()
        else:
            # Pre update the current screen
            self.current_screen.pre_update(events, self)
    def update(self, delta_time):
        """Advance the active sub-screen by *delta_time*."""
        super().update(delta_time)
        # Update the current screen
        self.current_screen.update(delta_time)
    def draw(self, screen):
        """Draw the dimmed background snapshot and the active sub-screen."""
        # Draw background
        screen.blit(self.background, (0, 0))
        # Draw shadow over background
        shadow = screen.copy()
        shadow.fill(Colors.LIGHT_GREY)
        shadow.blit(screen, (0, 0), special_flags=BLEND_MULT)
        screen.blit(shadow, (0, 0))
        # Draw the current screen
        self.current_screen.draw(screen)
super().draw(screen) | StarcoderdataPython |
3372249 | <gh_stars>1000+
"""
Test aptly version
"""
from lib import BaseTest
class VersionTest(BaseTest):
    """
    version should match
    """
    # Expand environment variables in the gold file before comparison.
    gold_processor = BaseTest.expand_environ
    # Command whose output is compared against the gold file.
    runCmd = "aptly version"
| StarcoderdataPython |
1751715 | <gh_stars>0
import sqlite3
from tkinter import *
from tkinter import ttk
from PIL import ImageTk,Image
from tkinter import messagebox
import sqlite3
def bookRegister():
    """Read the add-book form fields and insert a new row into main.db."""
    book_id = bookInfo1.get()
    book_title = bookInfo2.get()
    book_author = bookInfo3.get()
    book_status = selected.get()
    # Every field is mandatory; bail out with a dialog if any is blank.
    if book_id == "" or book_title == "" or book_author == "" or book_status == "":
        messagebox.showerror("Error", "All fileds are required !")
        return
    try:
        connection = sqlite3.connect("main.db")
        cursor = connection.cursor()
        cursor.execute("""CREATE TABLE IF NOT EXISTS bookTable (book_id varchar(20) PRIMARY KEY,
            book_title varchar(50),
            author varchar(30),
            status varchar(10))""")
        cursor.execute(
            "insert into bookTable (book_id,book_title,author,status) values(?,?,?,?)",
            (book_id, book_title, book_author, book_status))
        connection.commit()
        messagebox.showinfo('Success', "Book added successfully")
        cursor.close()
        connection.close()
        print(book_id)
        print(book_title)
        print(book_author)
        print(book_status)
    except Exception as e:
        print(e)
        messagebox.showerror("Error", "Can't add data into Database")
def addBook():
    """Build and run the 'Add Books' tkinter window.

    Creates the entry widgets (book id, title, author) and the status
    radio buttons as module-level globals so bookRegister() can read them
    when the SUBMIT button is pressed.  Blocks in root.mainloop().
    """
    global bookInfo1, bookInfo2, bookInfo3, bookInfo4, Canvas1, con, cur, bookTable, root, selected
    root = Tk()
    selected = StringVar()
    root.title("Library")
    root.minsize(width=400,height=400)
    root.geometry("600x500")
    # Heading banner at the top of the window.
    headingFrame1 = Frame(root,bd=5)
    headingFrame1.place(relx=0.25,rely=0.1,relwidth=0.5,relheight=0.13)
    headingLabel = Label(headingFrame1, text="Add Books", fg='green', font=('Courier',25,"bold"))
    headingLabel.place(relx=0,rely=0, relwidth=1, relheight=1)
    # Navy form area holding the labelled entry fields.
    labelFrame = Frame(root,bg="navy")
    labelFrame.place(relx=0.1,rely=0.2,relwidth=0.8,relheight=0.6)
    # Book ID
    lb1 = Label(labelFrame,text="Book ID : ", bg='navy', fg='white',font=("Arial",15))
    lb1.place(relx=0.05,rely=0.2, relheight=0.08)
    bookInfo1 = Entry(labelFrame,font=("Arial",15))
    bookInfo1.place(relx=0.5,rely=0.2, relwidth=0.45,relheight=0.08)
    # Title
    lb2 = Label(labelFrame,text="Title : ", bg='navy', fg='white',font=("Arial",15))
    lb2.place(relx=0.05,rely=0.35, relheight=0.08)
    bookInfo2 = Entry(labelFrame,font=("Arial",15))
    bookInfo2.place(relx=0.5,rely=0.35, relwidth=0.45, relheight=0.08)
    # Book Author
    lb3 = Label(labelFrame,text="Author : ", bg='navy', fg='white',font=("Arial",15))
    lb3.place(relx=0.05,rely=0.50, relheight=0.08)
    bookInfo3 = Entry(labelFrame,font=("Arial",15))
    bookInfo3.place(relx=0.5,rely=0.50, relwidth=0.45, relheight=0.08)
    # Book Status
    lb4 = Label(labelFrame,text="Status : ", bg='navy', fg='white',font=("Arial",15))
    lb4.place(relx=0.05,rely=0.65, relheight=0.08)
    s = ttk.Style()
    s.configure('Wild.TRadiobutton', # First argument is the name of style. Needs to end with: .TRadiobutton
        background="navy", # Setting background to our specified color above
        foreground = "white")
    # NOTE(review): 'Avaliable'/'avaliable' are misspelled; other code may
    # depend on the stored value, so it is left unchanged here.
    r1 = ttk.Radiobutton( text='Avaliable', value="avaliable", variable=selected,style = 'Wild.TRadiobutton')
    r1.place(relx =.5, rely=0.6 ,relwidth=.17)
    r2 = ttk.Radiobutton( text='Issued', value="issued", variable=selected,style = 'Wild.TRadiobutton')
    r2.place(relx =.71, rely=0.6 ,relwidth=.17)
    # r1 = ttk.Radiobutton( text='Male', value="male", variable=selected)
    # r1.place(relx =.5, rely=0.6 ,relwidth=.17)
    # r2 = ttk.Radiobutton( text='Female', value="female", variable=selected)
    # r2.place(relx =.71, rely=0.6 ,relwidth=.17)
    # bookInfo4 = Entry(labelFrame,font=("Arial",15))
    # bookInfo4.place(relx=0.5,rely=0.65, relwidth=0.45, relheight=0.08)
    #Submit Button
    SubmitBtn = Button(root,text="SUBMIT",bg='#d1ccc0', fg='black',command=bookRegister)
    SubmitBtn.place(relx=0.28,rely=0.9, relwidth=0.18,relheight=0.08)
    quitBtn = Button(root,text="Quit",bg='#f7f1e3', fg='black', command=root.destroy)
    quitBtn.place(relx=0.53,rely=0.9, relwidth=0.18,relheight=0.08)
    root.mainloop()
# Script entry point.
if __name__ == '__main__':
    addBook()
| StarcoderdataPython |
3370003 | from django.contrib import admin
from .models import User
from .models import Sender
from .models import Message
# Expose the messaging models in the Django admin site.
admin.site.register(User)
admin.site.register(Sender)
admin.site.register(Message)
| StarcoderdataPython |
4809580 | <filename>dl-on-flink-pytorch/python/dl_on_flink_pytorch/flink_ml/pytorch_train_entry.py
# Copyright 2022 Deep Learning on Flink Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import logging
import os
import pickle
from io import StringIO
from typing import List
import pandas as pd
import torch
import torch.distributed as dist
from dl_on_flink_framework.context import Context
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from dl_on_flink_pytorch.flink_ml.pytorch_estimator_constants import \
MODEL_FACTORY_BASE64, FEATURE_COLS, INPUT_COL_NAMES, INPUT_TYPES, LABEL_COL, \
MODEL_SAVE_PATH, BATCH_SIZE
from dl_on_flink_pytorch.flink_ml.pytorch_model_factory import \
PyTorchModelFactory
from dl_on_flink_pytorch.flink_stream_dataset import FlinkStreamDataset, \
DL_ON_FLINK_TYPE_TO_PYTORCH_TYPE
from dl_on_flink_pytorch.pytorch_context import PyTorchContext
logger = logging.getLogger(__file__)
class PyTorchEstimatorFlinkStreamDataset(FlinkStreamDataset):
    """Streams CSV records from Flink and converts them into tensors.

    Each record is one CSV line whose columns are described by the
    INPUT_COL_NAMES / INPUT_TYPES job properties; FEATURE_COLS selects the
    feature columns and LABEL_COL the target column.
    """

    def __init__(self, context: PyTorchContext):
        super().__init__(context)
        self.pytorch_context = context

    def parse_record(self, record):
        """Parse one CSV line into ``([feature tensors], label tensor)``."""
        input_cols: List[str] = self.pytorch_context.get_property(
            INPUT_COL_NAMES).split(",")
        input_types: List[str] = self.pytorch_context.get_property(
            INPUT_TYPES).split(",")
        feature_cols: List[str] = self.pytorch_context.get_property(
            FEATURE_COLS).split(",")
        label_col = self.pytorch_context.get_property(LABEL_COL)
        df = pd.read_csv(StringIO(record), header=None, names=input_cols)
        # BUG FIX: dtypes are declared per *input* column, so each feature's
        # dtype must be looked up at its position in input_cols (as the label
        # lookup below already does).  The original indexed input_types with
        # the feature's position in feature_cols, which is wrong whenever
        # feature_cols is not an in-order prefix of input_cols.
        feature_tensors = [
            torch.tensor(
                [df[key][0]],
                dtype=DL_ON_FLINK_TYPE_TO_PYTORCH_TYPE[
                    input_types[input_cols.index(key)]])
            for key in feature_cols
        ]
        label_tensor = torch.tensor(
            [df[label_col][0]],
            dtype=DL_ON_FLINK_TYPE_TO_PYTORCH_TYPE[
                input_types[input_cols.index(label_col)]])
        return feature_tensors, label_tensor
def pytorch_train_entry(context: Context):
    """Cluster entry point: train the user-defined model with DDP over gloo.

    Builds the model/loss/optimizer from the pickled PyTorchModelFactory
    carried in the job properties, streams batches from Flink until the
    stream is exhausted, and (on rank 0) saves the unwrapped model.
    """
    pytorch_context = PyTorchContext(context)
    # Wire up torch.distributed using the master node's address/port.
    os.environ['MASTER_ADDR'] = pytorch_context.get_master_ip()
    os.environ['MASTER_PORT'] = str(pytorch_context.get_master_port())
    dist.init_process_group(backend='gloo',
                            world_size=pytorch_context.get_world_size(),
                            rank=pytorch_context.get_rank())
    batch_size = int(pytorch_context.get_property(BATCH_SIZE))
    data_loader = DataLoader(
        PyTorchEstimatorFlinkStreamDataset(pytorch_context),
        batch_size=batch_size)
    # The factory travels base64-encoded pickle inside the job properties.
    model_factory: PyTorchModelFactory = pickle.loads(
        base64.decodebytes(context.get_property(MODEL_FACTORY_BASE64)
                           .encode('utf-8')))
    model = model_factory.create_model(pytorch_context)
    model = DDP(model)
    loss_fn = model_factory.create_loss(pytorch_context)
    optimizer = model_factory.create_optimizer(pytorch_context, model)
    lr_scheduler = model_factory.create_lr_scheduler(pytorch_context, optimizer)
    current_epoch = 1
    while True:
        logger.info(f"Epoch: {current_epoch}")
        has_data = False
        epoch_loss = None
        for batch, (features, label) in enumerate(data_loader):
            has_data = True
            optimizer.zero_grad()
            pred = model(*features)
            loss = loss_fn(pred, label)
            loss.backward()
            optimizer.step()
            # epoch_loss ends up holding the last batch's loss of the epoch.
            epoch_loss = loss
        if not has_data:
            # The Flink input stream is exhausted; stop training.
            break
        # Average the last-batch loss across all workers for logging.
        dist.all_reduce(epoch_loss, op=dist.ReduceOp.SUM)
        epoch_loss = epoch_loss.item() / dist.get_world_size()
        logger.info(
            f"rank: {pytorch_context.get_rank()} epoch: {current_epoch} "
            f"loss: {epoch_loss:>7f}")
        if lr_scheduler is not None:
            lr_scheduler.step()
        current_epoch += 1
    # Only rank 0 persists the model (DDP wrapper stripped via .module).
    if pytorch_context.get_rank() == 0:
        model_save_path = pytorch_context.get_property(MODEL_SAVE_PATH)
        os.makedirs(os.path.dirname(model_save_path), exist_ok=True)
        torch.save(model.module, model_save_path)
| StarcoderdataPython |
1724679 | <reponame>conradludgate/multiplex
from dataclasses import dataclass
from typing import List
from multiplex.refs import SPLIT
class Action:
    # Marker base class for plain (non-box) actions.
    pass
class BoxAction:
    """Base class for actions that operate on a box holder."""
    def run(self, box_holder):
        """Apply this action to *box_holder*; subclasses must override."""
        raise NotImplementedError
@dataclass
class SetTitle(BoxAction):
    """Action that retitles a box's iterator."""

    title: str

    def run(self, box_holder):
        iterator = box_holder.iterator
        new_title = self.title
        # Split iterators keep their original title as a suffix.
        if iterator.iterator is SPLIT:
            new_title += f" ({iterator.title})"
        iterator.title = new_title
class ToggleCollapse(BoxAction):
    """Action that toggles (or explicitly sets) a box's collapsed state."""
    def __init__(self, value=None):
        # value: True/False to force a state, None to toggle.
        self.value = value
    def run(self, box_holder):
        box_holder.box.toggle_collapse(self.value)
class ToggleWrap(BoxAction):
    """Action that toggles (or explicitly sets) a box's line wrapping."""
    def __init__(self, value=None):
        # value: True/False to force a state, None to toggle.
        self.value = value
    def run(self, box_holder):
        box_holder.box.toggle_wrap(self.value)
@dataclass
class UpdateMetadata(BoxAction):
    """Action that merges *metadata* into the iterator's metadata dict."""
    metadata: dict
    def run(self, box_holder):
        box_holder.iterator.metadata.update(self.metadata)
@dataclass
class BoxActions(BoxAction):
    """Composite action: runs a list of BoxActions/callables in order."""
    actions: List[BoxAction]
    def run(self, box_holder):
        for action in self.actions:
            if isinstance(action, BoxAction):
                action.run(box_holder)
            elif callable(action):
                # Bare callables are invoked with the box holder directly.
                action(box_holder)
            else:
                raise RuntimeError(f"Invalid action: {action}")
| StarcoderdataPython |
4840367 | <filename>execution_file.py<gh_stars>0
'''
Execution file for the entire program. Allows users to download new artists or
test the trained model on it's knowledge of the artist's lyrics
'''
import pickle
import spacy
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from token_maker import custom_tokenizer
from artist_search_soup_scrape import search_lyricsdotcom, get_artist_link, get_landing_soup
from soup_parse_file_save import get_lyrics, get_links, get_artist, save_songs, get_song_title
from lyric_compiler import lyric_compiler
from model_bow_generator import prepare_data, preprocessing_data, train_save_model
# Interactive driver: either scrape a new artist (retraining the model) or
# run the lyric-prediction loop against the saved model.
df = pd.read_csv('data/compiled_lyrics/df_total.csv')
artists = df['artist'].unique().tolist()
print('Welcome to the 8 ball lyric scraper and lyric predictor!')
print(
    f"Our current database has {', '.join(artists[0:-1])} and {artists[-1]} on file.")
ques_1 = input('You are welcome to scrape another artist, however please keep in mind this will take some time.\nWould you like to proceed to predictions(1) or scrape an additional artist(2) (enter 1 or 2)? ')
if ques_1 == '2':
    # --- Scrape branch: fetch lyrics, rebuild the corpus, retrain the model.
    ques_2 = input('Which artist would you like to scrape?')
    artist_underscore = ques_2.replace(' ', '_')
    path = search_lyricsdotcom(ques_2)
    soup_path = get_landing_soup(path, artist_underscore)
    ques_3 = input(
        f'Is this the correct link: {get_artist_link(soup_path)} (y or n)?')
    if ques_3 == 'y':
        print('This will take a moment')
        artistpage = get_artist_link(soup_path)
        soup_links = get_landing_soup(artistpage, artist_underscore)
        artist = get_artist(soup_links)
        title = get_song_title(soup_links)
        links = get_links(soup_links)
        save_songs(links, title, artist)
        # NOTE(review): hard-coded absolute path ties the script to one
        # machine; should be configurable.
        file_directory = '/home/samuel/git_projects/lyric_scraper/data/lyrics/'
        lyric_compiler(file_directory, artist)
        corpus_train, corpus_test, y_train, y_test = prepare_data(
            '/home/samuel/git_projects/lyric_scraper/data/compiled_lyrics/df_total.csv')
        bow, X_train, X_test = preprocessing_data(corpus_train, corpus_test)
        print('Now to train you new model so we can add the new artist! Radical!')
        train_save_model(
            LogisticRegression(
                class_weight='balanced'),
            X_train,
            y_train)
    # Load the (possibly just retrained) vectorizer and model from disk.
    with open('models/bow.p', 'rb') as f:
        bow = pickle.load(f)
    with open('models/model.p', 'rb') as f:
        m = pickle.load(f)
    # Prediction loop: empty input exits.
    while True:
        print('So let\'s get to some predictions dude!')
        keywords = input('Enter some lyrics or push return to exit: ')
        artist_pred = m.predict(bow.transform([keywords]))[0]
        artist_prob = round(
            m.predict_proba(
                bow.transform(
                    [keywords])).max(),
            2) * 100
        if keywords == '':
            print('Thanks for hangin! Come again!')
            break
        else:
            print(
                f'The magic 8 ball says the artist is: {artist_pred} with a confidence of {artist_prob}%.')
elif ques_1 == '1':
    # --- Prediction-only branch: load the saved model and loop.
    print('Then lets get to the prediction fun!')
    with open('models/bow.p', 'rb') as f:
        bow = pickle.load(f)
    with open('models/model.p', 'rb') as f:
        m = pickle.load(f)
    while True:
        keywords = input('Enter some lyrics or push return to exit: ')
        artist_pred = m.predict(bow.transform([keywords]))[0]
        artist_prob = round(
            m.predict_proba(
                bow.transform(
                    [keywords])).max(),
            2) * 100
        # might use the below variables in the future to give a more verbose output
        #classes = m.classes_
        #class_percents = np.round(m.predict_proba(bow.transform([keywords])), 2)*100
        if keywords == '':
            print('Thanks for hangin! Come again!')
            break
        else:
            print(
                f'The magic 8 ball says the artist is: {artist_pred} with a confidence of {artist_prob}%.')
| StarcoderdataPython |
3357778 | from rest_framework import serializers
from django.conf import settings
from .models import User
class UserSerializer(serializers.ModelSerializer):
    """Read-only representation of a user for API responses."""

    registered_at = serializers.DateTimeField(format='%H:%M %d.%m.%Y', read_only=True)
    avatar = serializers.SerializerMethodField(read_only=True)
    full_name = serializers.SerializerMethodField(read_only=True)
    short_name = serializers.SerializerMethodField(read_only=True)

    def get_avatar(self, obj):
        """URL of the user's avatar, or the bundled default image."""
        if obj.avatar:
            return obj.avatar.url
        return settings.STATIC_URL + 'images/default_avatar.png'

    def get_full_name(self, obj):
        """Proxy for the model's ``full_name`` attribute."""
        return obj.full_name

    def get_short_name(self, obj):
        """Proxy for the model's ``short_name`` attribute."""
        return obj.short_name

    class Meta:
        model = User
        fields = ['email', 'avatar', 'full_name', 'short_name', 'registered_at']
class UserWriteSerializer(serializers.ModelSerializer):
    """Serializer used for creating/updating users (accepts the password)."""
    class Meta:
        model = User
        fields = ['email', 'password', 'first_name', 'last_name']
| StarcoderdataPython |
3358659 | <reponame>garred/only_fighters
"""<title>an example of layout usage</title>"""
import pygame
from pygame.locals import *
# the following line is not needed if pgu is installed
import sys; sys.path.insert(0, "..")
from pgu import layout
# Demo of the pgu layout module: an aligned image plus two word-wrapped
# text blocks, laid out into a 320x320 window.
pygame.font.init()
screen = pygame.display.set_mode((320,320),SWSURFACE)
bg = (255,255,255)
fg = (0,0,0)
screen.fill(bg)
# Minimal element type: the layout only needs .image/.rect (and .align).
class Obj: pass
l = layout.Layout(pygame.Rect(0,0,320,320))
e = Obj()
e.image = pygame.image.load("cuzco.png")
e.rect = pygame.Rect(0,0,e.image.get_width(),e.image.get_height())
e.align = 1
l.add(e) #aligned object
font = pygame.font.SysFont("default", 24)
w,h = font.size(" ")
l.add(-1) #start of new block
for word in """Welcome to my little demo of the layout module. The demo does not do a whole lot, but I'm sure you will be very impressed by it. blah blah blah. The demo does not do a whole lot, but I'm sure you will be very impressed by it. blah blah blah.""".split(" "):
    e = Obj()
    e.image = font.render(word,1,fg)
    e.rect = pygame.Rect(0,0,e.image.get_width(),e.image.get_height())
    l.add(e) #inline object
    l.add((w,h)) #space
l.add((0,h)) #br
##The layout object will layout words, and document elements for you
##::
l.add(-1) #start of new block
for word in """The demo does not do a whole lot, but I'm sure you will be very impressed by it. blah blah blah. The demo does not do a whole lot, but I'm sure you will be very impressed by it. blah blah blah.""".split(" "):
    e = Obj()
    e.image = font.render(word,1,fg)
    e.rect = pygame.Rect(0,0,e.image.get_width(),e.image.get_height())
    l.add(e) #inline object
    l.add((w,h)) #space
##
# Compute positions, then blit every laid-out widget.
l.resize()
for e in l.widgets:
    screen.blit(e.image,(e.rect.x,e.rect.y))
pygame.display.flip()
# Event loop: exit on window close.
_quit = 0
while not _quit:
    for e in pygame.event.get():
        if e.type is QUIT: _quit = 1
    pygame.time.wait(10)
| StarcoderdataPython |
180472 | <reponame>AndryGamingYT/TwitchChannelAnalyzer<gh_stars>0
from datetime import datetime
class Channel:
    """View of a Twitch channel/search result.

    All fields are exposed as read-only properties except ``started_at``,
    which is writable and normalises its input (see the setter).
    """
    __broadcaster_language: str
    __broadcaster_login: str
    __display_name: str
    __game_id: str
    __game_name: str
    __id: str
    __is_live: bool
    __tags_ids: list[str]
    __thumbnail_url: str
    __title: str
    __started_at: datetime

    def __init__(self, broadcaster_language: str = None, broadcaster_login: str = None, display_name: str = None, game_id: str = None, game_name: str = None, id: str = None, is_live: bool = None, tags_ids: list[str] = None, thumbnail_url: str = None, title: str = None, started_at: datetime = None):
        self.__broadcaster_language = broadcaster_language
        self.__broadcaster_login = broadcaster_login
        self.__display_name = display_name
        self.__game_id = game_id
        self.__game_name = game_name
        self.__id = id
        self.__is_live = is_live
        self.__tags_ids = tags_ids
        self.__thumbnail_url = thumbnail_url
        self.__title = title
        # Route through the property setter so strings get parsed.
        self.started_at = started_at

    @property
    def broadcaster_language(self):
        return self.__broadcaster_language

    @property
    def broadcaster_login(self):
        return self.__broadcaster_login

    @property
    def display_name(self):
        return self.__display_name

    @property
    def game_id(self):
        return self.__game_id

    @property
    def game_name(self):
        return self.__game_name

    @property
    def id(self):
        return self.__id

    @property
    def is_live(self):
        return self.__is_live

    @property
    def tags_ids(self):
        return self.__tags_ids

    @property
    def thumbnail_url(self):
        return self.__thumbnail_url

    @property
    def title(self):
        return self.__title

    @property
    def started_at(self):
        return self.__started_at

    @started_at.setter
    def started_at(self, value):
        """Set the stream start time.

        BUG FIX: the class previously declared two ``started_at`` setters;
        the second (string-only) silently replaced the first, so assigning a
        ``datetime`` raised ``TypeError`` inside ``strptime``.  The merged
        setter accepts both forms.

        Args:
            value: a ``datetime``, a UTC timestamp string in
                ``"%Y-%m-%dT%H:%M:%SZ"`` form, or a falsy value
                (stored as ``""`` — the channel is not live).
        """
        if isinstance(value, datetime):
            self.__started_at = value
        else:
            self.__started_at = datetime.strptime(value, "%Y-%m-%dT%H:%M:%SZ") if value else ""
| StarcoderdataPython |
1781155 | import matplotlib.pyplot as plt
from reliability.Other_functions import make_right_censored_data
from reliability.Nonparametric import KaplanMeier, NelsonAalen, RankAdjustment
from reliability.Distributions import Weibull_Distribution
# Compare three nonparametric survival estimators against a known Weibull
# reference, at three sample sizes, each with 50% right censoring.
dist = Weibull_Distribution(alpha=500, beta=2)
plt.figure(figsize=(12, 7))
samples = [10, 100, 1000]
for i, s in enumerate(samples):
    # Fixed seed so the three panels are reproducible.
    raw_data = dist.random_samples(number_of_samples=s, seed=42)
    # this will multiply-censor 50% of the data
    data = make_right_censored_data(
        data=raw_data, fraction_censored=0.5, seed=42)
    # One panel per sample size (subplot codes 131, 132, 133).
    plt.subplot(131 + i)
    KaplanMeier(failures=data.failures, right_censored=data.right_censored,
                print_results=False, show_plot=True, label='Kaplan-Meier')
    NelsonAalen(failures=data.failures, right_censored=data.right_censored,
                print_results=False, show_plot=True, label='Nelson-Aalen')
    RankAdjustment(failures=data.failures, right_censored=data.right_censored,
                   print_results=False, show_plot=True, label='Rank Adjustment')
    # True survival function for visual reference.
    dist.SF(label='Weibull Distribution', color='red')
    plt.title(str(str(s) + ' samples'))
    plt.legend()
plt.suptitle(
    'Comparison of Kaplan-Meier, Nelson-Aalen, and Rank Adjustment for varying sample sizes with 50% censoring')
plt.tight_layout()
plt.show()
| StarcoderdataPython |
1687772 | import pytest
from minus80.Config import cf,Level
def test_get_attr():
    # Attribute-style set/get must round-trip through the config object.
    cf.test = 'a'
    assert cf.test == 'a'
def test_get_item():
    # Item-style access must not raise for the built-in 'options' section.
    # (The value itself is not asserted; this is an exception-free check.)
    level = cf['options']
def test_set_level_attr():
    # A nested Level object must accept attribute assignment and read-back.
    cf.test = Level()
    cf.test.passed = True
    assert cf.test.passed
def test_get_cloud_creds():
    # The GCP section must expose a truthy bucket name.
    assert cf.gcp.bucket
def test_pprint():
    """repr() of the config must not raise and must yield a string."""
    # BUG FIX: the original ended with the vacuous `assert True`, so the
    # test could never fail on the repr result itself.
    x = repr(cf)
    assert isinstance(x, str)
| StarcoderdataPython |
76540 | <gh_stars>1-10
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup
import time
import requests
def get_daum_news_title(news_id):
    """Return the headline of Daum news article *news_id*, or '' if absent."""
    resp = requests.get('https://news.v.daum.net/v/{}'.format(news_id))
    soup = BeautifulSoup(resp.text, 'html.parser')
    tag = soup.select_one('h3.tit_view')
    return tag.get_text() if tag else ''
def get_daum_news_content(news_id):
    """Return the article body text, one paragraph per line."""
    resp = requests.get('https://news.v.daum.net/v/{}'.format(news_id))
    soup = BeautifulSoup(resp.text, 'html.parser')
    # Each <p> inside the harmony container is one paragraph of the body.
    paragraphs = [p.get_text() + '\n' for p in soup.select('div#harmonyContainer p')]
    return ''.join(paragraphs)
print( get_daum_news_title('20200105213021253') )
print( get_daum_news_content('20200105213021253'))
print()
print()
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7',
'Authorization': '<KEY>',
'Connection': 'keep-alive',
'Cookie': 'webid=1abe29c8ef854a0eb1d67fba198438f7; SLEVEL=0; HM_CU=5CyID0CuD79; PROF=0603012032024076024140UiQPJk7X-6w0mlxoempuua-X8T9SYaoHRiH_UXIqrSdJKuZP3z7wEl2bxriNh5SdkQ00LYYSA9A1_cGNLCyhCzrwOkP8vT4.SomZE8SzF8R5U6hNJuTinJs6upShqUiqe6jrGQxWoed2CVY0fIauQlo8OTLLnHHY.bHDTw005.D3F1Q7ttKHZZwwRPDWZskvueiTnsl5ty2xKV.z9E8p8KEY8qjVdkhOFke8Idmp2_jHhhQq2q.QFLx92AjMPRZizKu7EweEYh4ZS3pV8w1Vf_732JGeBzeyes5.51L5jzgRyoUfLxQ0; TS=1578234315; HTS=cJ1JXe.8wi6Ha_.o-CPDHw00; ALID=luaRwQb94aR39blund9BtUV8GdoClbNdiEczUHuU4EuHrO5AjEnIN5oszMeiixP2w999UI; ALCT=KsLAFvjGDXvfJvS488LXb9cndebdPs54YXyNM37H_GU; LSID=99b7bd02-710a-4b42-afb4-2277adeb00691578234315888; AGEN=JbuDNXrLwilSlqYhl81loTO5mD3fLKW1Dedu9H7kbiQ; webid_sync=1578234857517; TIARA=YWLCGP4HVK6nIsjIAxHtUkUlpZLNAFSw66.e4FV4yKVsvTYLphFNkzW7GkIwmQVdDl5rU-gf3Uj9rT1gsyUIcSZVnHNql7dJ',
'Host': 'comment.daum.net',
'Origin': 'https://news.v.daum.net',
'Referer': 'https://news.v.daum.net/v/20200105213021253',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-site',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'
}
url = 'https://comment.daum.net/apis/v1/posts/@20200105213021253/comments?parentId=0&offset=543&limit=10&sort=RECOMMEND&isInitial=false'
resp = requests.get(url, headers=headers)
print( resp.json() )
def get_daum_news_comments(news_id):
    """Collect every top-level comment of a Daum news article.

    Pages through the comment API 10 items at a time until an empty page
    is returned, accumulating all comment dicts into one list.
    """
    headers = {
        'Accept': '*/*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7',
        'Authorization': '<KEY>',
        'Connection': 'keep-alive',
        'Cookie': 'webid=1abe29c8ef854a0eb1d67fba198438f7; SLEVEL=0; HM_CU=5CyID0CuD79; PROF=0603012032024076024140UiQPJk7X-6w0mlxoempuua-X8T9SYaoHRiH_UXIqrSdJKuZP3z7wEl2bxriNh5SdkQ00LYYSA9A1_cGNLCyhCzrwOkP8vT4.SomZE8SzF8R5U6hNJuTinJs6upShqUiqe6jrGQxWoed2CVY0fIauQlo8OTLLnHHY.bHDTw005.D3F1Q7ttKHZZwwRPDWZskvueiTnsl5ty2xKV.z9E8p8KEY8qjVdkhOFke8Idmp2_jHhhQq2q.QFLx92AjMPRZizKu7EweEYh4ZS3pV8w1Vf_732JGeBzeyes5.51L5jzgRyoUfLxQ0; TS=1578234315; HTS=cJ1JXe.8wi6Ha_.o-CPDHw00; ALID=luaRwQb94aR39blund9BtUV8GdoClbNdiEczUHuU4EuHrO5AjEnIN5oszMeiixP2w999UI; ALCT=KsLAFvjGDXvfJvS488LXb9cndebdPs54YXyNM37H_GU; LSID=99b7bd02-710a-4b42-afb4-2277adeb00691578234315888; AGEN=JbuDNXrLwilSlqYhl81loTO5mD3fLKW1Dedu9H7kbiQ; webid_sync=1578234857517; TIARA=YWLCGP4HVK6nIsjIAxHtUkUlpZLNAFSw66.e4FV4yKVsvTYLphFNkzW7GkIwmQVdDl5rU-gf3Uj9rT1gsyUIcSZVnHNql7dJ',
        'Host': 'comment.daum.net',
        'Origin': 'https://news.v.daum.net',
        'Referer': 'https://news.v.daum.net/v/20200105213021253',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'same-site',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'
    }
    url_template = 'https://comment.daum.net/apis/v1/posts/@{}/comments?parentId=0&offset={}&limit=10&sort=RECOMMEND&isInitial=false'
    comments = []
    offset = 0
    # Keep requesting pages until the API hands back an empty list.
    while (page := requests.get(url_template.format(news_id, offset), headers=headers).json()):
        comments.extend(page)
        offset += 10
    return comments
# Demo: fetch and count all top-level comments for one article.
print( len( get_daum_news_comments('20200105213021253') ) )
| StarcoderdataPython |
3316533 | from mitmproxy import http
from response_recorder import HttpRequest, ResponseRecorder
def request(flow: http.HTTPFlow) -> None:
    """Short-circuit the flow with a response previously saved to disk.

    Looks up the recorded response for this (method, url) pair; when a body
    was recorded the flow is answered with it and never reaches the server.
    On any failure a 502 stub response is set so the client sees the error,
    and the exception is re-raised so mitmproxy logs it.
    """
    try:
        stub_response = ResponseRecorder.load_response(
            HttpRequest(method=flow.request.method, url=flow.request.url)
        )
        if stub_response.body:
            flow.response = http.Response.make(
                stub_response.status, stub_response.body, stub_response.headers
            )
    except Exception as e:
        # Fixed typos in the user-visible message ("occered" / "respones").
        flow.response = http.Response.make(
            502, f"Error has occurred during response load. error: {e}", {}
        )
        print(f"Error has occurred during response load. error: {e}")
        # Bare raise preserves the original traceback.
        raise
| StarcoderdataPython |
146925 | <reponame>bmcculley/mailhide
import requests
import yaml
def load_config(filname="config.yaml"):
    """Load and return the YAML configuration from *filname*.

    Parameter name is kept as-is (typo included) for call compatibility.
    """
    # Context manager guarantees the handle is closed; the original
    # opened the file and never closed it.
    with open(filname, 'r') as stream:
        return yaml.load(stream, Loader=yaml.FullLoader)
def verify(private_key, response, client_ip):
    """Ask the reCAPTCHA siteverify endpoint whether *response* is valid.

    Returns the boolean "success" field from the verification payload.
    """
    reply = requests.get(
        "https://www.recaptcha.net/recaptcha/api/siteverify",
        params={
            "secret": private_key,
            "response": response,
            "remoteip": client_ip,
        },
    )
    return reply.json()["success"]
160857 | <gh_stars>10-100
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
def do_action(bbox, opts, act, imSize):
    """Apply action *act* to a bounding box and return the resulting box.

    Args:
        bbox:   box as [x, y, w, h] (top-left corner plus size).
        opts:   options dict; only opts['action_move'] is read.  It holds
                per-axis move factors 'x'/'y'/'w'/'h' and a 'deltas' table
                indexed by action id.
        act:    row index into opts['action_move']['deltas'].
        imSize: image size; imSize[1] clamps x/w and imSize[0] clamps y/h
                (i.e. (height, width) ordering).

    Returns:
        numpy array [x, y, w, h]: the moved/resized box clamped to the image.
    """
    m = opts['action_move']

    # Work with local center coordinates instead of mutating the caller's
    # bbox in place: the original's add-then-subtract "restore" could leave
    # bbox perturbed by floating-point error.
    cx = bbox[0] + 0.5 * bbox[2]
    cy = bbox[1] + 0.5 * bbox[3]

    # Per-axis step sizes proportional to the box size, at least 1 pixel.
    deltas = [m['x'] * bbox[2],
              m['y'] * bbox[3],
              m['w'] * bbox[2],
              m['h'] * bbox[3]]
    deltas = np.maximum(deltas, 1)

    # Preserve aspect ratio: derive the smaller side's step from the larger.
    ar = bbox[2] / bbox[3]
    if bbox[2] > bbox[3]:
        deltas[3] = deltas[2] / ar
    else:
        deltas[2] = deltas[3] * ar

    action_delta = np.multiply(np.array(m['deltas'])[act, :], deltas)
    bbox_next = np.array([cx, cy, bbox[2], bbox[3]]) + action_delta

    # Convert back from center to corner representation.
    bbox_next[0] = bbox_next[0] - 0.5 * bbox_next[2]
    bbox_next[1] = bbox_next[1] - 0.5 * bbox_next[3]

    # Clamp position and size to stay inside the image.
    bbox_next[0] = np.maximum(bbox_next[0], 1)
    bbox_next[0] = np.minimum(bbox_next[0], imSize[1] - bbox_next[2])
    bbox_next[1] = np.maximum(bbox_next[1], 1)
    bbox_next[1] = np.minimum(bbox_next[1], imSize[0] - bbox_next[3])
    bbox_next[2] = np.maximum(5, np.minimum(imSize[1], bbox_next[2]))
    bbox_next[3] = np.maximum(5, np.minimum(imSize[0], bbox_next[3]))
    return bbox_next
| StarcoderdataPython |
1676897 | <gh_stars>1-10
import logging
from cachetools import LRUCache, TTLCache, cached
from gw2api import GuildWars2Client
from typing import List, Optional
from .account import Account
from .character import Character
from .guild import AnonymousGuild, Guild
from .world import World
# Available api endpoints:
# account, accountachievements, accountbank, accountdungeons, accountdyes,
# accountfinishers, accountgliders, accounthomecats, accounthomenodes, accountinventory,
# accountmailcarriers, accountmasteries, accountmasterypoints, accountmaterials, accountminis,
# accountoutfits, accountpvpheroes, accountraids, accountrecipes, accountskins,
# accounttitles, accountwallet, achievements, achievementscategories, achievementsdaily,
# achievementsdailytomorrow, achievementsgroups, backstoryanswers, backstoryquestions,
# build, cats, characters, colors, commercedelivery,
# commerceexchange, commerceexchangecoins, commerceexchangegems,
# commercelistings, commerceprices, commercetransactions, continents,
# currencies, dungeons, emblem, files, finishers, gliders,
# guildid, guildidlog, guildidmembers, guildidranks, guildidstash,
# guildidteams, guildidtreasury, guildidupgrades, guildpermissions,
# guildsearch, guildupgrades, items, itemstats, lang, legends,
# mailcarriers, maps, masteries, materials, minis, nodes,
# outfits, pets, professions, proxy, pvp, pvpamulets, pvpgames,
# pvpheroes, pvpranks, pvpseasons, pvpseasonsleaderboards, pvpstandings,
# pvpstats, quaggans, races, raids, recipes, recipessearch, session,
# skills, skins, specializations, stories, storiesseasons, titles,
# tokeninfo, traits, version, worlds, wvw, wvwabilities,
# wvwmatches, wvwmatchesstatsteams, wvwobjectives, wvwranks, wvwupgrades
#
# Module-level logger for this package.
LOG = logging.getLogger(__name__)
@cached(cache=TTLCache(maxsize=32, ttl=300))  # cache user specific clients for 5 min - creation takes quite long
def _create_client(api_key: str = None) -> GuildWars2Client:
    """Build (or return a cached) GW2 API client bound to *api_key*."""
    return GuildWars2Client(version='v2', api_key=api_key)
# Shared key-less client, reused to save initialization time for requests
# that need no API key.
_anonymousClient = _create_client()
def _check_error(result):
    """Return *result* unchanged, or raise a mapped ApiError subtype.

    The GW2 API signals errors by embedding a "text" field in the payload;
    this translates the known error strings into exceptions.
    """
    if "text" not in result:
        return result
    error_text = result["text"]
    LOG.info("Api returned error: %s", error_text)
    # "ErrTimeout" shows up when the login server is down; "ErrInternal"
    # is a generic transient server-side failure.
    if error_text in ("ErrTimeout", "ErrInternal"):
        raise ApiUnavailableError(error_text)
    # Covers malformed keys as well as revoked/expired tokens.
    if error_text in ("invalid key", "Invalid access token"):
        raise ApiKeyInvalidError(error_text)
    raise ApiError(error_text)
class ApiError(RuntimeError):
    """Base class for all GW2 API failures raised by this module."""
    pass
class ApiUnavailableError(ApiError):
    """The API was reachable but reported a transient server-side error."""
    pass
class ApiKeyInvalidError(ApiError):
    """The supplied API key / access token was rejected."""
    pass
@cached(cache=TTLCache(maxsize=20, ttl=60 * 60))  # cache guild lookups for 1h
def guild_get(guild_id: str) -> Optional[AnonymousGuild]:
    """Return the public (anonymous) view of the guild with *guild_id*."""
    result = _anonymousClient.guildid.get(guild_id)
    return _check_error(result)
@cached(cache=TTLCache(maxsize=10, ttl=300))  # cache authorized guild lookups for 5 min
def guild_get_full(api_key: str, guild_id: str) -> Optional[Guild]:
    """Return the full guild details, authorized via *api_key*."""
    api = _create_client(api_key=api_key)
    result = api.guildid.get(guild_id)
    return _check_error(result)
@cached(cache=TTLCache(maxsize=32, ttl=600))  # cache for 10 min
def guild_search(guild_name: str) -> Optional[str]:
    """Resolve a guild name to its id; ``None`` when nothing matches.

    Raises:
        ApiError: when the name is ambiguous (more than one match).
    """
    search_result = _anonymousClient.guildsearch.get(name=guild_name)
    search_result = _check_error(search_result)
    # Empty list: no guild with that exact name.
    if not search_result:
        return None
    if len(search_result) > 1:
        raise ApiError("More than one guild found for name: " + guild_name)
    return search_result[0]
@cached(cache=TTLCache(maxsize=32, ttl=300))  # cache per-key account data for 5 min
def account_get(api_key: str) -> Account:
    """Return the account bound to *api_key*."""
    api = _create_client(api_key=api_key)
    return _check_error(api.account.get())
@cached(cache=TTLCache(maxsize=32, ttl=300))  # cache per-key character data for 5 min
def characters_get(api_key: str) -> List[Character]:
    """Return the account's characters (single page of up to 200 entries)."""
    api = _create_client(api_key=api_key)
    return _check_error(api.characters.get(page="0", page_size=200))
@cached(cache=LRUCache(maxsize=10))
def worlds_get_ids() -> List[int]:
    """Return the ids of all game worlds (LRU-cached, no expiry)."""
    return _check_error(_anonymousClient.worlds.get(ids=None))
def worlds_get_by_ids(ids: List[int]) -> List[World]:
    """Return the world objects for the given ids (uncached)."""
    return _check_error(_anonymousClient.worlds.get(ids=ids))
@cached(cache=LRUCache(maxsize=10))
def worlds_get_one(world_id: int = None) -> Optional[World]:
    """Return the world with *world_id*, or ``None`` when not found."""
    matches = worlds_get_by_ids([world_id])
    # Exactly one hit is the success case; anything else means "not found".
    return matches[0] if len(matches) == 1 else None
| StarcoderdataPython |
4810690 | # Asignamos de diferentes maneras los strings
# Demonstrates the different ways of assigning string literals.
# Single-line literals (single or double quotes are equivalent):
name = '<NAME>'
welcome = "Bienvenidos/as"
# type() of both literals is <class 'str'>.
type_name = type(name)
type_welcome = type(welcome)
# Multi-line (triple-quoted) literals; the second one keeps the leading
# and trailing newlines that sit inside the quotes.
multi_line_without_blank_space = '''3737373783
eueueueueueue'''
multi_line_with_blank_space = '''
3737373783
eueueueueueue
'''
print("Final")
1749534 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import numbers
from contextlib import closing
from typing import Any, Iterable, Mapping, Optional, Sequence, Union
from airflow.operators.sql import BaseSQLOperator
from airflow.providers.google.suite.hooks.sheets import GSheetsHook
class SQLToGoogleSheetsOperator(BaseSQLOperator):
    """
    Copy data from SQL results to provided Google Spreadsheet.
    :param sql: The SQL to execute.
    :param spreadsheet_id: The Google Sheet ID to interact with.
    :param conn_id: the connection ID used to connect to the database.
    :param parameters: The parameters to render the SQL query with.
    :param database: name of database which overwrite the defined one in connection
    :param spreadsheet_range: The A1 notation of the values to retrieve.
    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = (
        "sql",
        "spreadsheet_id",
        "spreadsheet_range",
        "impersonation_chain",
    )
    template_fields_renderers = {"sql": "sql"}
    template_ext: Sequence[str] = (".sql",)
    ui_color = "#a0e08c"

    def __init__(
        self,
        *,
        sql: str,
        spreadsheet_id: str,
        sql_conn_id: str,
        parameters: Optional[Union[Mapping, Iterable]] = None,
        database: Optional[str] = None,
        spreadsheet_range: str = "Sheet1",
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.sql = sql
        self.conn_id = sql_conn_id
        self.database = database
        self.parameters = parameters
        self.gcp_conn_id = gcp_conn_id
        self.spreadsheet_id = spreadsheet_id
        self.spreadsheet_range = spreadsheet_range
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain

    def _data_prep(self, data):
        """Normalize DB rows for the Sheets API.

        Dates/datetimes become ISO-8601 strings; non-int numbers become
        plain floats (ints pass through untouched).  Yields one list per row.
        """
        for row in data:
            item_list = []
            for item in row:
                if isinstance(item, (datetime.date, datetime.datetime)):
                    item = item.isoformat()
                elif isinstance(item, int):  # To exclude int from the number check.
                    pass
                elif isinstance(item, numbers.Number):
                    item = float(item)
                item_list.append(item)
            yield item_list

    def _get_data(self):
        """Run the query and yield the column-name header row, then data rows."""
        hook = self.get_db_hook()
        with closing(hook.get_conn()) as conn, closing(conn.cursor()) as cur:
            self.log.info("Executing query")
            cur.execute(self.sql, self.parameters or ())
            yield [field[0] for field in cur.description]
            yield from self._data_prep(cur.fetchall())

    def execute(self, context: Any) -> None:
        """Fetch the SQL result and write it into the configured sheet range."""
        self.log.info("Getting data")
        values = list(self._get_data())
        self.log.info("Connecting to Google")
        sheet_hook = GSheetsHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        # Lazy %-formatting so the message is only built when INFO is enabled.
        self.log.info(
            "Uploading data to https://docs.google.com/spreadsheets/d/%s", self.spreadsheet_id
        )
        sheet_hook.update_values(
            spreadsheet_id=self.spreadsheet_id,
            range_=self.spreadsheet_range,
            values=values,
        )
| StarcoderdataPython |
4817556 | """
Module: 'requests' on esp32_LoBo
MCU: (sysname='esp32_LoBo', nodename='esp32_LoBo', release='3.2.24', version='ESP32_LoBo_v3.2.24 on 2018-09-06', machine='ESP32 board with ESP32')
Stubber: 1.0.0
"""
# Auto-generated firmware stubs (created by Stubber, see module docstring).
# Bodies are intentionally empty: these functions exist only so editors and
# type-checkers can resolve the names of the on-device 'requests' module.
def certificate():
    pass
def debug():
    pass
def get():
    pass
def head():
    pass
def patch():
    pass
def post():
    pass
def put():
    pass
| StarcoderdataPython |
1654643 | """
使用簡單的前饋神經網路feed-forward neural network來訓練並預測mnist資料集,用我自行產生的csv檔來當做資料來源.
see tensorflow-1.6.0/tensorflow/examples/tutorials/mnist/fully_connected_feed.py
"""
import tensorflow as tf
from mnist import mnist_dataset as md
from mnist import mnist_core as mnist
#from tensorflow.examples.tutorials.mnist import mnist
from six.moves import xrange
import time
import os
def placeholder_inputs(batch_size):
    """Create two tf placeholders for the input images and labels.
    These placeholders are used as inputs by the rest of the model building
    code and will be fed from the downloaded data in the .run() loop, below.
    Args:
      batch_size: The batch size will be baked into both placeholders.
    Returns:
      images_placeholder: Images placeholder.
      labels_placeholder: Labels placeholder.
    """
    # TF1 graph-mode placeholders; labels are a vector of per-example
    # integer class ids (shape=(batch_size)).
    images_placeholder = tf.placeholder(tf.float32, shape=(batch_size, md.IMAGE_PIXELS))
    labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))
    return images_placeholder, labels_placeholder
def fill_feed_dict(data_set, batch_size, images_pl, labels_pl):
    """Fills the feed_dict for training the given step.
    A feed_dict takes the form of:
    feed_dict = {
        <placeholder>: <tensor of values to be passed for placeholder>,
        ....
    }
    Args:
      data_set: The set of images and labels, from input_data.read_data_sets()
      batch_size: number of examples pulled from data_set for this step
      images_pl: The images placeholder, from placeholder_inputs().
      labels_pl: The labels placeholder, from placeholder_inputs().
    Returns:
      feed_dict: The feed dictionary mapping from placeholders to values.
    """
    images_feed, labels_feed = data_set.next_batch(batch_size)
    feed_dict = {
        images_pl: images_feed,
        labels_pl: labels_feed,
    }
    return feed_dict
def do_eval(sess,
            eval_correct,
            images_placeholder,
            labels_placeholder,
            data_set,
            batch_size):
    """Runs one evaluation against the full epoch of data.
    Args:
      sess: The session in which the model has been trained.
      eval_correct: The Tensor that returns the number of correct predictions.
      images_placeholder: The images placeholder.
      labels_placeholder: The labels placeholder.
      data_set: The set of images and labels to evaluate, from input_data.read_data_sets().
      batch_size: examples evaluated per sess.run() call; examples beyond
        the last full batch are ignored (integer division below).
    """
    # And run one epoch of eval.
    true_count = 0  # Counts the number of correct predictions.
    steps_per_epoch = data_set.num_examples // batch_size
    num_examples = steps_per_epoch * batch_size
    for step in xrange(steps_per_epoch):
        feed_dict = fill_feed_dict(data_set,
                                   batch_size,
                                   images_placeholder,
                                   labels_placeholder)
        true_count += sess.run(eval_correct, feed_dict=feed_dict)
    # Precision is printed only, not returned.
    precision = float(true_count) / num_examples
    print('Num examples: %d Num correct: %d Precision @ 1: %0.04f' %
          (num_examples, true_count, precision))
def run_training(batch_size, hidden1, hidden2, learning_rate, log_dir, max_steps):
    """Train MNIST for a number of steps.
    Args:
      batch_size: number of samples fed to the network per training step
      hidden1: number of neurons in the first hidden layer
      hidden2: number of neurons in the second hidden layer
      learning_rate: the learning rate
      log_dir: directory where checkpoints and event files are written
      max_steps: how many steps to train; NOT the number of epochs -- each
        step trains on one batch, so with batch_size=100 and max_steps=200
        only 20000 samples are consumed in total
    """
    # Get the sets of images and labels for training, validation, and
    # test on MNIST.
    # NOTE(review): data paths are hard-coded to a local machine -- adjust
    # before running elsewhere.
    trainingSet = md.DataSet('/Users/ken/Development/java/MyJavaDojo/mnist-training-set.csv')
    testSet = md.DataSet('/Users/ken/Development/java/MyJavaDojo/mnist-test-set.csv')
    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder = placeholder_inputs(
            batch_size)
        # Build a Graph that computes predictions from the inference model.
        logits = mnist.inference(images_placeholder,
                                 hidden1,
                                 hidden2)
        # Add to the Graph the Ops for loss calculation.
        loss = mnist.loss(logits, labels_placeholder)
        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = mnist.training(loss, learning_rate)
        # Add the Op to compare the logits to the labels during evaluation.
        eval_correct = mnist.evaluation(logits, labels_placeholder)
        # Build the summary Tensor based on the TF collection of Summaries.
        summary = tf.summary.merge_all()
        # Add the variable initializer Op.
        init = tf.global_variables_initializer()
        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()
        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        # Instantiate a SummaryWriter to output summaries and the Graph.
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        # And then after everything is built:
        # Run the Op to initialize the variables.
        sess.run(init)
        # Start the training loop.
        for step in xrange(max_steps):
            start_time = time.time()
            # Fill a feed dictionary with the actual set of images and labels
            # for this particular training step.
            feed_dict = fill_feed_dict(trainingSet,
                                       batch_size,
                                       images_placeholder,
                                       labels_placeholder)
            # Run one step of the model.  The return values are the activations
            # from the `train_op` (which is discarded) and the `loss` Op.  To
            # inspect the values of your Ops or variables, you may include them
            # in the list passed to sess.run() and the value tensors will be
            # returned in the tuple from the call.
            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
            duration = time.time() - start_time
            # Write the summaries and print an overview fairly often.
            if step % 100 == 0:
                # Print status to stdout.
                print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
                # Update the events file.
                summary_str = sess.run(summary, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()
            # Save a checkpoint and evaluate the model periodically.
            if (step + 1) % 1000 == 0 or (step + 1) == max_steps:
                checkpoint_file = os.path.join(log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_file, global_step=step)
                # Evaluate against the training set.
                print('Training Data Eval:')
                do_eval(sess,
                        eval_correct,
                        images_placeholder,
                        labels_placeholder,
                        trainingSet,
                        batch_size)
                # I don't have validation set
                # Evaluate against the validation set.
                #print('Validation Data Eval:')
                #do_eval(sess,
                #        eval_correct,
                #        images_placeholder,
                #        labels_placeholder,
                #        data_sets.validation)
                # Evaluate against the test set.
                print('Test Data Eval:')
                do_eval(sess,
                        eval_correct,
                        images_placeholder,
                        labels_placeholder,
                        testSet,
                        batch_size)
if __name__ == '__main__':
    # Not sure why, but the batch size must be large enough here for the
    # loss to drop significantly.  (Translated from the original comment.)
    run_training(batch_size = 5000, hidden1 = 128, hidden2 = 32, learning_rate=0.01, log_dir='/Users/ken', max_steps=2000)
153506 | <reponame>MikeChurvis/mikechurvis.github.io<filename>api-v2/ContactForm/migrations/0008_alter_contactformentry_message.py
# Generated by Django 4.0.4 on 2022-05-25 22:06
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: re-declare ContactFormEntry.message with a
    1000-char max length and a 20-char minimum-length validator."""

    dependencies = [
        ('ContactForm', '0007_alter_contactformentry_organization'),
    ]

    operations = [
        migrations.AlterField(
            model_name='contactformentry',
            name='message',
            field=models.TextField(max_length=1000, validators=[django.core.validators.MinLengthValidator(20)]),
        ),
    ]
| StarcoderdataPython |
3327379 | <gh_stars>1-10
import numpy as np
import math
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
The Tweaker class handles various tweaks used when testing and
developing.
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class Tweaker:
    """Bag of experiment knobs (rewards, epsilon decay, error rates).

    All values are computed at construction time; several derived parameters
    (AGS/BGS, wGS/bGS, k, AE/BE, wE/bE) depend on the flags set just above
    them, so the assignment order below matters.
    """
    def __init__(self):
        # Number of observations in training data to be generated.
        self.trainingIterations = 3000
        # Number of observations in test data to be generated.
        self.testIterations = 3000
        """ Reward parameters"""
        # Take ground state into account.
        self.checkGS = False
        # Growing reward scheme for ground state.
        self.GSgrowth = False
        # Reward for correct ground state (if growing reward scheme, this is the final reward).
        self.correctGsR = 5
        # Reward for incorrect ground state.
        self.incorrectGsR = -1
        # Reward for taking a step (action).
        self.stepR = -1
        # Growing reward scheme for correct ground state according to the formula.
        # R_GS = A * tanh(w(x+b))+ B
        # Reward scheme curve independent of number of observations.
        self.groundStateShape = False
        # Growing reward scheme function parameters.
        self.AGS = 0.5 * self.correctGsR
        self.BGS = self.AGS
        if self.groundStateShape:
            self.wGS = math.pi / (0.275 * self.trainingIterations)
            self.bGS = 0.39 * self.trainingIterations
        else:
            self.wGS = math.pi / 55000
            self.bGS = 78000
        """ Epsilon decay """
        # Epsilon if decay is not used.
        self.epsilon = 0.1
        # Use decaying epsilon.
        self.epsilonDecay = False
        # Epsilon dependent of time.
        self.time = 3600 * 22 / 10
        # Epsilon decay curve independent of number of iterations.
        self.epsilonShape = True
        self.alpha = -0.9
        if self.epsilonShape:
            self.k = self.time / 10
        else:
            self.k = 7000
        """ Error rate while training"""
        # Use growing error rate.
        self.errorGrowth = False
        # Error growth curve shape independent of number of iterations.
        self.errorShape = False
        # Error rate if growing error rate is not used. (if growing: this is the final error rate)
        self.Pe = 0.1
        # Initial error rate (only relevant if growing error rate is used)
        self.Pei = 0.04
        # Error rate for test data.
        self.PeTest = 0.1
        # Error rate is growing according to the formula
        # P_e = A * tanh(w(x+b))+ B
        # Set parameters in error growth rate function.
        self.AE = 0.5 * self.Pe - 0.5 * self.Pei
        self.BE = 0.5 * self.Pe + 0.5 * self.Pei
        if self.errorShape:
            self.wE = math.pi / (0.125*self.trainingIterations)
            self.bE = -0.13 * self.trainingIterations
        else:
            self.wE = math.pi/25000
            self.bE = -26000
        # If validation should be conducted while training.
        self.isCollectingTrainingStats = False
if __name__ == '__main__':
    tweak = Tweaker()
    # Attribute values keyed by their output file stem.  The stems "bbGS"
    # and "BEcap" look like typos for "bGS" / "BE" but are kept verbatim
    # because downstream code loads the .npy files by these exact names.
    saved_tweaks = {
        "trainingIterations": tweak.trainingIterations,
        "testIterations": tweak.testIterations,
        "checkGS": tweak.checkGS,
        "GSgrowth": tweak.GSgrowth,
        "correctGsR": tweak.correctGsR,
        "incorrectGsR": tweak.incorrectGsR,
        "stepR": tweak.stepR,
        "groundStateShape": tweak.groundStateShape,
        "AGS": tweak.AGS,
        "BGS": tweak.BGS,
        "wGS": tweak.wGS,
        "bbGS": tweak.bGS,
        "epsilonDecay": tweak.epsilonDecay,
        "epsilon": tweak.epsilon,
        "epsilonShape": tweak.epsilonShape,
        "alpha": tweak.alpha,
        "k": tweak.k,
        "errorGrowth": tweak.errorGrowth,
        "errorShape": tweak.errorShape,
        "Pe": tweak.Pe,
        "Pei": tweak.Pei,
        "PeTest": tweak.PeTest,
        "AE": tweak.AE,
        "BEcap": tweak.BE,
        "wE": tweak.wE,
        "bE": tweak.bE,
        "isCollectingTrainingStats": tweak.isCollectingTrainingStats,
    }
    # One data-driven loop replaces 27 copy-pasted np.save calls.
    for stem, value in saved_tweaks.items():
        np.save("Tweaks/{}.npy".format(stem), value)
| StarcoderdataPython |
3355209 | <filename>podrum/config.py
################################################################################
# #
# ____ _ #
# | _ \ ___ __| |_ __ _ _ _ __ ___ #
# | |_) / _ \ / _` | '__| | | | '_ ` _ \ #
# | __/ (_) | (_| | | | |_| | | | | | | #
# |_| \___/ \__,_|_| \__,_|_| |_| |_| #
# #
# Copyright 2021 <NAME> #
# #
# Permission is hereby granted, free of charge, to any person #
# obtaining a copy of this software and associated documentation #
# files (the "Software"), to deal in the Software without restriction, #
# including without limitation the rights to use, copy, modify, merge, #
# publish, distribute, sublicense, and/or sell copies of the Software, #
# and to permit persons to whom the Software is furnished to do so, #
# subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included #
# in all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS #
# IN THE SOFTWARE. #
# #
################################################################################
import json
import os
class config:
    """Simple file-backed configuration store (currently JSON only).

    Creates an empty config file on first use; loads existing JSON files
    into ``self.data`` on construction.
    """
    def __init__(self, path: str) -> None:
        self.path: str = os.path.abspath(path)
        self.data: dict = {}
        # os.path.splitext handles dotted base names ("a.b.json") and
        # extension-less names; the original basename.rsplit(".")[1] picked
        # the wrong segment for multi-dot names and raised IndexError when
        # there was no dot at all.
        extension: str = os.path.splitext(self.path)[1].lstrip(".").lower()
        self.extension: str = extension
        if not os.path.isfile(path):
            self.save()
        if extension == "json":
            # Context manager closes the handle (the original leaked it).
            with open(path, "rt") as fp:
                self.data = json.load(fp)

    def save(self) -> None:
        """Persist the current configuration to disk."""
        if self.extension == "json":
            with open(self.path, "wt") as fp:
                json.dump(self.data, fp, indent=4)
| StarcoderdataPython |
3249610 | # Faça um programa que tenha uma função chamada área(), que receba as dimensões de um terreno retangular
# (largura e comprimento) e mostre a área do terreno.
def area(largura, comprimento):
    """Calcula e exibe a área (em m²) de um terreno retangular."""
    print(f'A área de um terrono {largura}x{comprimento} é de {largura * comprimento:.1f}m².')
| StarcoderdataPython |
1704128 | <gh_stars>0
# Minimal age-gate demo: read an age from stdin and report eligibility.
age = input("Enter your age ")
age = int(age)
if age >= 15:
    print("you can play the game because you are above the 15")
else:
    # Fixed message: this branch means the user is UNDER 15; the original
    # wrongly said "above the 15" here too.
    print("you can't play the game because you are under the 15")
197947 | <reponame>hubo1016/vlcp
from vlcp.server.module import Module, publicapi
from vlcp.event.runnable import RoutineContainer
from vlcp.utils.networkmodel import PhysicalNetworkMap, PhysicalNetwork,\
VXLANEndpointSet
from vlcp.config.config import defaultconfig
from vlcp.utils.networkplugin import createphysicalnetwork,\
updatephysicalnetwork, default_physicalnetwork_keys, deletephysicalnetwork,\
deletephysicalport, default_physicalport_keys, createlogicalnetwork,\
default_logicalnetwork_keys, default_processor, updatelogicalnetwork,\
deletelogicalnetwork, createphysicalport, updatephysicalport,\
default_logicalnetwork_delete_check
from vlcp.utils.exceptions import WalkKeyNotRetrieved
from vlcp.utils.walkerlib import ensure_keys
@defaultconfig
class NetworkVxlanDriver(Module):
"""
Network driver for VXLAN networks. When creating a VXLAN type physical network,
you must specify an extra option ``vnirange``.
"""
    def __init__(self,server):
        """Register the driver's routine and publish every network/port API,
        each guarded by a criteria so it only answers type == 'vxlan'."""
        super(NetworkVxlanDriver,self).__init__(server)
        self.app_routine = RoutineContainer(self.scheduler)
        self.app_routine.main = self._main
        self.routines.append(self.app_routine)
        self.createAPI(
            publicapi(self.createphysicalnetwork,
                      criteria=lambda type: type == 'vxlan'),
            publicapi(self.updatephysicalnetwork,
                      criteria=lambda type: type == 'vxlan'),
            publicapi(self.deletephysicalnetwork,
                      criteria=lambda type: type == 'vxlan'),
            publicapi(self.createphysicalport,
                      criteria=lambda type: type == 'vxlan'),
            publicapi(self.updatephysicalport,
                      criteria=lambda type: type == 'vxlan'),
            publicapi(self.deletephysicalport,
                      criteria=lambda type: type == 'vxlan'),
            publicapi(self.createlogicalnetwork,
                      criteria=lambda type: type == 'vxlan'),
            publicapi(self.updatelogicalnetwork,
                      criteria=lambda type: type == 'vxlan'),
            publicapi(self.deletelogicalnetwork,
                      criteria=lambda type: type == "vxlan")
        )
    async def _main(self):
        # Nothing to do at runtime beyond announcing that the driver started;
        # all real work happens through the published APIs.
        self._logger.info("network_vxlan_driver running ---")
    def createphysicalnetwork(self, type):
        # create an new physical network
        def create_physicalnetwork_processor(physicalnetwork, walk, write, *, parameters):
            # VXLAN networks must carry a VNI range to allocate ids from.
            if 'vnirange' not in parameters:
                raise ValueError('must specify vnirange with physical network type=vxlan')
            _check_vnirange(parameters['vnirange'])
            # Copy all remaining user parameters onto the object.
            return default_processor(physicalnetwork, parameters=parameters, excluding=('id', 'type'))
        # Return the (walker, keys) pair expected by the transaction engine.
        return createphysicalnetwork(type, create_processor=create_physicalnetwork_processor),\
               default_physicalnetwork_keys
    def updatephysicalnetwork(self, type):
        # update a physical network
        def update_physicalnetwork_keys(id_, parameters):
            # The allocation map is only needed when the VNI range changes.
            if 'vnirange' in parameters:
                return (PhysicalNetworkMap.default_key(id_),)
            else:
                return ()
        def update_physicalnetwork_processor(physicalnetwork, walk, write, *, parameters):
            if 'vnirange' in parameters:
                _check_vnirange(parameters['vnirange'])
                try:
                    phymap = walk(PhysicalNetworkMap.default_key(physicalnetwork.id))
                except WalkKeyNotRetrieved:
                    # Map not retrieved yet; the transaction will retry.
                    pass
                else:
                    # Shrinking the range must not orphan existing allocations.
                    _check_vnirange_allocation(parameters['vnirange'], phymap.network_allocation)
            return default_processor(physicalnetwork, parameters=parameters, disabled=('type',))
        return updatephysicalnetwork(update_processor=update_physicalnetwork_processor), update_physicalnetwork_keys
    # The following operations need no VXLAN-specific behavior; delegate
    # straight to the generic network-plugin walkers.
    def deletephysicalnetwork(self, type):
        return deletephysicalnetwork(), default_physicalnetwork_keys
    def createphysicalport(self, type):
        return createphysicalport(), default_physicalport_keys
    def updatephysicalport(self, type):
        return updatephysicalport(), None
    def deletephysicalport(self, type):
        return deletephysicalport(), default_physicalport_keys
    def createlogicalnetwork(self, type):
        def logicalnetwork_processor(logicalnetwork, logicalnetworkmap, physicalnetwork,
                                     physicalnetworkmap, walk, write, *, parameters):
            if 'vni' in parameters:
                # Allocate this vni
                vni = int(parameters['vni'])
                if _isavaliablevni(physicalnetwork.vnirange, physicalnetworkmap.network_allocation, vni):
                    physicalnetworkmap.network_allocation[str(vni)] = logicalnetwork.create_weakreference()
                    write(physicalnetworkmap.getkey(), physicalnetworkmap)
                else:
                    raise ValueError("Specified VNI " + str(vni) + " allocated or not in range")
            else:
                # Allocate a vni from range
                vni = _findavaliablevni(physicalnetwork.vnirange, physicalnetworkmap.network_allocation)
                if vni is None:
                    raise ValueError("no available VNI in physical network " + physicalnetwork.id)
                physicalnetworkmap.network_allocation[str(vni)] = logicalnetwork.create_weakreference()
                write(physicalnetworkmap.getkey(), physicalnetworkmap)
            # Record the chosen VNI on the logical network itself.
            logicalnetwork.vni = vni
            write(logicalnetwork.getkey(), logicalnetwork)
            # Create VXLANEndpointSet (tracks the tunnel endpoints of this network)
            vxlanendpoint_set = VXLANEndpointSet.create_instance(logicalnetwork.id)
            write(vxlanendpoint_set.getkey(), vxlanendpoint_set)
            return default_processor(logicalnetwork, parameters=parameters, excluding=('id', 'physicalnetwork', 'vni'))
        # Process logical networks with specified IDs first, so explicit VNI
        # requests win over automatic allocation.
        return createlogicalnetwork(create_processor=logicalnetwork_processor,
                                    reorder_dict=lambda x: sorted(x.items(), key=lambda y: 'vni' in y[1], reverse=True)),\
               default_logicalnetwork_keys
def updatelogicalnetwork(self, type):
    """Return (walker, keys-func) for updating VXLAN logical networks.

    When updating VNIs we must first deallocate all old VNIs, then allocate
    all new ones, so two walkers are chained: a deallocate pass followed by
    an allocate pass.
    """
    def update_logicalnetwork_keys(id_, parameters):
        # Extra keys are only needed when the VNI itself changes.
        if 'vni' in parameters:
            return (PhysicalNetwork.default_key(id_),
                    PhysicalNetworkMap.default_key(id_))
        else:
            return ()
    def deallocate_processor(logicalnetwork, walk, write, *, parameters):
        # Release the network's current VNI from the physical network map.
        if 'vni' in parameters:
            try:
                phymap = walk(PhysicalNetworkMap._network.leftkey(logicalnetwork.physicalnetwork))
            except WalkKeyNotRetrieved:
                pass
            else:
                del phymap.network_allocation[str(logicalnetwork.vni)]
                write(phymap.getkey(), phymap)
        # Returning False: do not treat this pass as the final update.
        return False
    deallocate_walker = updatelogicalnetwork(update_processor=deallocate_processor)
    def allocate_processor(logicalnetwork, walk, write, *, parameters):
        # Claim the newly requested VNI and store it on the logical network.
        if 'vni' in parameters:
            try:
                phynet = walk(logicalnetwork.physicalnetwork.getkey())
                phymap = walk(PhysicalNetworkMap._network.leftkey(logicalnetwork.physicalnetwork))
            except WalkKeyNotRetrieved:
                ensure_keys(PhysicalNetworkMap._network.leftkey(logicalnetwork.physicalnetwork))
            else:
                vni = int(parameters['vni'])
                if _isavaliablevni(phynet.vnirange, phymap.network_allocation, vni):
                    phymap.network_allocation[str(vni)] = logicalnetwork.create_weakreference()
                    write(phymap.getkey(), phymap)
                else:
                    raise ValueError("Specified VNI " + str(vni) + " allocated or not in range")
                logicalnetwork.vni = vni
                write(logicalnetwork.getkey(), logicalnetwork)
        return default_processor(logicalnetwork, parameters=parameters, excluding=('id', 'vni'),
                                 disabled=('physicalnetwork',))
    allocate_walker = updatelogicalnetwork(update_processor=allocate_processor)
    def walker(walk, write, timestamp, parameters_dict):
        # Chain: deallocate old VNIs first, then allocate the new ones.
        deallocate_walker(walk, write, timestamp, parameters_dict)
        allocate_walker(walk, write, timestamp, parameters_dict)
    return walker, update_logicalnetwork_keys
def deletelogicalnetwork(self, type):
    """Return (walker, keys-func) for deleting VXLAN logical networks.

    Extends the default delete check by releasing the network's VNI from the
    physical network's allocation map.
    """
    def check_processor(logicalnetwork, logicalnetworkmap,
                        physicalnetwork, physicalnetworkmap,
                        walk, write, *, parameters):
        # Run the standard validation first (raises on failure).
        default_logicalnetwork_delete_check(logicalnetwork, logicalnetworkmap,
                                            physicalnetwork, physicalnetworkmap,
                                            walk, write, parameters=parameters)
        # Release the VNI held by this logical network.
        del physicalnetworkmap.network_allocation[str(logicalnetwork.vni)]
        write(physicalnetworkmap.getkey(), physicalnetworkmap)
    return deletelogicalnetwork(check_processor=check_processor), default_logicalnetwork_keys
def _check_vnirange(vnirange):
lastend = 0
for start,end in vnirange:
if start <= 0 or end > (1 << 24) - 1:
raise ValueError('VNI out of range (1 - 4095)')
if start > end or start <= lastend:
raise ValueError('VNI sequences overlapped or disordered: [%r, %r]' % (start, end))
lastend = end
def _check_vnirange_allocation(vnirange, allocation):
allocated_ids = sorted(int(k) for k in allocation.keys())
range_iter = iter(vnirange)
current_range = None
for id_ in allocated_ids:
while current_range is None or current_range[1] < id_:
try:
current_range = next(range_iter)
except StopIteration:
raise ValueError("Allocated VNI " + str(id_) + " not in new VNI range")
if current_range[0] > id_:
raise ValueError("Allocated VNI " + str(id_) + " not in new VNI range")
def _findavaliablevni(vnirange,allocated):
vni = None
for vr in vnirange:
find = False
for v in range(vr[0],vr[1] + 1):
if str(v) not in allocated:
vni = v
find = True
break
if find:
break
return vni
def _isavaliablevni(vnirange,allocated,vni):
find = False
for start,end in vnirange:
if start <= int(vni) <= end:
find = True
break
if find:
if str(vni) not in allocated:
find = True
else:
find = False
else:
find = False
return find
| StarcoderdataPython |
1736649 | <reponame>ChenfuShi/HiChIP_peaks<gh_stars>1-10
#########################################
# Author: <NAME>
# Email: <EMAIL>
#########################################
# to install without copying files
# pip install -e .
# to create distribution packages python setup.py sdist bdist_wheel
# python3 -m twine upload --repository-url https://test.pypi.org/legacy/ dist/*
# twine upload dist/*
# Single source of truth for the package version; it is also written into
# hichip_peaks/__init__.py below so the installed package can report it.
__version__="0.1.2"

# Read contents of long description (same as github readme)
with open("README.md", "r") as fh:
    long_description = fh.read()

# Update version number in __init__.py, This allows people to get the currently installed version from files when they import
with open("./hichip_peaks/__init__.py","w") as init_file:
    init_file.write(f"__version__=\"{__version__}\"")

# Imported late (after the version files are prepared) — order preserved.
from setuptools import setup

setup(name='hichip_peaks',
      version=__version__,
      description='A tool to find peaks from hichip data',
      long_description=long_description,
      long_description_content_type="text/markdown",
      url='https://github.com/ChenfuShi/HiChIP_peaks',
      author='<NAME>',
      author_email='<EMAIL>',
      license='BSD',
      packages=['hichip_peaks'],
      # Runtime dependencies installed alongside the package.
      install_requires=[
          'scipy',
          'numpy',
          'statsmodels',
          'matplotlib'
      ],
      # Command-line scripts exposed on install.
      entry_points = {
          'console_scripts':
              ['peak_call=hichip_peaks.main:main',
               'make_bedgraph=hichip_peaks.bedgraph:main',
               'diff_peaks=hichip_peaks.diffpeaks:main',
              ],
      },
      python_requires='>=3.6',
      zip_safe=False,
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Environment :: Console',
          'Intended Audience :: Developers',
          'Intended Audience :: Science/Research',
          'Natural Language :: English',
          'Operating System :: POSIX :: Linux',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Topic :: Scientific/Engineering :: Bio-Informatics',
          'License :: OSI Approved :: BSD License',
      ],
      )
def readme():
    """Return the contents of the project's README.md as a string.

    NOTE: defined after the setup() call above and unused by it; kept for
    interface compatibility.
    """
    with open('README.md') as readme_file:
        contents = readme_file.read()
    return contents
1639700 | import os
import atexit
import signal
import numpy as np
import tensorflow as tf
from subprocess import Popen, PIPE
from glearn.utils.log import log, log_warning
from glearn.utils.path import remove_empty_dirs
from glearn.utils import tf_utils
SUMMARY_KEY_PREFIX = "_summary_"
DEFAULT_EVALUATE_QUERY = "evaluate"
DEFAULT_EXPERIMENT_QUERY = "experiment"
class SummaryWriter(object):
    """Collects TensorFlow summaries per named "query" and writes them to disk,
    optionally serving them through a managed tensorboard subprocess.

    Two kinds of summaries are handled:
      * graph summary ops (add_scalar/add_histogram/...) merged per query and
        fetched during session runs, and
      * "simple" host-side values (add_simple_value/...) built directly as
        tf.Summary protos without running the graph.
    """

    class Results(object):
        """Per-query accumulator of summary data gathered between flushes."""

        def __init__(self, query):
            self.query = query
            # merged-summary protos returned by session runs
            self.results = []
            # tag -> tf.Summary.Value built host-side
            self.simple_summaries = {}

    def __init__(self, config):
        self.config = config
        # handle of the tensorboard subprocess (Popen) once started
        self.server = None
        # TODO - measure the performance impact of all these sanitizations
        self.debug_numerics = self.config.is_debugging("debug_numerics")

    @property
    def sess(self):
        """The TensorFlow session owned by the config."""
        return self.config.sess

    def start(self, **kwargs):
        """Reset per-run state, optionally launch tensorboard, and create the
        summary directory.  kwargs are forwarded to tf.summary.FileWriter."""
        self.summary_path = self.config.summary_path
        self.summaries = {}        # query -> list of summary ops
        self.summary_fetches = {}  # query -> cached merged summary op
        self.summary_results = {}  # query -> Results accumulator
        self.run_metadatas = {}    # query -> RunMetadata awaiting flush
        self.writers = {}          # query -> tf FileWriter
        # get graph
        self.kwargs = kwargs
        if "graph" not in self.kwargs:
            self.kwargs["graph"] = self.sess.graph
        server = self.config.get("tensorboard", False)
        if server:
            # start tensorboard server
            if self.server is None:
                # if this is the first evaluation, clean all experiment summaries
                self.clean_summaries(self.config.tensorboard_path)
                self.start_server()
            else:
                # clean only this evaluation's summaries
                self.clean_summaries(self.summary_path)
        os.makedirs(self.summary_path, exist_ok=True)

    def stop(self):
        """Close and forget all file writers (the tensorboard process, if any,
        is stopped separately via stop_server/atexit)."""
        for _, writer in self.writers.items():
            writer.close()
        self.writers = {}

    def start_server(self):
        """Spawn the tensorboard subprocess (once) and register its shutdown."""
        if self.server is None:
            # tensorboard should ignore Ctrl-C interrupts, and only be terminated explicitly
            def ignore_interrupt():
                signal.signal(signal.SIGINT, signal.SIG_IGN)

            # start tensorboard server
            path = self.config.tensorboard_path
            # NOTE(review): 6006 is tensorboard's default port; it is used for
            # the logged URL only and not passed to the CLI — confirm.
            port = 6006
            self.server = Popen(["tensorboard", "--logdir", path], preexec_fn=ignore_interrupt,
                                stdout=PIPE, stderr=PIPE)
            atexit.register(self.stop_server)
            url = f"http://{self.config.ip}:{port}"
            log(f"Started tensorboard server: {url} ({path})", color="white", bold=True)

    def stop_server(self):
        """Terminate the tensorboard subprocess if it is running."""
        # stop tensorboard server
        if self.server is not None:
            log(f"Stopping tensorboard server")
            self.server.terminate()
            self.server = None

    def clean_summaries(self, path):
        """Delete all tf event files under `path`, then prune empty dirs."""
        # delete all events.out.tfevents files, and cleanup empty dirs
        for root, dirs, files in os.walk(path):
            for sub_path in files:
                if sub_path.startswith("events.out.tfevents"):
                    os.remove(os.path.join(root, sub_path))
        remove_empty_dirs(path)

    def get_summary_results(self, query):
        """Return (creating on first use) the Results accumulator for `query`."""
        if query not in self.summary_results:
            self.summary_results[query] = self.Results(query)
        return self.summary_results[query]

    def add_simple_summary(self, name, query=None, allow_overwrite=False, **kwargs):
        """Record a host-side tf.Summary.Value under `query` (default
        "experiment").  kwargs go to tf.Summary.Value (simple_value=, image=...).
        Warns when overwriting an existing tag unless allow_overwrite."""
        query = query or DEFAULT_EXPERIMENT_QUERY
        summary_results = self.get_summary_results(query)
        tag = self.summary_scope(name, query)
        if not allow_overwrite and tag in summary_results.simple_summaries:
            log_warning(f"Overwriting simple summary value: {tag} "
                        "(Use set_simple_value to avoid warning.)")
        summary_results.simple_summaries[tag] = tf.Summary.Value(tag=tag, **kwargs)

    def add_simple_value(self, name, value, query=None, allow_overwrite=False):
        """Record a scalar value without running the graph."""
        if self.debug_numerics:
            value = np.nan_to_num(value)
        self.add_simple_summary(name, simple_value=value, query=query,
                                allow_overwrite=allow_overwrite)

    def set_simple_value(self, name, value, query=None):
        """Like add_simple_value but silently overwrites an existing tag."""
        self.add_simple_value(name, value, query=query, allow_overwrite=True)

    def add_summary_value(self, name, summary, query=None):
        """Register a graph summary op under `query` (default "evaluate")."""
        query = query or DEFAULT_EVALUATE_QUERY
        if query in self.summaries:
            query_summaries = self.summaries[query]
        else:
            query_summaries = []
            self.summaries[query] = query_summaries
        query_summaries.append(summary)
        return summary

    def add_scalar(self, name, tensor, query=None):
        """Add a scalar summary op for `tensor`."""
        if self.debug_numerics:
            tensor = tf_utils.nan_to_num(tensor)
        summary = tf.summary.scalar(name, tensor)
        return self.add_summary_value(name, summary, query=query)

    def add_histogram(self, name, values, query=None):
        """Add a histogram summary op for `values`."""
        if self.debug_numerics:
            values = tf_utils.nan_to_num(values)
        summary = tf.summary.histogram(name, values)
        return self.add_summary_value(name, summary, query=query)

    def add_activation(self, tensor, query=None):
        """Add an activation histogram plus a sparsity scalar for a layer
        output; no-op when `tensor` is None."""
        if tensor is None:
            return
        name = tensor.op.name
        self.add_histogram(f"{name}/activation", tensor, query=query)
        self.add_scalar(f"{name}/sparsity", tf.nn.zero_fraction(tensor), query=query)

    def add_variables(self, tvars, query=None):
        """Add value histograms for each variable in `tvars`."""
        for tvar in tvars:
            name = tvar.op.name
            self.add_histogram(f"{name}/value", tvar, query=query)

    def add_gradients(self, grads_tvars, query=None):
        """Add gradient histograms for each (grad, var) pair; None grads skipped."""
        for grad, tvar in grads_tvars:
            if grad is None:
                continue
            name = tvar.op.name
            self.add_histogram(f"{name}/gradient", grad, query=query)

    def add_images(self, name, images, max_outputs=3, query=None):
        """Add an image summary op."""
        if self.debug_numerics:
            images = tf_utils.nan_to_num(images)
        summary = tf.summary.image(name, images, max_outputs=max_outputs)
        return self.add_summary_value(name, summary, query=query)

    def add_simple_images(self, name, images, max_outputs=3, query=None, allow_overwrite=False):
        """Record already-evaluated images (array-like, HWC) as PNG summaries,
        one tag per image ("<name>/<i>").

        NOTE(review): max_outputs is accepted but never used here — confirm.
        """
        # matplotlib allows image encoding. the imports are here since they are slow.
        import io
        import matplotlib
        try:
            # first try this
            matplotlib.use('TkAgg')
        except Exception:
            # fallback backend
            matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        # convert image to 3-channel
        if images.shape[-1] == 1:
            images = np.stack((np.squeeze(images, axis=-1),) * 3, axis=-1)
        if self.debug_numerics:
            images = np.nan_to_num(images)
        for i, image in enumerate(images):
            im_bytes = io.BytesIO()
            plt.imsave(im_bytes, image, format='png')
            summary_image = tf.Summary.Image(encoded_image_string=im_bytes.getvalue())
            self.add_simple_summary(f"{name}/{i}", image=summary_image, query=query)

    def set_simple_images(self, name, images, max_outputs=3, query=None):
        """Like add_simple_images but overwrites existing tags."""
        self.add_simple_images(name, images, max_outputs=max_outputs, query=query,
                               allow_overwrite=True)

    def add_text(self, name, tensor, query=None):
        """Add a text summary op."""
        summary = tf.summary.text(name, tensor)
        return self.add_summary_value(name, summary, query=query)

    def write_text(self, name, tensor, query=None):
        """Evaluate and record a text summary immediately via a session run."""
        # TODO - convert to: add_simple_text(...)
        query = query or DEFAULT_EXPERIMENT_QUERY
        tag = self.summary_scope(name, query)
        summary = tf.summary.text(tag, tensor)
        self.config.sess.run({self.get_query_key(query): summary})

    def add_run_metadata(self, run_metadata, query=None):
        """Stash profiler RunMetadata to be written on the next flush."""
        self.run_metadatas[query] = run_metadata

    def get_fetch(self, query=None):
        """Return (and cache) the merged summary op for `query`, or None when
        the query has no registered summaries."""
        if query in self.summary_fetches:
            return self.summary_fetches[query]
        if query in self.summaries:
            fetch = tf.summary.merge(self.summaries[query])
            self.summary_fetches[query] = fetch
            return fetch
        return None

    def get_query_key(self, query=None):
        """Map a query name to the reserved key used in session fetch dicts."""
        if query is None:
            return SUMMARY_KEY_PREFIX
        return f"{SUMMARY_KEY_PREFIX}{query}"

    def prepare_fetches(self, fetches, query=None):
        """Insert merged summary ops for one or more queries into `fetches`."""
        if not isinstance(query, list):
            query = [query]
        for query_name in query:
            fetch = self.get_fetch(query_name)
            if fetch is not None:
                fetches[self.get_query_key(query_name)] = fetch

    def process_results(self, results):
        """Pop summary entries (keys with SUMMARY_KEY_PREFIX) out of a
        session-run result dict and append them to the per-query Results."""
        results_keys = list(results.keys())
        for key in results_keys:
            if key.startswith(SUMMARY_KEY_PREFIX):
                query = key[len(SUMMARY_KEY_PREFIX):]
                if len(query) == 0:
                    query = None
                query_results = results.pop(key, None)
                summary_results = self.get_summary_results(query)
                summary_results.results.append(query_results)

    def summary_scope(self, name, query=None):
        """Prefix `name` with its query to make a unique tag."""
        if query is None:
            return name
        return f"{query}/{name}"

    def flush(self, global_step=None):
        """Write all pending summaries and run metadata, one FileWriter per
        query, then flush each writer."""
        # collect all relevant query
        query = set(list(self.summary_results.keys()) + list(self.run_metadatas.keys()))
        # flush summary data
        for query_name in query:
            # get writer
            path = os.path.abspath(self.summary_path)
            if query_name is None:
                query_name = DEFAULT_EVALUATE_QUERY
            # NOTE(review): after the rename above, results stored under the
            # None query are popped below under "evaluate" — confirm intended.
            path = os.path.join(path, query_name)
            if query_name in self.writers:
                writer = self.writers[query_name]
            else:
                writer = tf.summary.FileWriter(path, **self.kwargs)
                self.writers[query_name] = writer
            # write any summary results for query
            summary_results = self.summary_results.pop(query_name, None)
            if summary_results is not None:
                # write results
                if len(summary_results.results) > 0:
                    summary = summary_results.results[0]  # TODO - average
                    writer.add_summary(summary, global_step=global_step)
                # write simple values
                summary_values = list(summary_results.simple_summaries.values())
                simple_summary = tf.Summary(value=summary_values)
                writer.add_summary(simple_summary, global_step=global_step)
            # write any metadata results for query
            run_metadata = self.run_metadatas.pop(query_name, None)
            if run_metadata is not None:
                if query_name is not None:
                    tag = f"{query_name}/step{global_step}"
                else:
                    tag = f"step{global_step}"
                writer.add_run_metadata(run_metadata, tag, global_step)
            # flush writer
            writer.flush()
class NullSummaryWriter(object):
    """No-op drop-in replacement for SummaryWriter.

    Every method accepts arbitrary keyword arguments, does nothing, and
    returns None, so code written against SummaryWriter runs unchanged when
    summaries are disabled.
    """

    def __init__(self, **kwargs):
        """Accept and ignore any constructor arguments."""

    def start(self, **kwargs):
        """No-op."""

    def stop(self, **kwargs):
        """No-op."""

    def add_simple_value(self, **kwargs):
        """No-op."""

    def add_scalar(self, **kwargs):
        """No-op; always returns None (no summary op is created)."""
        return None

    def add_histogram(self, **kwargs):
        """No-op; always returns None."""
        return None

    def add_activation(self, **kwargs):
        """No-op; always returns None."""
        return None

    def add_gradients(self, **kwargs):
        """No-op; always returns None."""
        return None

    def get_fetch(self, **kwargs):
        """No-op; always returns None (nothing to fetch)."""
        return None

    def prepare_fetches(self, **kwargs):
        """No-op."""

    def process_results(self, **kwargs):
        """No-op."""

    def flush(self, **kwargs):
        """No-op."""
| StarcoderdataPython |
3267092 | import dataclasses
import enum
import typing
from vkmodels.bases.object import ObjectBase
@dataclasses.dataclass
class State(
    ObjectBase,
):
    """VK API "state" object (dataclass on top of the shared ObjectBase)."""
    # Free-form human-readable description; None when not provided.
    description: typing.Optional[str] = None
    # Numeric state value; exact semantics defined by the VK API — TODO confirm.
    state: typing.Optional[int] = None
1728636 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# # Oliver ######## . # # # . #
# # Bonham ######## . # # # . #
# # Carter ######## . # # # . #
# ################# . # # ############
# ################# . # # # . #
# ################# . # # # . #
# ################# . # # # . #
#
#
#
#
#
import streamlit as st
import pandas as pd
import numpy as np
import time
import sys
import os
from pyvis.network import Network
from plotly import graph_objs as go
import networkx as nx
import spacy # needed to work with stopwords
from spacy.lang.en.stop_words import STOP_WORDS # needed to work with stop words
import beagleTM2_browser_helperCode as hc
DATE = "6 Feb 2021"
VERSION = "2_iv"
AUTHOR = "<NAME>"
AUTHORMAIL = "<EMAIL>"
"""The driver program for the analysis side of the thing. """
def begin():
    """Driver function: renders the sidebar, loads the parser data file and
    dispatches to the analysis selected in the menu (streamlit executes the
    statements in order, so rendering order matters here)."""
    st.text(hc.banner0_str)
    st.sidebar.title("BeagleTM Data Analysis")
    st.sidebar.subheader(f"Date: {hc.DATE}, Ver: {hc.VERSION}")
    st.sidebar.text("\U0001F415 \U0001F631 \U0001f5ff \U0001F608 \U0001f600 ")
    # Create a text element and let the reader know the data is loading.
    # get and load the file.
    myFile_str = hc.grabFile()
    try:
        data = hc.load_big_data(myFile_str)
        # create a dictionary having headers as keys and values as lists of column data.
        data_dic = hc.createMasterDataDic(data)
    except:
        # NOTE(review): bare except hides the real load error, and data /
        # data_dic stay unbound for the handlers below — confirm intended.
        st.sidebar.error("No data entered...")
    # menu system
    doThis_sb = st.sidebar.selectbox(
        "What are we doing with this data?",
        [
            "ReadMe",
            "Show_data",
            "Articles connected by pmids",
            "Articles having ANY of the selected keywords",
            "Articles having ALL of the selected keywords",
            "Heatmaps of keyword saturation",
            "Make Simple Heatmaps"
        ],
    )
    # Dispatch on the selected menu entry (each branch renders its own page).
    if doThis_sb == "ReadMe":
        with open("README.md") as readme_file:
            st.markdown(readme_file.read())
    if doThis_sb == "Show_data":
        hc.showData(data)
    if doThis_sb == "Articles connected by pmids":
        hc.articleConnectivity(data_dic)
    if doThis_sb == "Articles having ANY of the selected keywords":
        hc.keywordAnalysis(data_dic)
    if doThis_sb == "Articles having ALL of the selected keywords":
        hc.keywordAndkeywordsInArticle(data_dic)
    if doThis_sb == "Heatmaps of keyword saturation":
        hc.keywordSaturation(data_dic)
    if doThis_sb == "Make Simple Heatmaps":
        hc.simpleHeatmaps(data, data_dic)
    hc.writer("\U0001F415 WOO WOO!! \U0001F415")
    #hc.writer(" ok :","computer")
#
#
# # progress_bar = st.progress(0)
# #
# # for i in range(100):
# # # Update progress bar.
# # progress_bar.progress(i)
#
# Entry point: streamlit executes this module top-to-bottom on every rerun,
# so the driver is invoked unconditionally (no __main__ guard needed).
begin()
| StarcoderdataPython |
3315063 | import io
import urllib
import torch
from PIL import Image
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
def load_image_buffer_to_tensor(image_buf, device):
    """Decode an image byte buffer into a normalized 4-D float tensor.

    Args:
        image_buf (bytes): Raw encoded image bytes (any format PIL can open).
        device (torch.device): Device the resulting tensor is moved to.

    Returns:
        torch.Tensor: float tensor of shape (1, C, H, W) — ToTensor output
        with a batch dimension added by unsqueeze(0) — normalized with the
        commonly-used ImageNet mean/std constants.
    """
    image = Image.open(io.BytesIO(image_buf))
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]
        ),
    ])
    input_tensor = preprocess(image)
    # Add the batch dimension: (C, H, W) -> (1, C, H, W).
    input_batch = input_tensor.unsqueeze(0)
    return input_batch.to(device, torch.float)
def download_image(args):
    """Download an image from a URL and save it as '<name>.jpg' in the CWD.

    Args:
        args (tuple[str, str]): (url_path, name) — the URL to fetch and the
            basename (without extension) for the saved file.

    Returns:
        str: the basename that was passed in; the file on disk is name + '.jpg'.

    Raises:
        urllib.error.URLError: if the download fails.
    """
    # Local import: the module-level `import urllib` does not guarantee the
    # `urllib.request` submodule is loaded.  The old code tried the Python-2
    # style `urllib.URLopener()` first (which does not exist on Python 3) and
    # fell through a bare `except` to urlretrieve; call urlretrieve directly.
    import urllib.request
    url_path, name = args[0], args[1]
    urllib.request.urlretrieve(url_path, name + '.jpg')
    return name
def load_tensor_and_image_from_file(image_path, device):
    """Load an image file as both a normalized tensor and the PIL image.

    Args:
        image_path (str): Path to the image file.
        device (torch.device): Device the resulting tensor is moved to.

    Returns:
        tuple[torch.Tensor, PIL.Image.Image]: a (1, C, H, W) float tensor
        (same preprocessing as load_image_buffer_to_tensor: ToTensor plus
        the commonly-used ImageNet mean/std normalization) and the original
        undecorated PIL image.
    """
    input_image = Image.open(image_path)
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]
        ),
    ])
    input_tensor = preprocess(input_image)
    # Add the batch dimension: (C, H, W) -> (1, C, H, W).
    input_batch = input_tensor.unsqueeze(0)
    return input_batch.to(device, torch.float), input_image
def draw_image_and_recogintion(input_image, output_predictions):
    """Display the segmentation map and the original image side by side.

    (The misspelled name is kept for caller compatibility.)

    Args:
        input_image: source image; the helper below reads .size, so this is
            presumably a PIL Image rather than a numpy array — TODO confirm.
        output_predictions (torch.Tensor): per-pixel class predictions.

    Returns:
        None (shows a matplotlib figure as a side effect).
    """
    image = _map_predictions_to_image(input_image, output_predictions)
    fig = plt.figure(figsize=(12, 6))
    # Left subplot: colorized predictions; right subplot: the original image.
    for i, img in enumerate([image, input_image]):
        fig.add_subplot(1, 2, i + 1)
        plt.imshow(img)
    plt.show()
def map_predictions_on_image_buffer(image_buf, output_predictions):
    """Colorize predictions over a decoded image and return PNG bytes.

    Args:
        image_buf (bytes): Raw encoded image bytes.
        output_predictions (torch.Tensor): per-pixel class predictions.

    Returns:
        bytes: PNG-encoded colorized prediction image.  (The previous
        docstring claimed a PIL Image was returned; the code returns
        BytesIO.getvalue(), i.e. bytes.)
    """
    image = Image.open(io.BytesIO(image_buf))
    image = _map_predictions_to_image(image, output_predictions)
    img_bytes_array = io.BytesIO()
    image.save(img_bytes_array, format='PNG')
    img_bytes_array = img_bytes_array.getvalue()
    return img_bytes_array
def _map_predictions_to_image(input_image, output_predictions):
    """Build a palettized PIL image of per-pixel class predictions, resized
    to input_image.size.  21 palette entries are generated — presumably the
    20 Pascal-VOC classes plus background; confirm against the model used."""
    # Deterministic pseudo-random RGB color per class index.
    palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])
    colors = torch.as_tensor([i for i in range(21)])[:, None] * palette
    colors = (colors % 255).numpy().astype("uint8")
    # Class indices become palette indices in mode-P image data.
    image = Image.fromarray(output_predictions.byte().cpu()
                            .numpy()).resize(input_image.size)
    image.putpalette(colors)
    return image
| StarcoderdataPython |
121878 | from datetime import datetime
from flask import Blueprint, render_template, redirect, url_for, flash, abort
from flask_login import login_required, current_user
from app.models import EditableHTML, SiteSetting
from .forms import SiteSettingForm, PostForm, CategoryForm, EditCategoryForm, StatusForm
import commonmark
from app import db
from app.decorators import admin_required
#from .forms import PostForm, CategoryForm, EditCategoryForm
public = Blueprint('public', __name__)
@public.route('/')
def index():
    """Public landing page, rendered with at most one SiteSetting row."""
    public = SiteSetting.query.limit(1).all()
    return render_template("public/public.html",public=public)
@public.route('/about')
def about():
    """About page; passes every SiteSetting row to the template."""
    public = SiteSetting.find_all()
    return render_template("public/about.html",public=public)
@public.route('/all')
@login_required
@admin_required
def site_public():
    """Admin-only listing of all site settings, ordered by id."""
    all_public = SiteSetting.query.order_by(SiteSetting.id).all()
    return render_template("public/index.html",
                           public=all_public)
@public.route('/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_site_setting(id):
    """Admin-only edit form for one SiteSetting; 404s on an unknown id.

    POST with a valid form: copies the form fields onto the row and
    redirects to the listing.  GET (or invalid POST): pre-fills the form
    from the row and renders the edit template.

    NOTE(review): only db.session.add() is called, no explicit commit —
    presumably committed by the app on teardown; confirm.
    """
    form = SiteSettingForm()
    site_setting = db.session.query(SiteSetting).filter(SiteSetting.id==id).first()
    if(site_setting is None):
        abort(404)
    if form.validate_on_submit():
        site_setting.site_title = form.site_title.data
        site_setting.siteaddress= form.siteaddress.data
        site_setting.administration_user_address=form.administration_user_address.data
        site_setting.site_Language = form.site_Language.data
        db.session.add(site_setting)
        flash('"{0}" has been saved'.format(site_setting.site_title))
        return redirect(url_for('public.site_public'))
    form.site_title.data = site_setting.site_title
    form.siteaddress.data = site_setting.siteaddress
    form.administration_user_address.data= site_setting.administration_user_address
    form.site_Language.data=site_setting.site_Language
    return render_template("public/edit.html", form=form,
                           setting=site_setting)
@public.route('/new', methods=['GET', 'POST'])
@login_required
@admin_required
def new_site_setting():
    """Admin-only creation form for a SiteSetting row.

    NOTE(review): check_data_exists is queried but never used — the redirect
    that consumed it is commented out below (dead code; confirm removal).
    NOTE(review): as in edit_site_setting, no explicit commit after add().
    """
    check_data_exists = SiteSetting.query.get(1)
    #if check_data_exists is None :
    #return redirect(url_for('public.edit_site_setting',id=id))
    form = SiteSettingForm()
    if form.validate_on_submit():
        site_setting = SiteSetting()
        site_setting.site_title = form.site_title.data
        site_setting.siteaddress = form.siteaddress.data
        site_setting.administration_user_address =form.administration_user_address.data
        site_setting.site_Language = form.site_Language.data
        db.session.add(site_setting)
        flash('"{0}" has been saved'.format(site_setting.site_title))
        return redirect(url_for('public.site_public'))
    return render_template("public/new.html", form=form)
@public.route('/delete/<int:id>')
@login_required
@admin_required
def delete_site_setting(id):
    """Admin-only deletion of one SiteSetting; flashes the outcome and
    redirects to the listing either way.

    NOTE(review): db.session.delete() without an explicit commit —
    presumably committed by the app on teardown; confirm.
    """
    setting = SiteSetting.query.filter_by(id=id).first()
    if(setting is not None):
        db.session.delete(setting)
        flash('"{0}" has been deleted.'.format(setting.site_title))
        return redirect(url_for('public.site_public'))
    flash('Setting does not exist')
    return redirect(url_for('public.site_public'))
| StarcoderdataPython |
1780314 | <gh_stars>0
from subprocess import Popen, DEVNULL, TimeoutExpired
import logging
import argparse
import json
import os
from os.path import join, exists
import shutil
import pathlib
import tempfile
root_dir = os.getcwd()
workdir = join(root_dir, ".prep_dev_patch")
logger = logging.getLogger("prep_dev_patch")
prog_config = dict()
class CustomException(Exception):
    """Exception for expected, user-reportable pipeline failures.

    Fix: the base Exception is now initialised with the message, so
    str(e) and e.args work as expected (previously str(e) was empty because
    only self.msg was set).  get_msg() is kept for existing callers.
    """

    def __init__(self, msg):
        super().__init__(msg)
        self.msg = msg

    def get_msg(self):
        """Return the human-readable error message."""
        return self.msg
class PatchGen:
    """Prepares developer (ground-truth) patch artifacts for a Defects4J bug.

    Pipeline (run via __call__): check out the buggy and fixed revisions,
    diff their source trees into a patch file, emit a JSON config describing
    the patch, and copy the delta directory of `src_patch_id` under the new
    patch id.  Relies on module globals: root_dir, workdir, logger,
    prog_config, and the `defects4j` / `diff` command-line tools.
    """

    def __init__(self, project, bug_id, src_patch_id):
        self.project = project
        self.bug_id = bug_id
        self.src_patch_id = src_patch_id
        # defects4j checkout dir names, e.g. "Chart1b" / "Chart1f", under workdir.
        self.buggy_dir_name = project + bug_id + 'b'
        self.dfj4_buggy_dir = join(workdir, self.buggy_dir_name)
        self.fix_dir_name = project + bug_id + 'f'
        self.dfj4_fix_dir = join(workdir, self.fix_dir_name)
        # Output dirs (created if missing).
        self.dev_configs_dir = join(root_dir, 'dev_configs')
        self.prep_dir(self.dev_configs_dir)
        self.dev_patches_dir = join(root_dir, 'dev_patches')
        self.prep_dir(self.dev_patches_dir)

    def __call__(self):
        """Run the whole pipeline: patch file, config file, delta copy.
        Each stage is skipped when its output already exists."""
        self.prep_dir(workdir)
        patch_file = join(self.dev_patches_dir,
                          self.patch_name(self.project, self.bug_id))
        patch_file_copy = join('patches',
                               self.patch_name(self.project, self.bug_id))
        if exists(patch_file):
            logger.info('The file already exists: {}'.format(patch_file))
        elif exists(patch_file_copy):
            logger.info('The file already exists: {}'.format(patch_file_copy))
        else:
            # Stages can be skipped via the --skip command-line option.
            if 'CHECKOUT_BUGGY' not in prog_config['skip']:
                code = self.checkout_buggy(self.project, self.bug_id)
                if code != 0:
                    raise CustomException('check-out-buggy failed')
            if 'CHECKOUT_FIX' not in prog_config['skip']:
                code = self.checkout_fix(self.project, self.bug_id)
                if code != 0:
                    raise CustomException('check-out-fix failed')
                self.set_modified_classes()
            if 'GEN_PATCH' not in prog_config['skip']:
                code = self.gen_patch(self.project, self.bug_id)
                if code != 0:
                    raise CustomException('gen-patch failed')
        config_file = join(self.dev_configs_dir,
                           self.config_name(self.project, self.bug_id))
        config_file_copy = join('configs',
                                self.config_name(self.project, self.bug_id))
        if exists(config_file):
            logger.info('The file already exists: {}'.format(config_file))
        elif exists(config_file_copy):
            logger.info('The file already exists: {}'.format(config_file_copy))
        else:
            if not exists(self.dfj4_fix_dir):
                code = self.checkout_fix(self.project, self.bug_id)
                if code != 0:
                    raise CustomException('check-out-fix failed')
                self.set_modified_classes()
            # NOTE(review): if the fix dir already existed but the patch
            # stage above was skipped, self.modified_classes may be unset
            # when gen_config reads it — confirm.
            if 'GEN_CONFIG' not in prog_config['skip']:
                code = self.gen_config(self.project, self.bug_id)
                if code != 0:
                    raise CustomException('gen-config failed')
        src_dir = join(root_dir, 'deltas', self.project,
                       self.src_patch_id)
        delta_dir = join(root_dir, 'deltas', self.project,
                         self.patch_id(self.project, self.bug_id))
        if not exists(src_dir):
            raise CustomException('{} must exist'.format(src_dir))
        if exists(delta_dir):
            logger.info('The dir already exists: {}'.format(delta_dir))
        else:
            shutil.copytree(src_dir, delta_dir)
            logger.info('copied {} to {}'.format(src_dir, delta_dir))

    def checkout_buggy(self, project, bug_id):
        """Fresh defects4j checkout of the buggy revision; returns exit code."""
        if exists(self.dfj4_buggy_dir):
            shutil.rmtree(self.dfj4_buggy_dir)
        cmd = 'defects4j checkout -p ' + project + ' -v ' + bug_id + 'b' + \
              ' -w ' + self.dfj4_buggy_dir
        return self.run(cmd)

    def checkout_fix(self, project, bug_id):
        """Fresh defects4j checkout of the fixed revision; returns exit code."""
        if exists(self.dfj4_fix_dir):
            shutil.rmtree(self.dfj4_fix_dir)
        cmd = 'defects4j checkout -p ' + project + ' -v ' + bug_id + 'f' + \
              ' -w ' + self.dfj4_fix_dir
        return self.run(cmd)

    def set_modified_classes(self):
        """Query defects4j for the classes the developer fix modified and
        store them on self.modified_classes; returns the exit code."""
        logger.info('Retrieving a modified class')
        with tempfile.NamedTemporaryFile() as fp:
            cmd = 'defects4j export -p classes.modified -w ' + self.dfj4_fix_dir + \
                  ' -o ' + fp.name
            code = self.run(cmd, quiet=True)
            if code == 0:
                with open(fp.name, mode='r') as f:
                    modified_classes = f.read().splitlines()
                self.modified_classes = modified_classes
            return code

    def gen_patch(self, project, bug_id):
        """Diff the buggy vs fixed source tree into the dev patch file.

        Raises CustomException when the trees do not differ (diff exit 0);
        always returns 0 otherwise.
        """
        def src_dir(project):
            # Per-project location of the Java sources inside a checkout.
            if project in ['Chart']:
                return 'source'
            elif project in ['Math', 'Time', 'Lang']:
                return 'src'
            else:
                raise CustomException('Unsupported project: {}'.format(project))

        # diff is run from workdir so the patch contains relative paths.
        old_dir = os.getcwd()
        os.chdir(workdir)
        patch_file = join(self.dev_patches_dir,
                          self.patch_name(project, bug_id))
        if not exists(self.buggy_dir_name):
            raise CustomException('Dir does not exist: {}'.format(self.buggy_dir_name))
        cmd = 'diff -w -r -u {} {} > {}'.format(join(self.buggy_dir_name, src_dir(project)),
                                                join(self.fix_dir_name, src_dir(project)),
                                                patch_file)
        code = self.run(cmd)
        os.chdir(old_dir)
        # diff exits 0 only when the trees are identical — that means no patch.
        if code == 0:
            raise CustomException('No diff observed between {} and {}'.
                                  format(join(self.dfj4_buggy_dir, src_dir(project)),
                                         join(self.dfj4_fix_dir, src_dir(project))))
        return 0

    def patch_id(self, project, bug_id):
        """Canonical patch identifier, e.g. 'Chart_bug1'."""
        return '{}_bug{}'.format(project, bug_id)

    def gen_config(self, project, bug_id):
        """Write the JSON config describing the developer patch; returns 0.

        Requires self.modified_classes (set by set_modified_classes)."""
        def file_name(cls):
            # Fully-qualified class name -> relative .java file path.
            return join(cls.replace('.', '/') + '.java')

        data = dict()
        data['ID'] = self.patch_id(project, bug_id)
        data['project'] = project
        data['bug_id'] = bug_id
        data['correctness'] = 'Correct'
        data['tool'] = 'developer'
        data['target'] = [file_name(c) for c in self.modified_classes]
        config_file = join(self.dev_configs_dir,
                           self.config_name(self.project, self.bug_id))
        with open(config_file, 'w') as f:
            json.dump(data, f)
        return 0

    def run(self, cmd, env=os.environ, quiet=False):
        """Run a shell command; returns its exit code (1 on timeout).
        Output is suppressed when quiet or the global --quiet flag is set."""
        logger.debug('cmd: {}'.format(cmd))
        if prog_config['quiet'] or quiet:
            proc = Popen(cmd, env=env, stdout=DEVNULL, stderr=DEVNULL,
                         shell=True)
        else:
            proc = Popen(cmd, env=env, shell=True)
        try:
            code = proc.wait()
            return code
        except TimeoutExpired:
            # NOTE(review): wait() without a timeout argument should not raise
            # TimeoutExpired — this handler looks unreachable; confirm.
            logger.warning('timeout for: {}'.format(cmd))
            return 1

    def prep_dir(self, d):
        """Create directory d (and parents) if missing; returns d."""
        if not exists(d):
            pathlib.Path(d).mkdir(parents=True, exist_ok=True)
        return d

    def patch_name(self, project, bug_id):
        """File name of the patch, e.g. 'Chart_bug1'."""
        return '{}_bug{}'.format(project, bug_id)

    def config_name(self, project, bug_id):
        """File name of the JSON config, e.g. 'Chart_bug1.json'."""
        return '{}_bug{}.json'.format(project, bug_id)
return '{}_bug{}.json'.format(project, bug_id)
if __name__ == '__main__':
    # Command-line interface: one positional JSON config plus logging options.
    parser = argparse.ArgumentParser('prep_dev_patch')
    parser.add_argument('config_file', metavar='CONFIG_FILE', help='config file')
    parser.add_argument('--log', metavar='LOG', default=None,
                        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                        help='set the logging level')
    parser.add_argument('--quiet', action='store_true',
                        help='print only errors (default: %(default)s)')
    parser.add_argument('--skip', metavar='SKIP STEPS', nargs='+', default=[],
                        choices=['CHECKOUT_BUGGY', 'CHECKOUT_FIX'],
                        help='skip steps')
    args = parser.parse_args()
    # Stash run-wide flags in the module-level config consulted by PatchGen.
    prog_config['quiet'] = args.quiet
    prog_config['skip'] = args.skip
    # Logging: --quiet wins, then --log, else INFO.
    rootLogger = logging.getLogger()
    FORMAT = logging.Formatter('%(levelname)-8s %(name)-15s %(message)s')
    if args.quiet:
        rootLogger.setLevel(logging.ERROR)
    elif args.log is not None:
        log_level = getattr(logging, args.log, None)
        rootLogger.setLevel(log_level)
    else:
        rootLogger.setLevel(logging.INFO)
    # NOTE(review): the log file is named 'runexp.log' although this program
    # is prep_dev_patch — presumably shared with a sibling script; confirm.
    fileHandler = logging.FileHandler("{0}/{1}.log".format(os.getcwd(), 'runexp'))
    fileHandler.setFormatter(FORMAT)
    rootLogger.addHandler(fileHandler)
    consoleHandler = logging.StreamHandler()
    consoleHandler.setFormatter(FORMAT)
    rootLogger.addHandler(consoleHandler)
    # The JSON config supplies the source patch ID plus project/bug to prep.
    with open(args.config_file, "r") as read_file:
        data = json.load(read_file)
    project = data['project']
    bug_id = data['bug_id']
    src_patch_id = data['ID']
    try:
        patch_gen = PatchGen(project, bug_id, src_patch_id)
        patch_gen()
    except CustomException as e:
        # Expected pipeline failures are reported, not propagated.
        logger.error(e.get_msg())
3323354 | # -*- coding: utf-8 -*-
# Morra project: Features for MorphParserNE
#
# Copyright (C) 2020-present by <NAME>
# License: BSD, see LICENSE for details
"""
If you need MorphParserNE to support your language, add feature-functions for
your language here. Then, create parser as:
``MorphParserNE(features='<your lang>')``
"""
from morra.base_features import BaseFeatures
class FeaturesNE(BaseFeatures):
"""Features for MorphParserNE"""
def __init__(self, lang='RU'):
    """Initialise the NE feature extractor.

    For lang='RU' the generic feature entry points are rebound to the
    Russian-specific implementations defined on this class.
    """
    super().__init__(lang=lang)
    if lang == 'RU':
        self.get_ne_features = self.get_ne_features_RU
        self.get_ne2_features = self.get_ne2_features_RU
def get_ne_features_RU(self, i, context, lemma_context, pos_context,
feats_context, prev, prev2):
prev = str(prev)
prev2 = str(prev2)
context = self.START + context + self.END
lemma_context = self.START + lemma_context + self.END
pos_context = self.START + pos_context + self.END
feats_context = [{}, {}] + feats_context + [{}, {}]
i += len(self.START)
wform = context[i]
wform_i = self.normalize(wform)
wform_b1 = self.normalize(context[i - 1])
wform_b2 = self.normalize(context[i - 2])
wform_f1 = self.normalize(context[i + 1])
wform_f2 = self.normalize(context[i + 2])
lemma_i = lemma_context[i]
lemma_b1 = lemma_context[i - 1]
lemma_b2 = lemma_context[i - 2]
lemma_f1 = lemma_context[i + 1]
lemma_f2 = lemma_context[i + 2]
pos_i = pos_context[i]
pos_b1 = pos_context[i - 1]
pos_b2 = pos_context[i - 2]
pos_f1 = pos_context[i + 1]
pos_f2 = pos_context[i + 2]
feats_i = feats_context[i]
feats_b1 = feats_context[i - 1]
feats_b2 = feats_context[i - 2]
feats_f1 = feats_context[i + 1]
feats_f2 = feats_context[i + 2]
features = self.init_features()
self.add_feature(features, '_')
self.add_feature(features, 'w', wform_i)
self.add_feature(features, 's', self.wform_shape(lemma_i))
len_ = len(wform)
for val in range(1, 8):
self.add_feature(features, 'p' + str(val),
'' if val > len_ else wform[:val])
for val in range(1, 8):
self.add_feature(features, 's' + str(val),
'' if val > len_ else wform[-val:])
for val in range(3, 7):
self.add_feature(features, 'S' + str(val),
'' if val > len_ else wform[val:])
for val in range(3, 7):
_len = len_ - 1 - val
for start in range(1, _len):
self.add_feature(features, '<' + str(val) + ':' + str(start),
wform[len_ - start - val:len_ - start])
self.add_feature(features, 'p', pos_i)
#for feat, val in feats_i.items():
# self.add_feature(features, 'i feat', feat, val)
#case = feats_i.get('Case')
#if case:
# self.add_feature(features, 'i feat-Case', case)
self.add_feature(features, '-1n', prev)
self.add_feature(features, '-2n', prev2)
self.add_feature(features, '-1n-2n', prev, prev2)
self.add_feature(features, '-1nl', prev, lemma_i)
self.add_feature(features, '-1l', lemma_b1)
self.add_feature(features, '-1s4', wform_b1[-4:])
self.add_feature(features, '-1p', pos_b1)
self.add_feature(features, '+1l', lemma_f1)
self.add_feature(features, '+1s3', wform_f1[-3:])
self.add_feature(features, '+1p', pos_f1)
self.add_feature(features, '-2l', lemma_b2)
self.add_feature(features, '-2p', pos_b2)
self.add_feature(features, '+2l', lemma_f2)
self.add_feature(features, '+2p', pos_f2)
return features
def get_ne2_features_RU(self, i, context, lemma_context, pos_context,
feats_context, ne_context):
context = self.START + context + self.END
lemma_context = self.START + lemma_context + self.END
pos_context = self.START + pos_context + self.END
feats_context = [{}, {}] + feats_context + [{}, {}]
ne_context = self.START + ne_context + self.END
i += len(self.START)
wform = context[i]
wform_i = self.normalize(wform)
wform_b1 = self.normalize(context[i - 1])
wform_b2 = self.normalize(context[i - 2])
wform_f1 = self.normalize(context[i + 1])
wform_f2 = self.normalize(context[i + 2])
lemma_i = lemma_context[i]
lemma_b1 = lemma_context[i - 1]
lemma_b2 = lemma_context[i - 2]
lemma_f1 = lemma_context[i + 1]
lemma_f2 = lemma_context[i + 2]
pos_i = pos_context[i]
pos_b1 = pos_context[i - 1]
pos_b2 = pos_context[i - 2]
pos_f1 = pos_context[i + 1]
pos_f2 = pos_context[i + 2]
feats_i = feats_context[i]
feats_b1 = feats_context[i - 1]
feats_b2 = feats_context[i - 2]
feats_f1 = feats_context[i + 1]
feats_f2 = feats_context[i + 2]
ne_i = str(ne_context[i])
ne_b1 = str(ne_context[i - 1])
ne_b2 = str(ne_context[i - 2])
ne_f1 = str(ne_context[i + 1])
ne_f2 = str(ne_context[i + 2])
features = self.init_features()
self.add_feature(features, '_')
self.add_feature(features, 'w', wform)
self.add_feature(features, 's', self.wform_shape(wform))
len_ = len(wform_i)
for val in range(1, 8):
self.add_feature(features, 'p' + str(val),
'' if val > len_ else wform_i[:val])
for val in range(1, 8):
self.add_feature(features, 's' + str(val),
'' if val > len_ else wform_i[-val:])
for val in range(3, 7):
_len = len_ - 1 - val
for start in range(1, _len):
self.add_feature(features, '<' + str(val) + ':' + str(start),
wform_i[len_ - start - val:len_ - start])
for val in range(2, 7):
self.add_feature(features, 'S' + str(val),
'' if val > len_ else wform_i[val:])
self.add_feature(features, 'p', pos_i)
self.add_feature(features, '-1n', ne_b1)
self.add_feature(features, '-2n', ne_b2)
self.add_feature(features, '-1n-2n', ne_b1, ne_b2)
self.add_feature(features, '-1nl', ne_b1, lemma_i)
self.add_feature(features, '+1n', ne_f1)
self.add_feature(features, '+2n', ne_f2)
self.add_feature(features, '+1n+2n', ne_f1, ne_f2)
self.add_feature(features, '+1nl', ne_f1, lemma_i)
self.add_feature(features, '-1n+1n', ne_b1, ne_f1)
self.add_feature(features, '-1l', lemma_b1)
self.add_feature(features, '-1s4', wform_b1[-4:])
self.add_feature(features, '-1p', pos_b1)
self.add_feature(features, '+1l', lemma_f1)
self.add_feature(features, '+1s3', wform_f1[-3:])
self.add_feature(features, '+1p', pos_f1)
self.add_feature(features, '-2l', lemma_b2)
self.add_feature(features, '-2pos', pos_b2)
self.add_feature(features, '+2l', lemma_f2)
self.add_feature(features, '+2p', pos_f2)
return features
| StarcoderdataPython |
3265169 | <gh_stars>1-10
from Event import PictureEvent, LightEvent, FeedEvent
import datetime
# Display names indexed by numeric event type (0=feed, 1=light, 2=picture).
names = ['Feed','Light','Take picture']
# Module-level scheduler state shared by the functions below.
events = []
# Master switch: when False, tick() skips event execution.
enabled = True
# Next id to assign to a newly created event.
idcounter = 0
# Date of the last observed day rollover; tick() resets 'executed' flags
# when the date changes.
today = datetime.datetime.today().date()
def createEvent(type):
    """Instantiate the Event subclass matching the numeric *type* code.

    0 -> FeedEvent, 1 -> LightEvent, 2 -> PictureEvent; any other code
    yields None.
    """
    event_classes = {0: FeedEvent, 1: LightEvent, 2: PictureEvent}
    event_cls = event_classes.get(type)
    return event_cls() if event_cls is not None else None
def load(ini):
    """Restore the scheduler state (id counter, enabled flag, events) from *ini*."""
    global idcounter, enabled, events
    if not ini.has_section('events'):
        raise Exception("Broken state.ini file")
    idcounter = ini.getint('events', 'idcounter')
    enabled = ini.getboolean('events', 'enabled')
    count = ini.getint('events', 'count')
    events = []
    for index in range(count):
        # Each event lives in its own [event<index>] section.
        section_name = 'event' + str(index)
        event = createEvent(ini.getint(section_name, 'type'))
        event.readFromIni(ini, section_name)
        events.append(event)
def save(ini):
    """Persist the scheduler state (id counter, enabled flag, events) into *ini*."""
    if not ini.has_section('events'):
        ini.add_section('events')
    ini.set('events', 'idcounter', str(idcounter))
    ini.set('events', 'enabled', str(enabled))
    ini.set('events', 'count', str(len(events)))
    for index, event in enumerate(events):
        # Mirror of load(): one [event<index>] section per event.
        event.writeToIni(ini, 'event' + str(index))
def getSerializeable():
    """Return a JSON-friendly representation of every scheduled event."""
    serialized = []
    for event in events:
        serialized.append(event.getSerializeable())
    return serialized
def update(params):
    """Create or replace an event from request *params* and return it.

    ``params['event'] == -1`` means "create a new event"; otherwise the
    existing event with that id is replaced in place.
    """
    global idcounter
    event_id = int(params['event'])
    event_type = int(params['type'])
    event = createEvent(event_type)
    event.setDayInt(int(params['day']))
    event.hour = int(params['hour'])
    event.minute = int(params['minute'])
    # Events whose time already passed today must not fire immediately.
    event.executed = event.timePassed()
    if event_type == 0:
        # Feed event: food type, saturation ceiling, amount bounds.
        event.setFoodInt(int(params['food']))
        event.maxSaturation = float(params['maxsaturation'])
        event.minAmount = float(params['minamount'])
        event.maxAmount = float(params['maxamount'])
    elif event_type == 1:
        # Light event: on/off value arrives as the string 'true'/'false'.
        event.value = params['value'] == 'true'
    if event_id == -1:
        event.id = idcounter
        idcounter += 1
        events.append(event)
    else:
        event.id = event_id
        for position, existing in enumerate(events):
            if existing.id == event_id:
                events[position] = event
    return event
def getEvent(id):
    """Return the scheduled event with the given id, or None if absent."""
    return next((event for event in events if event.id == id), None)
def tick():
    """Advance the scheduler one step; intended to be called from the main loop."""
    global today
    current_date = datetime.datetime.today().date()
    if today != current_date:
        # A new day started: recompute which events already lie in the past
        # so they do not all fire at midnight.
        today = current_date
        for event in events:
            event.executed = event.timePassed()
    if enabled:
        for event in events:
            event.tick()
1766342 | <filename>mapping.py
import os
import sys

# Map the first whitespace-separated column of map_clsloc.txt to the second
# (presumably WordNet synset id -> ImageNet class index — confirm against
# the data file).
imagenet = {}
with open("map_clsloc.txt") as f:
    for lines in f:
        splitted = lines.strip().split()
        imagenet[splitted[0]] = splitted[1]

# Renumber the ids listed in wnids.txt with consecutive indices starting
# at 1, keyed by their value from the mapping above.
# Bug fix: the original never closed wnids.txt; both files now use context
# managers so they are closed even on error.
final_indices = {}
with open("wnids.txt") as f:
    for i, lines in enumerate(f, start=1):
        final_indices[int(imagenet[lines.strip()])] = i

print(final_indices)
81050 | <gh_stars>1-10
# @Time : 2020/11/4
# @Author : <NAME>
# @email : <EMAIL>
# UPDATE:
# @Time : 2021/1/29
# @Author : <NAME>
# @Email : <EMAIL>
"""
textbox.data.dataloader.single_sent_dataloader
################################################
"""
import numpy as np
import random
import math
import torch
from textbox.data.dataloader.abstract_dataloader import AbstractDataLoader
class SingleSentenceDataLoader(AbstractDataLoader):
    """:class:`GeneralDataLoader` is used for general model and it just return the origin data.

    Args:
        config (Config): The config of dataloader.
        dataset (SingleSentenceDataset): The dataset of dataloader. Corpus, see textbox.data.corpus for more details
        batch_size (int, optional): The batch_size of dataloader. Defaults to ``1``.
        shuffle (bool, optional): Whether the dataloader will be shuffle after a round. Defaults to ``False``.
    """

    def __init__(self, config, dataset, batch_size=1, shuffle=False, drop_last=True, DDP=False):
        super().__init__(config, dataset, batch_size, shuffle, drop_last, DDP)
        self._data_preprocess(dataset)

    def _data_preprocess(self, dataset):
        """Copy the required corpus fields onto the loader and index the text.

        Raises:
            AssertionError: if *dataset* lacks one of the required keys.
        """
        required_key_list = ['text_data', 'idx2token', 'token2idx']
        for dataset_attr in required_key_list:
            assert dataset_attr in dataset
            setattr(self, dataset_attr, dataset[dataset_attr])
        # _build_data (inherited) converts token sequences into id sequences
        # plus their lengths.
        self.text_idx_data, self.idx_length_data = self._build_data(self.text_data, self.token2idx)

    def get_reference(self):
        """Return the raw tokenized text used as the evaluation reference."""
        return self.text_data

    @property
    def pr_end(self):
        # One epoch ends once every indexed sentence has been served.
        return len(self.text_idx_data)

    def _shuffle(self):
        """Shuffle text, indices and lengths with one shared permutation."""
        # Bug fix: on an empty corpus, zip(*[]) yields nothing to unpack
        # into the three targets below and raised ValueError.
        if not self.text_data:
            return
        temp = list(zip(self.text_data, self.text_idx_data, self.idx_length_data))
        random.shuffle(temp)
        # Slice-assign so existing references to the lists stay valid.
        self.text_data[:], self.text_idx_data[:], self.idx_length_data[:] = zip(*temp)

    def _next_batch_data(self):
        """Assemble the next batch: raw text plus padded id tensors on device."""
        tp_text_data = self.text_data[self.pr:self.pr + self.step]
        tp_text_idx_data = self.text_idx_data[self.pr:self.pr + self.step]
        tp_idx_length_data = self.idx_length_data[self.pr:self.pr + self.step]
        padded_idx, length = self._pad_batch_sequence(tp_text_idx_data, tp_idx_length_data)
        batch_data = {
            'target_text': tp_text_data,
            'target_idx': padded_idx.to(self.device),
            'target_length': length.to(self.device)
        }
        return batch_data
114740 | <gh_stars>1-10
"""Tally tests"""
import os
import warnings
from unittest import TestCase
import nose
from nose.tools import (
assert_equal,
assert_not_equal,
assert_raises,
raises,
assert_almost_equal,
assert_true,
assert_false,
assert_in,
)
from pyne.utils import QAWarning
warnings.simplefilter("ignore", QAWarning)
from pyne.particle import is_valid, name, is_heavy_ion, id, mcnp, mcnp6, fluka, geant4
def test_is_valid():
    """is_valid accepts names, aliases, and PDG ids (incl. antiparticles)."""
    for particle in ("Proton", "Protium", "Hydrogen", 2212, -2212,
                     "Neutron", "AntiProton", "AntiNeutron"):
        assert_equal(is_valid(particle), True)
def test_name():
    """name() canonicalizes aliases and numeric ids to the particle name."""
    cases = (("Proton", "Proton"), ("Neutron", "Neutron"), (2112, "Neutron"),
             ("Hydrogen", "Proton"), ("Protium", "Proton"),
             (10010000, "Proton"), ("10010000", "Proton"))
    for arg, expected in cases:
        assert_equal(name(arg), expected)
def test_is_heavy_ion():
    """Only nuclides beyond 1H count as heavy ions."""
    cases = (("Proton", False), ("Hydrogen", False), ("1H", False),
             (10010000, False), ("10010000", False),
             ("2H", True), ("3He", True), ("22Na", True))
    for arg, expected in cases:
        assert_equal(is_heavy_ion(arg), expected)
def test_id_number():
    """id() returns the signed PDG number; nuclides map to 0."""
    checks = ((assert_equal, "Proton", 2212),
              (assert_not_equal, "AntiProton", 2212),
              (assert_equal, "AntiProton", -2212),
              (assert_not_equal, "Proton", -2212),
              (assert_equal, "Hydrogen", 2212),
              (assert_equal, "22Na", 0))
    for check, arg, expected in checks:
        check(id(arg), expected)
def test_mcnp_id():
    """mcnp() maps particles to single-letter MCNP designators."""
    for arg, code in (("Neutron", "n"), (2112, "n"), ("Photon", "p"),
                      ("Gamma", "p"), ("Electron", "e")):
        assert_equal(mcnp(arg), code)
def test_mcnp6_id():
    """mcnp6() adds the proton designator 'h' on top of the MCNP set."""
    for arg, code in (("Neutron", "n"), (2112, "n"), ("Photon", "p"),
                      ("Gamma", "p"), ("Electron", "e"),
                      ("Proton", "h"), ("Hydrogen", "h")):
        assert_equal(mcnp6(arg), code)
def test_fluka_id():
    """fluka() maps particles to upper-case FLUKA names."""
    for arg, code in (("Neutron", "NEUTRON"), (2112, "NEUTRON"),
                      ("Photon", "PHOTON"), ("Gamma", "PHOTON"),
                      ("Electron", "ELECTRON"), ("Beta-", "ELECTRON"),
                      ("Proton", "PROTON"), ("Hydrogen", "PROTON")):
        assert_equal(fluka(arg), code)
def test_geant4_id():
    """geant4() maps particles to lower-case Geant4 names."""
    for arg, code in (("Neutron", "neutron"), (2112, "neutron"),
                      ("Photon", "gamma"), ("Gamma", "gamma"),
                      ("Electron", "e-"), ("Beta-", "e-"),
                      ("Proton", "proton"), ("Hydrogen", "proton")):
        assert_equal(geant4(arg), code)
# Run as script
#
if __name__ == "__main__":
    # Delegate test discovery/execution to nose when invoked directly.
    nose.runmodule()
| StarcoderdataPython |
11209 | from .trainer.models import MultiTaskTagger
from .trainer.utils import load_dictionaries,Config
from .trainer.tasks.multitask_tagging import MultiTaskTaggingModule
from fairseq.data.data_utils import collate_tokens
from attacut import tokenize
class HoogBERTaEncoder(object):
    """Wraps the multi-task HoogBERTa model and exposes feature extraction."""

    def __init__(self, layer=12, cuda=False, base_path="."):
        args = Config(base_path=base_path)
        self.base_path = base_path
        self.pos_dict, self.ne_dict, self.sent_dict = load_dictionaries(self.base_path)
        self.model = MultiTaskTagger(
            args, [len(self.pos_dict), len(self.ne_dict), len(self.sent_dict)])
        if cuda:
            self.model = self.model.cuda()

    def _segment(self, text):
        # Tokenize each whitespace-separated chunk with attacut, protect
        # literal underscores with the [!und:] placeholder, and rejoin the
        # chunks with " _ " separators.
        segmented = []
        for chunk in text.split(" "):
            segmented.append(" ".join(tokenize(chunk)).replace("_", "[!und:]"))
        return " _ ".join(segmented)

    def extract_features(self, sentence):
        """Encode one raw sentence; returns (token ids, last-layer features)."""
        tokens = self.model.bert.encode(self._segment(sentence)).unsqueeze(0)
        all_layers = self.model.bert.extract_features(tokens, return_all_hiddens=True)
        return tokens[0], all_layers[-1][0]

    def extract_features_batch(self, sentenceL):
        """Encode a list of raw sentences as one padded batch."""
        encoded = [self.model.bert.encode(self._segment(text)) for text in sentenceL]
        batch = collate_tokens(encoded, pad_idx=1)
        return self.extract_features_from_tensor(batch)

    def extract_features_from_tensor(self, batch):
        """Run the encoder on an already-built id tensor batch."""
        all_layers = self.model.bert.extract_features(batch, return_all_hiddens=True)
        return batch, all_layers[-1]

    def extract_features2(self, sentence):
        """Variant of extract_features that skips the attacut segmentation.

        Appears to expect input that is already segmented — confirm with
        the callers.
        """
        tokens = self.model.bert.encode(sentence).unsqueeze(0)
        all_layers = self.model.bert.extract_features(tokens, return_all_hiddens=True)
        return tokens[0], all_layers[-1][0]

    def extract_features_batch2(self, sentenceL):
        """Batch variant of extract_features2 (no segmentation step)."""
        batch = collate_tokens(
            [self.model.bert.encode(sent) for sent in sentenceL], pad_idx=1)
        return self.extract_features_from_tensor(batch)
3315331 | #!/usr/bin/env python
from __future__ import print_function
import logging
LOG_FORMAT = '%(asctime)s %(levelname)s %(pathname)s:%(lineno)s: %(message)s'
logging.basicConfig(format=LOG_FORMAT, level=logging.DEBUG)
import os
import signal
import sys
import six
import threading
import time
import pprint
import psutil
import atexit
import argparse
import jinja2
from flask import Flask, Response, url_for, render_template, jsonify, request, current_app
from flask.json import JSONEncoder
from flask_reverse_proxy import ReverseProxied
from tornado.netutil import Resolver
from tornado import gen
import socket
# Defaults
DEFAULT_WAIT_DELAY = 0.1  # seconds between port-watcher polls
DEFAULT_CONTROL_SOCKET_PATH = '/var/run/connector.sock'  # control-API unix socket
DEFAULT_NGINX_PID_FILE = '/var/log/nginx/nginx.pid'  # read to SIGHUP nginx
DEFAULT_NGINX_CONF_FILE = '/etc/nginx/conf.d/default.conf'  # rendered config target
DEFAULT_NGINX_PORT = 8080  # public nginx listen port
DEFAULT_PORT = 9090  # controller HTTP port
DEFAULT_HOST = '0.0.0.0'  # controller bind address
class PortWatcher(object):
    """Background daemon thread that polls for localhost ports sshd listens on.

    Maintains a set of loopback listen ports (other than 22) belonging to
    sshd and invokes ``ports_changed_cb(new_ports)`` whenever that set
    changes. The set is guarded by an RLock shared with the poll thread.
    """

    INSTANCE = None  # process-wide singleton

    @classmethod
    def instance(cls):
        """Return the singleton, or None if start_instance was never called."""
        return cls.INSTANCE

    @classmethod
    def start_instance(cls, *args, **kwargs):
        """Create and start the singleton watcher on first call; idempotent."""
        if cls.INSTANCE is None:
            cls.INSTANCE = cls(*args, **kwargs)
        return cls.INSTANCE

    def __init__(self, ports_changed_cb=None, wait_delay=DEFAULT_WAIT_DELAY):
        # wait_delay is the poll period in seconds; the callback runs on the
        # watcher thread (see _update_ports).
        self._ports_changed_cb = ports_changed_cb
        self._wait_delay = wait_delay
        self._thread_lock = threading.RLock()
        self._thread = threading.Thread(name='PortWatchLoop', target=self._update_ports)
        self._thread.daemon = True
        self._stop_thread = False
        self._ports = set()
        self._thread.start()
        logging.info('Started port watcher')

    @property
    def ports(self):
        """Snapshot (copy) of the most recently detected port set."""
        with self._thread_lock:
            return self._ports.copy()

    def _update_ports(self):
        # Poll loop; runs on the daemon thread until stop() is called.
        while True:
            with self._thread_lock:
                if self._stop_thread:
                    return
            # get sshd ports
            ports = set([])
            try:
                # Preferred path: inspect each sshd process's own listen
                # sockets (needs permission to read other processes).
                for proc in psutil.process_iter():
                    if proc.name() == 'sshd':
                        for conn in proc.connections():
                            laddr_ip = conn.laddr[0]
                            laddr_port = conn.laddr[1]
                            if conn.status == psutil.CONN_LISTEN and \
                                    laddr_port != 22 and \
                                    laddr_ip in ('127.0.0.1', 'localhost'):
                                # logging.info('Connection %r', conn)
                                ports.add(laddr_port)
            except (psutil.AccessDenied, psutil.NoSuchProcess):
                # Fallback: system-wide connection table. PID may be
                # unavailable here, so non-privileged ports without an
                # owning pid are accepted on faith ("unsafe" below).
                try:
                    for conn in psutil.net_connections('inet'):
                        laddr_ip = conn.laddr[0]
                        laddr_port = conn.laddr[1]
                        if conn.status == psutil.CONN_LISTEN and \
                                laddr_port != 22 and \
                                laddr_ip in ('127.0.0.1', 'localhost'):
                            if conn.pid is None and laddr_port > 1023:
                                # logging.info('Connection %r', conn)
                                ports.add(laddr_port)  # unsafe
                            else:
                                for p in psutil.process_iter():
                                    if p.pid == conn.pid and 'sshd' in p.name():
                                        # logging.info('Connection %r', conn)
                                        ports.add(laddr_port)
                                        break
                except psutil.AccessDenied:
                    logging.exception('Could not get any information about ports')
            # Compare/commit under the lock, but invoke the callback outside
            # it to avoid deadlocks with callback code that takes locks.
            notify_ports = None
            with self._thread_lock:
                if self._ports != ports:
                    logging.info('Ports changed from %s to %s', self._ports, ports)
                    if self._ports_changed_cb:
                        notify_ports = ports.copy()
                    self._ports = ports
            if notify_ports is not None:
                self._ports_changed_cb(notify_ports)
                del notify_ports
            time.sleep(self._wait_delay)

    def stop(self):
        """Request the poll loop to exit and wait for the thread to finish.

        The join happens after releasing the lock; joining while holding it
        would deadlock with the loop's own lock acquisition.
        """
        with self._thread_lock:
            if self._stop_thread:
                return
            self._stop_thread = True
        self._thread.join()
class RegisteredPort(object):
    """A bound port that a user registered under a symbolic name."""

    def __init__(self, port, name, type, description):
        self.port = port                # TCP port number
        self.name = name                # unique symbolic name
        self.type = type                # protocol/type label, e.g. 'tcp'
        self.description = description  # free-form human description

    def __repr__(self):
        # Bug fix: the original format string was missing the closing
        # parenthesis after 'description=%r'.
        return 'RegisteredPort(port=%r, name=%r, type=%r, description=%r)' % \
            (self.port, self.name, self.type, self.description)

    def to_json(self):
        """JSON-serializable dict; 'registered' is always True for this class."""
        return {'port': self.port,
                'registered': True,
                'name': self.name,
                'type': self.type,
                'description': self.description
                }
class PortManagerException(Exception):
    """Raised by PortManager on invalid register/unregister requests."""

    def __init__(self, message):
        super(PortManagerException, self).__init__(message)
        # Keep the message accessible as an attribute for the error handlers.
        self.message = message
class PortManager(object):
    """Tracks bound ports and their user-registered metadata.

    Two synchronized indexes (by port number and by name) hold
    RegisteredPort entries; the optional callbacks fire on every
    registration and unregistration.
    """

    def __init__(self, port_registered_cb=None, port_unregistered_cb=None):
        self.registered_ports_by_port = {}
        self.registered_ports_by_name = {}
        self.bound_ports = set()
        self.port_registered_cb = port_registered_cb
        self.port_unregistered_cb = port_unregistered_cb

    def register(self, port, name, type='tcp', description=None):
        """Attach metadata to a bound port; raises on any conflict."""
        if port not in self.bound_ports:
            raise PortManagerException('Port {} is not bound'.format(port))
        if port in self.registered_ports_by_port:
            raise PortManagerException('Port {} is already registered'.format(port))
        if name in self.registered_ports_by_name:
            raise PortManagerException('Port with name {} does already exist'.format(name))
        entry = RegisteredPort(port=port, name=name, type=type, description=description)
        self.registered_ports_by_port[port] = entry
        self.registered_ports_by_name[name] = entry
        if self.port_registered_cb:
            self.port_registered_cb(entry)
        return entry

    def get_by_port(self, port):
        """Return the RegisteredPort for *port*, or None."""
        return self.registered_ports_by_port.get(port)

    def get_by_name(self, name):
        """Return the RegisteredPort named *name*, or None."""
        return self.registered_ports_by_name.get(name)

    def update_accessible_ports(self, ports):
        """Replace the bound-port set; drop registrations of vanished ports."""
        vanished = self.bound_ports - ports
        self.bound_ports = ports.copy()
        for gone in vanished:
            self.unregister_by_port(gone)

    def unregister_by_port(self, port):
        """Remove the registration for *port*; returns False if absent."""
        entry = self.registered_ports_by_port.get(port)
        if entry is None:
            return False
        assert port == entry.port
        del self.registered_ports_by_port[port]
        del self.registered_ports_by_name[entry.name]
        if self.port_unregistered_cb:
            self.port_unregistered_cb(entry)
        return True

    def unregister_by_name(self, name):
        """Remove the registration named *name*; returns False if absent."""
        entry = self.registered_ports_by_name.get(name)
        if entry is None:
            return False
        assert name == entry.name
        del self.registered_ports_by_name[name]
        del self.registered_ports_by_port[entry.port]
        if self.port_unregistered_cb:
            self.port_unregistered_cb(entry)
        return True

    def unregister(self, obj):
        """Unregister by RegisteredPort instance, port number, or name."""
        if isinstance(obj, RegisteredPort):
            return self.unregister_by_port(obj.port)
        if isinstance(obj, six.integer_types):
            return self.unregister_by_port(obj)
        return self.unregister_by_name(obj)

    def to_json(self):
        """List every bound port; registered ones carry their metadata."""
        entries = [entry.to_json()
                   for entry in six.itervalues(self.registered_ports_by_port)]
        entries.extend({'port': unreg, 'registered': False}
                       for unreg in self.bound_ports
                       if unreg not in self.registered_ports_by_port)
        return entries
class NginxTemplateGenerator(object):
    """Renders the nginx config from a Jinja2 template and reloads nginx."""

    def __init__(self,
                 nginx_port=DEFAULT_NGINX_PORT,
                 nginx_conf_file=DEFAULT_NGINX_CONF_FILE,
                 nginx_pid_file=DEFAULT_NGINX_PID_FILE,
                 controller_port=DEFAULT_PORT):
        self.nginx_port = nginx_port
        self.nginx_conf_file = nginx_conf_file
        self.nginx_pid_file = nginx_pid_file
        self.controller_port = controller_port
        # The template lives next to this module: ./templates/default.conf.j2
        dir = os.path.dirname(__file__)
        self.env = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.join(dir, 'templates')))
        self.template = self.env.get_template('default.conf.j2')

    def generate(self, port_manager):
        """Render the nginx config for the current port registrations."""
        output = self.template.render(
            controller_port=self.controller_port,
            nginx_port=self.nginx_port,
            bound_ports=port_manager.bound_ports,
            registered_ports_by_port=port_manager.registered_ports_by_port,
            registered_ports_by_name=port_manager.registered_ports_by_name)
        # Bug fix: render() returns text, but the file used to be opened in
        # binary mode ('wb'), which raises TypeError on Python 3. Use text
        # mode and a context manager instead.
        with open(self.nginx_conf_file, 'w') as fd:
            fd.write(output)

    def reload_nginx_config(self):
        """SIGHUP the nginx master (graceful reload); return success flag."""
        try:
            with open(self.nginx_pid_file, 'r') as fd:
                pid = int(fd.read().strip())
            os.kill(pid, signal.SIGHUP)
            return True
        except Exception:
            logging.exception('Could not reload nginx configuration')
            return False
# Global Data shared between the public and control WSGI apps.
LOCK = threading.RLock()  # guards the two shared singletons below
PORT_MANAGER = None  # Shared, locked by LOCK; set up in start_server()
NGINX_TEMPLATE_GEN = None  # Shared, locked by LOCK; set up in start_server()
class CustomJSONEncoder(JSONEncoder):
    """Flask JSON encoder that understands PortManager and RegisteredPort."""

    def default(self, obj):
        # Both domain types expose to_json(); anything else falls back to
        # the stock encoder (which raises TypeError for unknown types).
        if isinstance(obj, (PortManager, RegisteredPort)):
            return obj.to_json()
        return JSONEncoder.default(self, obj)
# Public app is served behind nginx (hence the reverse-proxy shim); the
# control app is exposed separately on a unix socket.
app = Flask(__name__)
app.wsgi_app = ReverseProxied(app.wsgi_app)
app.json_encoder = CustomJSONEncoder
control_app = Flask(__name__)
control_app.json_encoder = CustomJSONEncoder
# HTTP status codes used by the handlers below.
HTTP_OK = 200
HTTP_NO_CONTENT = 204
HTTP_BAD_REQUEST = 400
HTTP_NOT_FOUND = 404
HTTP_INTERNAL_SERVER_ERROR = 500
HTTP_NOT_IMPLEMENTED = 501
def error_response(message, status_code=HTTP_INTERNAL_SERVER_ERROR):
    """Build a JSON error response carrying *message* and *status_code*."""
    payload = {'error': message, 'status': status_code}
    response = jsonify(payload)
    response.status_code = status_code
    return response
def bad_request(message):
    """Shortcut for a 400 JSON error response."""
    return error_response(message, status_code=HTTP_BAD_REQUEST)
class BadRequestError(Exception):
    """Raised by handlers to signal a malformed client request (HTTP 400)."""

    def __init__(self, message):
        super(BadRequestError, self).__init__(message)
        # Kept as an attribute for the errorhandler below.
        self.message = message
@app.errorhandler(BadRequestError)
@control_app.errorhandler(BadRequestError)
def on_bad_request_error(error):
    # Map BadRequestError to a 400 JSON response on both apps.
    return bad_request(error.message)
@app.errorhandler(PortManagerException)
@control_app.errorhandler(PortManagerException)
def on_port_manager_exception(error):
    # Surface PortManager failures as 400s with a descriptive prefix.
    return bad_request('PortManager error: ' + error.message)
@app.route("/", methods=["GET"])
def get_root():
    """Render the human-facing overview page of bound/registered ports."""
    global PORT_MANAGER, LOCK
    with LOCK:
        return render_template("index.html",
                               bound_ports=PORT_MANAGER.bound_ports,
                               registered_ports_by_port=PORT_MANAGER.registered_ports_by_port,
                               registered_ports_by_name=PORT_MANAGER.registered_ports_by_name)
@control_app.route("/", methods=["GET"])
def control_get_root():
    """Plain-text banner so probes can tell the control API apart."""
    return "Control server"
@control_app.route("/ports/")
@app.route("/ports/")
def get_ports():
    """JSON list of all bound ports with their registration metadata.

    Serialization of PORT_MANAGER goes through CustomJSONEncoder.
    """
    with LOCK:
        return jsonify(PORT_MANAGER)
@app.route("/reload_nginx_config")
def get_reload_nginx_config():
    """Manually trigger an nginx config reload and report the outcome."""
    return 'Success' if NGINX_TEMPLATE_GEN.reload_nginx_config() else 'Reload failed'
def default_value(value, default=None):
    """Return *value* unless it is falsy, in which case return *default*."""
    return value if value else default
@control_app.route("/ports/<port>", methods=["GET", "PUT", "DELETE"])
def control_ports(port):
    """Control-API endpoint for one port.

    GET inspects, PUT registers, DELETE unregisters. *port* may be a port
    number or (for GET/DELETE) a registered name.
    """
    if request.method == "GET":
        with LOCK:
            try:
                port = int(port)
                if port not in PORT_MANAGER.bound_ports:
                    return error_response('Port is not bound', HTTP_NOT_FOUND)
                rport = PORT_MANAGER.get_by_port(port)
                if rport is None:
                    return jsonify({'port': port, 'registered': False})
            except ValueError:
                # Non-numeric path segment: treat it as a registered name.
                rport = PORT_MANAGER.get_by_name(port)
                if rport is None:
                    return error_response('No port with name {}'.format(port), HTTP_NOT_FOUND)
            else:
                # try-else: runs only when int(port) succeeded and the port
                # is bound and registered.
                # NOTE(review): when the lookup by *name* succeeds, control
                # appears to fall off the end of this branch (returning
                # None) — confirm against the original file's indentation.
                return jsonify(rport)
    elif request.method == "PUT":
        data = request.get_json(silent=True)
        if data is None:
            return bad_request("No json data in body")
        try:
            port = int(port)
        except ValueError:
            return bad_request('Port must be an integer')
        # Fill optional fields: name defaults to the port number itself.
        name = six.text_type(default_value(data.get("name"), default=port))
        description = six.text_type(default_value(data.get("description"), default=""))
        type = six.text_type(default_value(data.get("type"), default="tcp"))
        with LOCK:
            if port not in PORT_MANAGER.bound_ports:
                return error_response('Port is not bound', HTTP_NOT_FOUND)
            if PORT_MANAGER.get_by_port(port) is not None:
                return bad_request("Port is already registered")
            if PORT_MANAGER.get_by_name(name) is not None:
                return bad_request("Name is already used")
            return jsonify(PORT_MANAGER.register(port, name, type, description))
    elif request.method == "DELETE":
        with LOCK:
            try:
                port = int(port)
                if port not in PORT_MANAGER.bound_ports:
                    return error_response('Port is not bound', HTTP_NOT_FOUND)
                if not PORT_MANAGER.unregister_by_port(port):
                    return bad_request("Port is not registered")
            except ValueError:
                # Name-based delete.
                if not PORT_MANAGER.unregister_by_name(port):
                    return error_response('No port with name {}'.format(port), HTTP_NOT_FOUND)
        # Successful unregistration (either path) yields an empty object.
        return jsonify({})
@app.route("/api/debug/flask/", methods=["GET"])
@control_app.route("/api/debug/flask/", methods=["GET"])
def debug_flask():
    """Plain-text dump of the URL map and the WSGI request environment."""
    # Bug fix: urllib.unquote is Python-2-only; import the py2/py3 variant
    # locally, matching the six-based compatibility used elsewhere here.
    try:
        from urllib.parse import unquote  # Python 3
    except ImportError:
        from urllib import unquote  # Python 2
    output = ['Rules:']
    for rule in current_app.url_map.iter_rules():
        # Substitute "[arg]" placeholders so url_for can build each URL.
        options = {}
        for arg in rule.arguments:
            options[arg] = "[{0}]".format(arg)
        if rule.methods:
            methods = ','.join(rule.methods)
        else:
            methods = 'GET'
        url = url_for(rule.endpoint, **options)
        line = unquote("{:50s} {:20s} {}".format(rule.endpoint, methods, url))
        output.append(line)
    output.append('')
    output.append('Request environment:')
    for k, v in six.iteritems(request.environ):
        output.append("{0}: {1}".format(k, pprint.pformat(v, depth=5)))
    return Response('\n'.join(output), mimetype='text/plain')
class UnixResolver(Resolver):
    """Tornado resolver that maps the fake host 'unixsocket' to a unix socket.

    Lets AsyncHTTPClient speak HTTP over the control unix socket via URLs
    of the form http://unixsocket/...; every other host is delegated to the
    wrapped resolver.
    """

    def initialize(self, socket_file, resolver):
        self.socket_file = socket_file
        self.resolver = resolver

    def close(self):
        self.resolver.close()

    @gen.coroutine
    def resolve(self, host, port, *args, **kwargs):
        if host == 'unixsocket':
            # For AF_UNIX the "address" is simply the socket path.
            raise gen.Return([(socket.AF_UNIX, self.socket_file)])
        result = yield self.resolver.resolve(host, port, *args, **kwargs)
        raise gen.Return(result)
def start_server(args):
    """Start the public HTTP server, the optional control server on a unix
    socket, the nginx config generator and the port watcher, then run the
    tornado IOLoop until a termination signal arrives."""
    from tornado.wsgi import WSGIContainer
    from tornado.httpserver import HTTPServer
    from tornado.ioloop import IOLoop
    from tornado.netutil import bind_unix_socket
    logging.info('Run on host %s:%i', args.host, args.port)
    http_server = HTTPServer(WSGIContainer(app))
    http_server.listen(args.port, args.host)
    if args.control_unix_socket:
        # Control API is reachable only via the unix socket (mode 0666 so
        # local clients can register/unregister ports).
        control_server = HTTPServer(WSGIContainer(control_app))
        socket = bind_unix_socket(args.control_unix_socket, mode=0o666)
        control_server.add_socket(socket)
        logging.info('Run control server on unix socket %s', args.control_unix_socket)
    global NGINX_TEMPLATE_GEN, PORT_MANAGER, LOCK
    NGINX_TEMPLATE_GEN = NginxTemplateGenerator(nginx_port=args.nginx_port,
                                                nginx_conf_file=args.nginx_conf_file,
                                                nginx_pid_file=args.nginx_pid_file,
                                                controller_port=args.port)

    def nginx_regenerate_conf():
        # Re-render the nginx config from current state and hot-reload nginx.
        NGINX_TEMPLATE_GEN.generate(PORT_MANAGER)
        NGINX_TEMPLATE_GEN.reload_nginx_config()

    # Every (un)registration triggers an nginx config regeneration.
    PORT_MANAGER = PortManager(port_registered_cb=lambda rport: nginx_regenerate_conf(),
                               port_unregistered_cb=lambda rport: nginx_regenerate_conf())
    nginx_regenerate_conf()

    def on_ports_changed(new_ports):
        # Called from the PortWatcher thread; LOCK guards PORT_MANAGER.
        with LOCK:
            PORT_MANAGER.update_accessible_ports(new_ports)

    PortWatcher.start_instance(ports_changed_cb=on_ports_changed, wait_delay=args.wait_delay)
    loop = IOLoop.current()

    def stop_ioloop():
        logging.info('Stopping IOLoop')
        loop.stop()

    def signal_term_handler(signal, frame):
        print('Got signal {}, exiting'.format(signal), file=sys.stderr)
        stop_ioloop()
        sys.exit(0)

    def on_exit():
        # Remove the stale unix socket file on shutdown.
        if args.control_unix_socket:
            os.unlink(args.control_unix_socket)

    signal.signal(signal.SIGTERM, signal_term_handler)
    signal.signal(signal.SIGINT, signal_term_handler)
    atexit.register(on_exit)
    loop.start()
def register_port(args):
    """CLI subcommand: register a port via the control unix socket.

    Sends PUT /ports/<port> with the name/type/description JSON payload
    through http://unixsocket/... (resolved by UnixResolver).
    """
    from tornado import gen, ioloop
    from tornado.httpclient import HTTPError
    from tornado.httpclient import AsyncHTTPClient
    import json
    if not os.path.exists(args.control_unix_socket):
        print("Socket file {} does not exist !".format(args.control_unix_socket), file=sys.stderr)
        sys.exit(1)

    @gen.coroutine
    def do_register():
        # Route the fake host 'unixsocket' through the control socket.
        resolver = UnixResolver(socket_file=args.control_unix_socket, resolver=Resolver())
        AsyncHTTPClient.configure(None, resolver=resolver)
        client = AsyncHTTPClient()
        mtype = 'application/json'
        headers = {'Content-Type': mtype}
        body = json.dumps({"name": args.name, "type": args.type, "description": args.description})
        try:
            response = yield client.fetch('http://unixsocket/ports/{}'.format(args.port),
                                          method='PUT',
                                          headers=headers,
                                          body=body)
        except HTTPError as he:
            # Server rejected the registration (4xx/5xx).
            print("Could not register port: {}".format(he), file=sys.stderr)
            sys.exit(1)
        except Exception as e:
            logging.exception("Could not register port")
            sys.exit(1)
        print(response.body)

    ioloop.IOLoop.current().run_sync(do_register)
def unregister_port(args):
    """CLI subcommand: unregister a port via the control unix socket.

    Sends DELETE /ports/<port> through http://unixsocket/... (resolved by
    UnixResolver).
    """
    from tornado import gen, ioloop
    from tornado.httpclient import HTTPError
    from tornado.httpclient import AsyncHTTPClient
    import json
    if not os.path.exists(args.control_unix_socket):
        print("Socket file {} does not exist !".format(args.control_unix_socket), file=sys.stderr)
        sys.exit(1)

    @gen.coroutine
    def do_unregister():
        # Route the fake host 'unixsocket' through the control socket.
        resolver = UnixResolver(socket_file=args.control_unix_socket, resolver=Resolver())
        AsyncHTTPClient.configure(None, resolver=resolver)
        client = AsyncHTTPClient()
        try:
            response = yield client.fetch('http://unixsocket/ports/{}'.format(args.port),
                                          method='DELETE')
        except HTTPError as he:
            # Server rejected the request (e.g. unknown port/name).
            print("Could not unregister port: {}".format(he), file=sys.stderr)
            sys.exit(1)
        except Exception as e:
            logging.exception("Could not unregister port")
            sys.exit(1)
        print(response.body)

    ioloop.IOLoop.current().run_sync(do_unregister)
def main():
    """Command-line entry point for the connector controller.

    Builds the argument parser with three subcommands (``start``,
    ``register``, ``unregister``) and dispatches to the handler each
    subcommand registered through ``set_defaults(func=...)``.
    """
    parser = argparse.ArgumentParser(
        description="Connector Controller",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--debug", help='debug mode', action="store_true")
    parser.add_argument("--control-unix-socket", default=DEFAULT_CONTROL_SOCKET_PATH,
                        help="path to the control unix socket to bind")
    subparsers = parser.add_subparsers()
    # "start": run the controller server in front of nginx.
    start_p = subparsers.add_parser('start', help='start server')
    start_p.add_argument("-p", "--port", default=DEFAULT_PORT, help="listen on the given port", type=int)
    start_p.add_argument("--host", default=DEFAULT_HOST, help="listen on the given host")
    start_p.add_argument("--wait-delay", default=DEFAULT_WAIT_DELAY, help="wait delay in seconds between port checks")
    start_p.add_argument("--nginx-port", default=DEFAULT_NGINX_PORT, help="nginx server port")
    start_p.add_argument("--nginx-pid-file", default=DEFAULT_NGINX_PID_FILE, help="Location of nginx PID file")
    start_p.add_argument("--nginx-conf-file", default=DEFAULT_NGINX_CONF_FILE, help="Location of nginx conf file")
    start_p.set_defaults(func=start_server)
    # "register": declare a port to a running controller.
    register_p = subparsers.add_parser('register', help='register port')
    register_p.add_argument("port", help="port number", type=int)
    register_p.add_argument("-n", "--name", default=None, help="port name")
    register_p.add_argument("-t", "--type", default=None, help="port type")
    register_p.add_argument("-d", "--description", default=None, help="port description")
    register_p.set_defaults(func=register_port)
    # "unregister": remove a previously registered port.
    unregister_p = subparsers.add_parser('unregister', help='unregister port')
    unregister_p.add_argument("port", help="port number or name")
    unregister_p.set_defaults(func=unregister_port)
    args = parser.parse_args()
    # On Python 3 subparsers are optional by default: invoking the script
    # without a subcommand leaves the namespace with no "func" attribute,
    # which previously crashed with AttributeError.  Show usage instead.
    if not hasattr(args, 'func'):
        parser.print_help(file=sys.stderr)
        sys.exit(1)
    args.func(args)
# Script entry point: parse arguments and dispatch to the chosen subcommand.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1608001 | #!/usr/bin/env python
"""
setup.py file for WarpX
"""
import sys
import argparse
from setuptools import setup
# Intercept the makefile-only --with-libwarpx flag before setuptools parses
# sys.argv; all unknown arguments are handed back to setup() untouched.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument('--with-libwarpx', type=str, default=None, help='Install libwarpx with the given value as DIM. This option is only used by the makefile.')
args, unknown = argparser.parse_known_args()
sys.argv = [sys.argv[0]] + unknown
# When building against libwarpx, ship the dimension-specific shared library
# with the package; otherwise only pure-Python files are installed.
if args.with_libwarpx:
    package_data = {'pywarpx' : ['libwarpx%s.so'%args.with_libwarpx]}
else:
    package_data = {}
setup (name = 'pywarpx',
       version = '19.08',
       packages = ['pywarpx'],
       package_dir = {'pywarpx':'pywarpx'},
       description = """Wrapper of WarpX""",
       package_data = package_data,
       install_requires=['picmistandard', 'periodictable']
       )
| StarcoderdataPython |
1684680 | #import statements
from tkinter import*
import time
# Wall-clock timestamps (seconds since the epoch) recorded by the Start/Stop
# buttons; total_time holds the most recently measured elapsed duration.
start_time = 0
end_time = 0
total_time = 0
def time_display(seconds):
    """Render *seconds* as an H:M:S "Time Lapsed" label inside the window."""
    # divmod yields quotient and remainder in one step.
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    msg = "Time Lapsed = {0}:{1}:{2}".format(int(hours), int(minutes), int(secs))
    label = Label(
        mw,
        text=msg,
        fg="blue",
        bg="white",
        font=("Comic Sans MS", 14, "bold italic"),
        width=100,
        height=100,
    )
    label.pack(side="top")
def timer_start():
    """Record the wall-clock time at which the Start button was pressed.

    Stores the timestamp in the module-level ``start_time`` so that
    ``timer_end`` can compute the elapsed duration later.
    """
    # Leftover debug print statements removed.
    global start_time
    start_time = time.time()
def timer_end():
    """Compute the elapsed time since Start and display it in the window.

    Updates the module-level ``end_time`` and ``total_time``: the top-level
    declarations make clear these were intended to be globals, but without
    the ``global`` statement the original assignments created locals and the
    module state was never updated.
    """
    global end_time, total_time
    end_time = time.time()
    total_time = end_time - start_time
    time_display(int(total_time))
# Create the root window, fixed at 300x300 pixels.
mw = Tk()
mw.geometry("300x300")
# Frame holding the Start/Stop buttons, docked at the bottom of the window.
mf = Frame(mw)
#attach frame to root window
mf.pack(side = "bottom")
# Start button: records the start timestamp via timer_start.
button1 = Button(mf, text = "Start",bg = "green", bd = 10,command = timer_start)
#Attach button1 to the frame on the left side
button1.pack(side = "left")
# Stop button: computes and displays the elapsed time via timer_end.
button2 = Button(mf, text = "Stop",bg = "red",bd = 10,command = timer_end)
#Attach button2 to the frame on the right side
button2.pack(side = "right")
# Enter the Tk event loop; blocks until the window is closed.
mw.mainloop()
| StarcoderdataPython |
3318507 | <reponame>wearelumenai/distclus4py<gh_stars>1-10
from distclus import bind
from .ffi import lib
from .oc import OnlineClust
class KMeans(OnlineClust):
    """Proxy for a k-means algorithm implemented in a native library.

    All constructor arguments are marshalled through the ``bind`` helpers and
    forwarded positionally to ``lib.KMeans``; ``None`` values are translated
    to the library's zero sentinels.
    """
    def __init__(
        self, space='euclid', par=True, init='kmeans_pp', init_descr=None,
        k=16, nb_iter=100, frame_size=None, seed=None,
        iter_freq=0, data_per_iter=0, timeout=0, num_cpu=0, iter=None,
        data=None, inner_space='euclid', window=None
    ):
        # NOTE(review): init_descr is accepted but never forwarded to the
        # native constructor below — confirm whether that is intentional.
        # The positional order of these arguments must match the native
        # lib.KMeans signature exactly; do not reorder.
        super(KMeans, self).__init__(
            lib.KMeans, space, data, bind.par(par),
            *bind.initializer(init), bind.none2zero(seed),
            k, nb_iter if iter is None else iter, bind.none2zero(frame_size),
            iter_freq, data_per_iter, timeout, num_cpu,
            bind.space(inner_space),
            bind.none2zero(window)
        )
| StarcoderdataPython |
1634310 | # Generated by Django 3.2.9 on 2021-12-03 16:24
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
def create_levels(apps, schema_editor):
    """Seed the Level table with the three default difficulty levels."""
    level_model = apps.get_model('base', 'Level')
    names = ['Новичок', 'Пилот', 'Профессионал']
    level_model.objects.bulk_create([level_model(name=name) for name in names])
class Migration(migrations.Migration):
    """Initial migration for the ``base`` app.

    Creates the custom ``User`` model plus the ``Level``, ``Task``,
    ``TeoryInfo``, ``Test``, ``TestAttempt`` and ``TaskAttempt`` tables, and
    seeds the default levels via the ``create_levels`` data migration.
    """
    initial = True
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('avatar', models.ImageField(blank=True, null=True, upload_to='')),
                ('is_admin', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        migrations.CreateModel(
            name='Level',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
            ],
        ),
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=512)),
                ('description', models.TextField()),
                ('task', models.TextField()),
                ('max_attempts', models.PositiveIntegerField(default=1)),
                ('level', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='base.level')),
            ],
        ),
        migrations.CreateModel(
            name='TeoryInfo',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=512)),
                ('info', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Test',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=1024)),
                ('max_attempts', models.PositiveIntegerField(default=1)),
                ('data', models.TextField(default='[]')),
            ],
        ),
        migrations.CreateModel(
            name='TestAttempt',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dttm_start', models.DateTimeField(auto_now_add=True)),
                ('dttm_end', models.DateTimeField(blank=True, null=True)),
                ('answers', models.TextField(default='[]')),
                ('test', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='base.test')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='TaskAttempt',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(choices=[('E', 'Экзамен'), ('P', 'Пробная попытка')], default='P', max_length=1)),
                ('dttm_added', models.DateTimeField(auto_now_add=True)),
                ('dttm_start', models.DateTimeField()),
                ('dttm_end', models.DateTimeField(blank=True, null=True)),
                ('track', models.TextField(default='[]')),
                ('score', models.FloatField(default=0)),
                ('task', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='base.task')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Data migration: insert the three default Level rows.
        migrations.RunPython(create_levels),
    ]
| StarcoderdataPython |
4821767 | """
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
Test Case Title : Verify that a rigid body with "Interpolate motion" option selected moves smoothly.
"""
# fmt: off
class Tests:
    """(pass, fail) report tuples consumed by the editor's Report helper."""
    create_entity = (
        "Created test entity",
        "Failed to create test entity",
    )
    rigid_body_added = (
        "Added PhysX Rigid Body component",
        "Failed to add PhysX Rigid Body component",
    )
    enter_game_mode = ("Entered game mode", "Failed to enter game mode")
    exit_game_mode = ("Exited game mode", "Failed to exit game mode")
    rigid_body_smooth = (
        "Rigid body motion passed smoothness threshold",
        "Failed to meet smoothness threshold for rigid body motion",
    )
# fmt: on
def Tick_InterpolatedRigidBodyMotionIsSmooth():
    """
    Summary:
    Create entity with PhysX Rigid Body component and turn on the Interpolate motion setting.
    Verify that the position of the rigid body varies smoothly with time.
    Expected Behavior:
    1) The motion of the rigid body under gravity is a smooth curve, rather than an erratic/jittery movement.
    Test Steps:
    1) Load the empty level
    2) Create an entity
    3) Add rigid body component
    4) Enter game mode and collect data for the rigid body's z co-ordinate and the time values for a series of frames
    5) Check if the motion of the rigid body was sufficiently smooth
    :return: None
    """
    # imports
    # NOTE(review): `os` and `Asset` appear unused below — confirm before removing.
    import os
    import azlmbr.legacy.general as general
    import azlmbr.math as math
    from editor_python_test_tools.editor_entity_utils import EditorEntity as Entity
    from editor_python_test_tools.utils import Report
    from editor_python_test_tools.utils import TestHelper as helper
    from editor_python_test_tools.asset_utils import Asset
    import numpy as np
    # constants
    COEFFICIENT_OF_DETERMINATION_THRESHOLD = 1 - 1e-4 # curves with values below this are not considered sufficiently smooth
    helper.init_idle()
    # 1) Load the empty level
    helper.open_level("", "Base")
    # 2) Create an entity
    test_entity = Entity.create_editor_entity("test_entity")
    Report.result(Tests.create_entity, test_entity.id.IsValid())
    azlmbr.components.TransformBus(
        azlmbr.bus.Event, "SetWorldTranslation", test_entity.id, math.Vector3(0.0, 0.0, 0.0))
    # 3) Add rigid body component (damping zeroed so gravity alone drives the motion)
    rigid_body_component = test_entity.add_component("PhysX Rigid Body")
    rigid_body_component.set_component_property_value("Configuration|Interpolate motion", True)
    azlmbr.physics.RigidBodyRequestBus(azlmbr.bus.Event, "SetLinearDamping", test_entity.id, 0.0)
    Report.result(Tests.rigid_body_added, test_entity.has_component("PhysX Rigid Body"))
    # 4) Enter game mode and collect data for the rigid body's z co-ordinate and the time values for a series of frames
    t = []
    z = []
    helper.enter_game_mode(Tests.enter_game_mode)
    general.idle_wait_frames(1)
    game_entity_id = general.find_game_entity("test_entity")
    for frame in range(100):
        # Sample one (time, height) pair per rendered frame.
        t.append(azlmbr.components.TickRequestBus(azlmbr.bus.Broadcast, "GetTimeAtCurrentTick").GetSeconds())
        z.append(azlmbr.components.TransformBus(azlmbr.bus.Event, "GetWorldZ", game_entity_id))
        general.idle_wait_frames(1)
    helper.exit_game_mode(Tests.exit_game_mode)
    # 5) Test that the z vs t curve is sufficiently smooth (if the interpolation is not working well, the curve will be less smooth)
    # normalize the t and z data
    t = np.array(t) - np.mean(t)
    z = np.array(z) - np.mean(z)
    # fit a polynomial to the z vs t curve
    fit = np.poly1d(np.polyfit(t, z, 4))
    residual = fit(t) - z
    # calculate the coefficient of determination (a measure of how closely the polynomial curve fits the data)
    # if the coefficient is very close to 1, then the curve fits the data very well, suggesting that the rigid body motion is smooth
    # if the coefficient is significantly less than 1, then the z values vary more erratically relative to the smooth curve,
    # indicating that the motion of the rigid body is not smooth
    coefficient_of_determination = (1 - np.sum(residual * residual) / np.sum(z * z))
    Report.result(Tests.rigid_body_smooth, bool(coefficient_of_determination > COEFFICIENT_OF_DETERMINATION_THRESHOLD))
# Standalone execution: hand the test off to the editor's Report harness.
if __name__ == "__main__":
    from editor_python_test_tools.utils import Report
    Report.start_test(Tick_InterpolatedRigidBodyMotionIsSmooth)
| StarcoderdataPython |
1750098 | <gh_stars>0
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Functional tests for ZFS filesystem implementation.
These tests require the ability to create a new ZFS storage pool (using
``zpool``) and the ability to interact with that pool (using ``zfs``).
Further coverage is provided in
:module:`flocker.volume.test.test_filesystems_zfs`.
"""
import subprocess
import errno
from twisted.internet import reactor
from twisted.internet.task import cooperate
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from ..test.filesystemtests import (
make_ifilesystemsnapshots_tests, make_istoragepool_tests, create_and_copy,
copy, assertVolumesEqual,
)
from ..filesystems.errors import MaximumSizeTooSmall
from ..filesystems.zfs import (
Snapshot, ZFSSnapshots, Filesystem, StoragePool, volume_to_dataset,
zfs_command,
)
from ..service import Volume, VolumeName
from .._model import VolumeSize
from ..testtools import create_zfs_pool, service_for_pool
class IFilesystemSnapshotsTests(make_ifilesystemsnapshots_tests(
        lambda test_case: ZFSSnapshots(
            reactor, Filesystem(create_zfs_pool(test_case), None)))):
    """``IFilesystemSnapshots`` tests for ZFS.

    The factory builds a ``ZFSSnapshots`` around a fresh ZFS pool created for
    each test case, so the generic interface tests run against real ``zfs``.
    """
def build_pool(test_case):
    """
    Create a ``StoragePool``.
    :param TestCase test_case: The test in which this pool will exist.
    :return: A new ``StoragePool``.
    """
    pool_name = create_zfs_pool(test_case)
    mount_root = FilePath(test_case.mktemp())
    return StoragePool(reactor, pool_name, mount_root)
class IStoragePoolTests(make_istoragepool_tests(
        build_pool, lambda fs: ZFSSnapshots(reactor, fs))):
    """
    ``IStoragePoolTests`` for ZFS storage pool.

    Runs the generic storage-pool interface tests against pools built by
    ``build_pool`` on a real ZFS installation.
    """
# Shared volume names reused by the test cases below.
MY_VOLUME = VolumeName(namespace=u"myns", dataset_id=u"myvolume")
MY_VOLUME2 = VolumeName(namespace=u"myns", dataset_id=u"myvolume2")
class VolumeToDatasetTests(TestCase):
    """Tests for ``volume_to_dataset``."""
    def test_volume_to_dataset(self):
        """The dataset name combines the node ID, the volume namespace and
        the dataset id, dot-separated.
        """
        expected = b"my-uuid.myns.myvolume"
        vol = Volume(node_id=u"my-uuid", name=MY_VOLUME, service=None)
        self.assertEqual(volume_to_dataset(vol), expected)
class StoragePoolTests(TestCase):
    """
    ZFS-specific ``StoragePool`` tests.

    These tests shell out to the ``zfs``/``mount`` command line tools, so
    they require a ZFS-capable host with sufficient privileges.
    """
    def test_mount_root(self):
        """Mountpoints are children of the mount root."""
        mount_root = FilePath(self.mktemp())
        mount_root.makedirs()
        pool = StoragePool(reactor, create_zfs_pool(self), mount_root)
        service = service_for_pool(self, pool)
        volume = service.get(MY_VOLUME)
        d = pool.create(volume)
        def gotFilesystem(filesystem):
            self.assertEqual(filesystem.get_path(),
                             mount_root.child(volume_to_dataset(volume)))
        d.addCallback(gotFilesystem)
        return d
    def test_filesystem_identity(self):
        """
        Filesystems are created with the correct pool and dataset names.
        """
        mount_root = FilePath(self.mktemp())
        pool_name = create_zfs_pool(self)
        pool = StoragePool(reactor, pool_name, mount_root)
        service = service_for_pool(self, pool)
        volume = service.get(MY_VOLUME)
        d = pool.create(volume)
        def gotFilesystem(filesystem):
            self.assertEqual(
                filesystem,
                Filesystem(pool_name, volume_to_dataset(volume)))
        d.addCallback(gotFilesystem)
        return d
    def test_actual_mountpoint(self):
        """
        The mountpoint of the filesystem is the actual ZFS mountpoint.
        """
        mount_root = FilePath(self.mktemp())
        pool_name = create_zfs_pool(self)
        pool = StoragePool(reactor, pool_name, mount_root)
        service = service_for_pool(self, pool)
        volume = service.get(MY_VOLUME)
        d = pool.create(volume)
        def gotFilesystem(filesystem):
            # Ask the zfs CLI directly so the test does not trust FilePath.
            self.assertEqual(
                filesystem.get_path().path,
                subprocess.check_output(
                    [b"zfs", b"get", b"-H", b"-o", b"value",
                     b"mountpoint", filesystem.name]).strip())
        d.addCallback(gotFilesystem)
        return d
    def test_no_maximum_size(self):
        """
        The filesystem is created with no ``refquota`` property if the maximum
        size is unspecified.
        """
        mount_root = FilePath(self.mktemp())
        pool_name = create_zfs_pool(self)
        pool = StoragePool(reactor, pool_name, mount_root)
        service = service_for_pool(self, pool)
        volume = service.get(MY_VOLUME)
        d = pool.create(volume)
        def created_filesystem(filesystem):
            refquota = subprocess.check_output([
                b"zfs", b"get", b"-H", b"-o", b"value", b"refquota",
                filesystem.name]).strip()
            self.assertEqual(b"none", refquota)
        d.addCallback(created_filesystem)
        return d
    def test_maximum_size_sets_refquota(self):
        """
        The filesystem is created with a ``refquota`` property set to the value
        of the volume's maximum size if that value is not ``None``.
        """
        size = VolumeSize(maximum_size=1024 * 64)
        mount_root = FilePath(self.mktemp())
        pool_name = create_zfs_pool(self)
        pool = StoragePool(reactor, pool_name, mount_root)
        service = service_for_pool(self, pool)
        volume = service.get(MY_VOLUME, size=size)
        d = pool.create(volume)
        def created_filesystem(filesystem):
            refquota = subprocess.check_output([
                b"zfs", b"get",
                # Skip displaying the header
                b"-H",
                # Display machine-parseable (exact) values
                b"-p",
                # Output only the value
                b"-o", b"value",
                # Get the value of the refquota property
                b"refquota",
                # For this filesystem
                filesystem.name]).decode("ascii").strip()
            if refquota == u"none":
                refquota = None
            else:
                refquota = int(refquota)
            self.assertEqual(size.maximum_size, refquota)
        d.addCallback(created_filesystem)
        return d
    def test_change_owner_does_not_remove_non_empty_mountpoint(self):
        """
        ``StoragePool.change_owner()`` doesn't delete the contents of the
        original mountpoint, if it is non-empty.
        ZFS doesn't like to mount volumes over non-empty directories. To test
        this, we change the original mount to be a legacy mount (mounted using
        manpage:`mount(8)`).
        """
        pool = StoragePool(reactor, create_zfs_pool(self),
                           FilePath(self.mktemp()))
        service = service_for_pool(self, pool)
        volume = service.get(MY_VOLUME)
        new_volume = Volume(node_id=u"other-uuid", name=MY_VOLUME2,
                            service=service)
        original_mount = volume.get_filesystem().get_path()
        d = pool.create(volume)
        def created_filesystems(ignored):
            filesystem_name = volume.get_filesystem().name
            subprocess.check_call(['zfs', 'unmount', filesystem_name])
            # Create a file hiding under the original mount point
            original_mount.child('file').setContent('content')
            # Remount the volume at the original mount point as a legacy mount.
            subprocess.check_call(['zfs', 'set', 'mountpoint=legacy',
                                   filesystem_name])
            subprocess.check_call(['mount', '-t', 'zfs', filesystem_name,
                                   original_mount.path])
            return pool.change_owner(volume, new_volume)
        d.addCallback(created_filesystems)
        # change_owner is expected to fail with OSError (mount is non-empty).
        self.assertFailure(d, OSError)
        def changed_owner(filesystem):
            self.assertEqual(original_mount.child('file').getContent(),
                             b'content')
        d.addCallback(changed_owner)
        return d
    def test_locally_owned_created_writeable(self):
        """
        A filesystem which is created for a locally owned volume is writeable.
        """
        pool = build_pool(self)
        service = service_for_pool(self, pool)
        volume = service.get(MY_VOLUME)
        d = pool.create(volume)
        def created_filesystems(filesystem):
            # This would error if writing was not possible:
            filesystem.get_path().child(b"text").setContent(b"hello")
        d.addCallback(created_filesystems)
        return d
    def assertReadOnly(self, path):
        """
        Assert writes are not possible to the given filesystem path.
        :param FilePath path: Directory which ought to be read-only.
        """
        exc = self.assertRaises(OSError,
                                path.child(b"text").setContent, b"hello")
        self.assertEqual(exc.args[0], errno.EROFS)
    def test_remotely_owned_created_readonly(self):
        """
        A filesystem which is created for a remotely owned volume is not
        writeable.
        """
        pool = build_pool(self)
        service = service_for_pool(self, pool)
        volume = Volume(node_id=u"remoteone", name=MY_VOLUME, service=service)
        d = pool.create(volume)
        def created_filesystems(filesystem):
            self.assertReadOnly(filesystem.get_path())
        d.addCallback(created_filesystems)
        return d
    def test_locally_owned_cloned_writeable(self):
        """
        A filesystem which is cloned into a locally owned volume is writeable.
        """
        pool = build_pool(self)
        service = service_for_pool(self, pool)
        parent = service.get(MY_VOLUME2)
        volume = service.get(MY_VOLUME)
        d = pool.create(parent)
        d.addCallback(lambda _: pool.clone_to(parent, volume))
        def created_filesystems(filesystem):
            # This would error if writing was not possible:
            filesystem.get_path().child(b"text").setContent(b"hello")
        d.addCallback(created_filesystems)
        return d
    def test_remotely_owned_cloned_readonly(self):
        """
        A filesystem which is cloned into a remotely owned volume is not
        writeable.
        """
        pool = build_pool(self)
        service = service_for_pool(self, pool)
        parent = service.get(MY_VOLUME2)
        volume = Volume(node_id=u"remoteone", name=MY_VOLUME, service=service)
        d = pool.create(parent)
        d.addCallback(lambda _: pool.clone_to(parent, volume))
        def created_filesystems(filesystem):
            self.assertReadOnly(filesystem.get_path())
        d.addCallback(created_filesystems)
        return d
    def test_written_created_readonly(self):
        """
        A filesystem which is received from a remote filesystem (which is
        writable in its origin pool) is not writeable.
        """
        d = create_and_copy(self, build_pool)
        def got_volumes(copied):
            self.assertReadOnly(copied.to_volume.get_filesystem().get_path())
        d.addCallback(got_volumes)
        return d
    def test_owner_change_to_locally_becomes_writeable(self):
        """
        A filesystem which was previously remotely owned and is now locally
        owned becomes writeable.
        """
        pool = build_pool(self)
        service = service_for_pool(self, pool)
        local_volume = service.get(MY_VOLUME)
        remote_volume = Volume(node_id=u"other-uuid", name=MY_VOLUME2,
                               service=service)
        d = pool.create(remote_volume)
        def created_filesystems(ignored):
            return pool.change_owner(remote_volume, local_volume)
        d.addCallback(created_filesystems)
        def changed_owner(filesystem):
            # This would error if writing was not possible:
            filesystem.get_path().child(b"text").setContent(b"hello")
        d.addCallback(changed_owner)
        return d
    def test_owner_change_to_remote_becomes_readonly(self):
        """
        A filesystem which was previously locally owned and is now remotely
        owned becomes unwriteable.
        """
        pool = build_pool(self)
        service = service_for_pool(self, pool)
        local_volume = service.get(MY_VOLUME)
        remote_volume = Volume(node_id=u"other-uuid", name=MY_VOLUME2,
                               service=service)
        d = pool.create(local_volume)
        def created_filesystems(ignored):
            return pool.change_owner(local_volume, remote_volume)
        d.addCallback(created_filesystems)
        def changed_owner(filesystem):
            self.assertReadOnly(filesystem.get_path())
        d.addCallback(changed_owner)
        return d
    def test_write_update_to_changed_filesystem(self):
        """
        Writing an update of the contents of one pool's filesystem to
        another pool's filesystem that was previously created this way and
        was since changed drops any changes and updates its contents to
        the sender's.
        """
        d = create_and_copy(self, build_pool)
        def got_volumes(copied):
            from_volume, to_volume = copied.from_volume, copied.to_volume
            # Mutate the second volume's filesystem:
            to_filesystem = to_volume.get_filesystem()
            subprocess.check_call([b"zfs", b"set", b"readonly=off",
                                   to_filesystem.name])
            to_path = to_filesystem.get_path()
            to_path.child(b"extra").setContent(b"lalala")
            # Writing from first volume to second volume should revert
            # any changes to the second volume:
            from_path = from_volume.get_filesystem().get_path()
            from_path.child(b"anotherfile").setContent(b"hello")
            from_path.child(b"file").remove()
            copying = copy(from_volume, to_volume)
            def copied(ignored):
                assertVolumesEqual(self, from_volume, to_volume)
            copying.addCallback(copied)
            return copying
        d.addCallback(got_volumes)
        return d
class IncrementalPushTests(TestCase):
    """
    Tests for incremental push based on ZFS snapshots.

    Requires a working ``zfs`` installation; streams are produced via
    ``Filesystem.reader``.
    """
    def test_less_data(self):
        """
        Fewer bytes are available from ``Filesystem.reader`` when the reader
        and writer are found to share a snapshot.
        """
        pool = build_pool(self)
        service = service_for_pool(self, pool)
        volume = service.get(MY_VOLUME)
        creating = pool.create(volume)
        def created(filesystem):
            # Save it for later use.
            self.filesystem = filesystem
            # Put some data onto the volume so there is a baseline against
            # which to compare.
            path = filesystem.get_path()
            path.child(b"some-data").setContent(b"hello world" * 1024)
            # TODO: Snapshots are created implicitly by `reader`. So abuse
            # that fact to get a snapshot. An incremental send based on this
            # snapshot will be able to exclude the data written above.
            # Ultimately it would be better to have an API the purpose of which
            # is explicitly to take a snapshot and to use that here instead of
            # relying on `reader` to do this.
            with filesystem.reader() as reader:
                # Capture the size of this stream for later comparison.
                self.complete_size = len(reader.read())
            # Capture the snapshots that exist now so they can be given as an
            # argument to the reader method.
            snapshots = filesystem.snapshots()
            return snapshots
        loading = creating.addCallback(created)
        def loaded(snapshots):
            # Perform another send, supplying snapshots available on the writer
            # so an incremental stream can be constructed.
            with self.filesystem.reader(snapshots) as reader:
                incremental_size = len(reader.read())
            self.assertTrue(
                incremental_size < self.complete_size,
                "Bytes of data for incremental send ({}) was not fewer than "
                "bytes of data for complete send ({}).".format(
                    incremental_size, self.complete_size)
            )
        loading.addCallback(loaded)
        return loading
class FilesystemTests(TestCase):
    """
    ZFS-specific tests for ``Filesystem``.

    Requires a working ``zfs`` installation with privileges to create
    snapshots and filesystems.
    """
    def test_snapshots(self):
        """
        The ``Deferred`` returned by ``Filesystem.snapshots`` fires with a
        ``list`` of ``Snapshot`` instances corresponding to the snapshots that
        exist for the ZFS filesystem to which the ``Filesystem`` instance
        corresponds.
        """
        expected_names = [b"foo", b"bar"]
        # Create a filesystem and a couple snapshots.
        pool = build_pool(self)
        service = service_for_pool(self, pool)
        volume = service.get(MY_VOLUME)
        creating = pool.create(volume)
        def created(filesystem):
            # Save it for later.
            self.filesystem = filesystem
            # Take a couple snapshots now that there is a filesystem.
            return cooperate(
                zfs_command(
                    reactor, [
                        b"snapshot",
                        # "<dataset>@<name>" is the zfs CLI snapshot syntax.
                        # (Restored ".format(" here: the call had been
                        # corrupted to ".<EMAIL>(" by text mangling.)
                        u"{}@{}".format(filesystem.name, name).encode("ascii"),
                    ]
                )
                for name in expected_names
            ).whenDone()
        snapshotting = creating.addCallback(created)
        def snapshotted(ignored):
            # Now that some snapshots exist, interrogate the system.
            return self.filesystem.snapshots()
        loading = snapshotting.addCallback(snapshotted)
        def loaded(snapshots):
            self.assertEqual(
                list(Snapshot(name=name) for name in expected_names),
                snapshots)
        loading.addCallback(loaded)
        return loading
    def test_maximum_size_too_small(self):
        """
        If the maximum size specified for filesystem creation is smaller than
        the storage pool allows, ``MaximumSizeTooSmall`` is raised.
        """
        pool = build_pool(self)
        service = service_for_pool(self, pool)
        # This happens to be too small for any ZFS filesystem.
        volume = service.get(MY_VOLUME, size=VolumeSize(maximum_size=10))
        creating = pool.create(volume)
        return self.assertFailure(creating, MaximumSizeTooSmall)
    def test_maximum_size_enforced(self):
        """
        The maximum size specified for a filesystem is enforced by the ZFS
        implementation. Attempts to write more data than the maximum size
        fail.
        """
        pool = build_pool(self)
        service = service_for_pool(self, pool)
        # 40 MiB is an arbitrary value for the maximum size which is
        # sufficiently smaller than the current test pool size of 100 MiB.
        # Note that at the moment the usable pool size (minus the internal
        # data and reservations) is about 60 MiB.
        maximum_size = 40 * 1024 * 1024
        volume = service.get(
            MY_VOLUME, size=VolumeSize(maximum_size=maximum_size))
        creating = pool.create(volume)
        def created(filesystem):
            path = filesystem.get_path()
            # Try to write one byte more than the maximum_size of data.
            with path.child(b"ok").open("w") as fObj:
                chunk_size = 8 * 1024
                chunk = b"x" * chunk_size
                # Floor division so this also works on Python 3, where "/"
                # on ints yields a float that range() rejects.
                for i in range(maximum_size // chunk_size):
                    fObj.write(chunk)
                fObj.flush()
                with self.assertRaises(IOError) as ctx:
                    fObj.write(b"x")
                    fObj.flush()
                self.assertEqual(ctx.exception.args[0], errno.EDQUOT)
        creating.addCallback(created)
        return creating
| StarcoderdataPython |
1676902 | <gh_stars>0
# -*- coding: utf-8 -*-
from flask import (Blueprint, current_app, redirect, render_template, request,
url_for)
from pony.orm import db_session
from dashboard import db, service
from dashboard.config import config
from dashboard.exceptions import (BadDataFormat, PageOutOfRange,
PipelineNotFound, RemoteServerError)
from dashboard.history import BuildSetsHistory, pagination
# Blueprints grouping the dashboard's routes; registered by the app factory.
status = Blueprint('status', __name__, template_folder='templates')
builds = Blueprint('builds', __name__, template_folder='templates')
auth = Blueprint('auth', __name__, template_folder='templates')
# Application-wide error pages live in their own template subfolder.
error_handlers = Blueprint('error_handlers', __name__,
                           template_folder='templates/errors')
@error_handlers.app_errorhandler(BadDataFormat)
@error_handlers.app_errorhandler(RemoteServerError)
@error_handlers.app_errorhandler(Exception)
def generic_error(error):
    """Log any unhandled or upstream-data error and show the generic page."""
    message = f"{error} on URL: {request.base_url}"
    current_app.logger.error(message)
    return render_template('error.html')
@error_handlers.app_errorhandler(PipelineNotFound)
@error_handlers.app_errorhandler(PageOutOfRange)
@error_handlers.app_errorhandler(404)
def error_404(error):
    """Log a not-found condition and render the dedicated 404 page."""
    message = f"{error} on URL: {request.base_url}"
    current_app.logger.error(message)
    return render_template('error_404.html')
@status.route('/status')
@status.route('/status/<string:pipename>', methods=['GET'])
def show_status(pipename=config['default']['pipename']):
    """Show the queues of a single pipeline (default comes from config)."""
    resource = service.fetch_json_data(endpoint=service.status_endpoint())
    queues = service.make_queues(resource['pipelines'], pipename)
    return render_template('status.html', queues=queues, pipename=pipename)
@status.route('/', methods=['GET'])
def show_dashboard():
    """Render aggregate statistics across every pipeline."""
    status_data = service.fetch_json_data(endpoint=service.status_endpoint())
    stats = service.pipelines_stats(status_data['pipelines'])
    return render_template('dashboard.html', pipeline_stats=stats)
@builds.route('/builds')
@builds.route('/builds/<int:page>', methods=['GET'])
@db_session
def show_builds_history(page=1):
    """Render one page of the build-sets history for the default pipeline."""
    buildset_cfg = config['buildset']
    per_page = buildset_cfg['per_page']
    db.connect()  # bind the pony database before querying history
    buildsets = BuildSetsHistory(config['default']['pipename'], per_page)
    buildsets.fetch_page(page)
    paginator = pagination(len(buildsets), page, per_page,
                           buildset_cfg['page_links'])
    return render_template('builds_history.html', buildsets=buildsets,
                           paginator=paginator,
                           buildsets_log_url=buildset_cfg['log_url'])
@auth.route('/sign_in')
def sign_in():
    """Start the sign-in flow.

    TODO (pawelzny): redirect the user to the OpenID provider for
    authorization.  Once OpenID hands back full_name and email, the user
    should land on /signed_in, where the session is created.  Until OpenID
    is ready we jump straight to signed_in with a fake token to simulate
    the real scenario.
    """
    return redirect(url_for('auth.signed_in', token='tmp_fake_token'))  # noqa
@auth.route('/signed_in/<token>')
def signed_in(token):
    """Create a user session from *token*, then return to the dashboard."""
    user = service.fetch_user_data(token)
    service.create_user_session(user)
    return redirect(url_for('status.show_dashboard'))
@auth.route('/sign_out')
def sign_out():
    """Drop the current user session, then return to the dashboard."""
    service.drop_user_session()
    return redirect(url_for('status.show_dashboard'))
| StarcoderdataPython |
1663351 | # coding: utf-8
import os
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
from tornado.escape import json_decode, json_encode
from tornado.concurrent import Future
from object_detection.rfcn_detection import rfcn_model_instance
from classifier import classifier as clf
define("port", default=8000, help="run on the given port", type=int)
class ClothHandler(tornado.web.RequestHandler):
    """Accept an uploaded clothing photo and respond with similar images."""

    def initialize(self):
        # One service object per handler instance.
        self.cloth_service = ClothService()

    @tornado.gen.coroutine
    def post(self):
        # Expected request (multipart/form-data):
        #   name: "img", file: "xxx.jpg"
        uploaded = self.request.files.get("img")
        # Persist the uploaded picture; yields the on-disk path (or None).
        image_path = yield self.cloth_service.upload_image(uploaded)
        print("---- 文件上传完成,正在边框检测 ----")
        if image_path:
            bboxs = yield self.cloth_service.detection_model_run(image_path)
            print("---- bounding box 检测完成,正在分类搜索 ----")
            res = yield self.cloth_service.classifier_model_run(image_path, bboxs)
            print("---- 分类检测完成 ----")
        else:
            # Upload failed: report a server error with an empty payload.
            res = {"rtn": 500, "msg": "文件上传出错", "data": {}}
        self.set_status(res.get("rtn"))
        self.set_header("Content-Type", "application/json")
        self.write(json_encode(res))
        self.finish()
# Service
class ClothService(object):
    """Business logic behind the cloth-search API.

    Each method returns a tornado ``Future`` so the handler coroutine can
    ``yield`` it, even though the work itself runs synchronously.
    """

    def upload_image(self, file_metas):
        """Save the uploaded file(s) under ./realimg.

        Resolves to the path of the last file written, or ``None`` when
        nothing was uploaded.

        Bug fixes vs. the original:
        * ``set_result`` was called inside the loop, which raises on a second
          uploaded file; it is now called exactly once, after the loop.
        * When ``file_metas`` was empty the Future was never resolved, so the
          calling coroutine hung forever; it now resolves to ``None`` and the
          handler reports the upload error.
        """
        res_future = Future()
        file_path = None
        if file_metas:
            upload_path = os.path.join(os.path.dirname(__file__), "realimg")
            for meta in file_metas:
                file_path = os.path.join(upload_path, meta['filename'])
                with open(file_path, 'wb') as f:
                    f.write(meta['body'])
                print(file_path)
        res_future.set_result(file_path)
        return res_future

    def detection_model_run(self, image_path):
        """Run the R-FCN detector; resolve to a list of bbox dicts ([] on error)."""
        bboxs_future = Future()
        try:
            raw = rfcn_model_instance.detection_image(image_path)
            bboxs = [dict(
                label=x.get("class"),
                score=x.get("score"),
                bbox=dict(
                    x_min=x["bbox"][0],
                    y_min=x["bbox"][1],
                    x_max=x["bbox"][2],
                    y_max=x["bbox"][3]
                )
            ) for x in raw]
        except Exception:
            # Best-effort: detector failures degrade to "no boxes found".
            bboxs = []
        bboxs_future.set_result(bboxs)
        return bboxs_future

    def classifier_model_run(self, image_path, bboxs):
        """Crop each detected box and search for similar clothing images.

        Resolves to a response dict: rtn 200 with image paths on success,
        rtn 404 when nothing similar was found.
        """
        res_future = Future()
        res = dict(
            rtn = 200,
            msg = "",
            data = {}
        )
        res_images = []
        for bbox_data in bboxs:
            bbox = bbox_data.get("bbox")
            images = clf.similar_cloth(image_path,
                                       bbox.get("y_min", 0), bbox.get("x_min", 0),
                                       bbox.get("y_max", 0), bbox.get("x_max", 0))
            res_images += ["/" + str(img) for img in images]
        print(res_images)
        if len(res_images) > 0:
            res["msg"] = "get images path"
            res["data"] = dict(
                images = res_images
            )
        else:
            res["rtn"] = 404
            res["msg"] = "未找到任何相似图片"
        res_future.set_result(res)
        return res_future
# NOTE(review): defined but never referenced below — presumably meant for a
# StaticFileHandler on /static; confirm before removing.
static_path = os.path.join(os.path.dirname(__file__), "static")
if __name__ == "__main__":
    # Script entry point: parse --port (default 8000) and serve the search
    # API plus the static result images.
    tornado.options.parse_command_line()
    app = tornado.web.Application(handlers=[
        (r"/api/cloth_search", ClothHandler),
        (r"/img/(.*)", tornado.web.StaticFileHandler, {"path": "./img/"})
    ])
    http_server = tornado.httpserver.HTTPServer(app)
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
| StarcoderdataPython |
3322117 | <gh_stars>1-10
from PIL import Image

# Compare two images pixel by pixel and print the RGB deltas wherever they
# differ.  NOTE(review): only row y == 0 is scanned (range(1)) — presumably
# the interesting data lives in the first row; confirm whether the whole
# image should be compared.
img1 = Image.open("koala.png")
img2 = Image.open("koala2.png")
for y in range(1):
    for x in range(img1.size[0]):
        pix1 = img1.getpixel((x, y))
        pix2 = img2.getpixel((x, y))
        if pix1 != pix2:
            print(pix1[0] - pix2[0], pix1[1] - pix2[1], pix1[2] - pix2[2])
| StarcoderdataPython |
1749797 | import sys
from B1B0_fuel import Fuel
from B1B1_innergas import InnerGas
from B1B2_clad import Clad
#--------------------------------------------------------------------------------------------------
class FuelRod:
    """Axially discretized fuel rod: fuel pellets, inner gas gap and cladding.

    The axial mesh is derived from the neighbouring coolant pipe declared in
    the input deck; invalid pipe references abort the run via sys.exit().
    """

    #----------------------------------------------------------------------------------------------
    # constructor: self is a 'fuelrod' object created in B1 and indx is the index of this object in the list of fuelrods
    def __init__(self, indx, reactor):

        # INITIALIZATION
        # dictionary of the fuel rod
        dictfuelrod = reactor.control.input['fuelrod'][indx]
        # fuel rod id
        self.id = dictfuelrod['id']
        # number of axial layers specified in input for fuel rod indx
        self.nz = len(dictfuelrod['fuelid'])
        # axial mesh size
        self.dz = []
        for i in range(self.nz):
            # check existence of neighbouring fluid pipe
            jpipe = (dictfuelrod['pipeid'][i], dictfuelrod['pipenode'][i])
            if not jpipe[0] in reactor.fluid.pipeid:
                print('****ERROR: pipe id ' + jpipe[0] + ' given in \'fuelrod\' card is not specified in the \'pipe\' card of input.')
                sys.exit()
            else:
                # pipe index
                ipipe = reactor.fluid.pipeid.index(jpipe[0])
                # check existence of neighbouring fluid pipe node
                if jpipe[1] > reactor.fluid.pipennodes[ipipe]:
                    print('****ERROR: pipe node index (' + str(jpipe[1]) + ') given in \'fuelrod\' card exceeds number of nodes (' + str(reactor.fluid.pipennodes[ipipe]) + ') of pipe ' + jpipe[0])
                    sys.exit()
            # pipe node indexes (switch from 1-based input to 0-based index)
            jpipe = (ipipe, jpipe[1]-1)
            # axial layer height = pipe length / number of pipe nodes
            self.dz.append(reactor.fluid.len[jpipe[0]]/reactor.fluid.pipennodes[jpipe[0]])
        # create an object for every axial layer of fuel
        self.fuel = []
        for i in range(self.nz):
            self.fuel.append(Fuel(i, indx, self.dz[i], reactor))
        # create an object for inner gas
        self.innergas = InnerGas(indx, reactor)
        # create an object for every axial layer of clad
        self.clad = []
        for i in range(self.nz):
            self.clad.append(Clad(i, indx, reactor))

    #----------------------------------------------------------------------------------------------
    # compose right-hand side list: self is a 'fuelrod' object created in B1,
    # indx is the fuel rod index
    def compose_rhs(self, indx, reactor, t):

        # construct right-hand side list by concatenating the ODE right-hand
        # sides of every fuel layer and every clad layer, in axial order
        rhs = []
        for i in range(self.nz):
            rhs += self.fuel[i].calculate_rhs(i, indx, reactor, t)
            rhs += self.clad[i].calculate_rhs(i, indx, reactor, t)
        return rhs
| StarcoderdataPython |
1713242 | <filename>tumorstoppy/test/blossom_time_test.py
from time import time
from tumorstoppy.distances import *
# Micro-benchmark: time ten million BLOSUM62 distance computations between
# two CDR3-like sequences, using unit weights for every position.
t1=time()
s1="CASSGATGREKFF"
s2="CASSGTTFREKFF"
weights=[1]*13
for ii in range(0,10000000):
    # Earlier variant kept for reference (raw score through a sigmoid):
    #TMP=sigmoid(np.dot(weights, np.fromiter(blosum62_score(s1,s2),int)))
    TMP=blosum62_distance([s1], [s2], weights=weights, allowed_gaps=0)
t2=time()
# Report total elapsed wall-clock time in seconds.
print (t2-t1)
| StarcoderdataPython |
3217125 | # Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from .common import BaseTest
class TestElastiCacheCluster(BaseTest):
    """Tests for the `cache-cluster` resource against recorded AWS data."""

    def test_elasticache_security_group(self):
        session_factory = self.replay_flight_data(
            'test_elasticache_security_group')
        p = self.load_policy({
            'name': 'elasticache-cluster-simple',
            'resource': 'cache-cluster',
            'filters': [
                {'type': 'security-group',
                 'key': 'GroupName',
                 'value': 'default'}]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 3)
        self.assertEqual(
            sorted([r['CacheClusterId'] for r in resources]),
            ['dev-test-001', 'dev-test-002', 'dev-test-003'])

    def test_elasticache_subnet_filter(self):
        session_factory = self.replay_flight_data(
            'test_elasticache_subnet_group_filter')
        p = self.load_policy({
            'name': 'elasticache-cluster-simple',
            'resource': 'cache-cluster',
            'filters': [
                {'type': 'subnet',
                 'key': 'MapPublicIpOnLaunch',
                 'value': True}]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 3)
        self.assertEqual(
            sorted([r['CacheClusterId'] for r in resources]),
            ['dev-test-001', 'dev-test-002', 'dev-test-003'])

    def test_elasticache_cluster_simple(self):
        session_factory = self.replay_flight_data(
            'test_elasticache_cluster_simple')
        p = self.load_policy({
            'name': 'elasticache-cluster-simple',
            'resource': 'cache-cluster'},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 4)

    def test_elasticache_cluster_simple_filter(self):
        session_factory = self.replay_flight_data(
            'test_elasticache_cluster_simple')
        p = self.load_policy({
            'name': 'elasticache-cluster-simple-filter',
            'resource': 'cache-cluster',
            'filters': [
                {'type': 'value',
                 'key': 'Engine',
                 'value': 'memcached'}]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)

    def test_elasticache_snapshot_copy_cluster_tags(self):
        session_factory = self.replay_flight_data(
            'test_elasticache_copy_cluster_tags')
        # The snapshot starts with no tags at all.
        results = session_factory().client(
            'elasticache').list_tags_for_resource(
                ResourceName='arn:aws:elasticache:us-west-2:644160558196:snapshot:test-tags-backup')['TagList']
        tags = {t['Key']: t['Value'] for t in results}
        self.assertEqual(tags, {})

        policy = self.load_policy({
            'name': 'test-copy-cluster-tags',
            'resource': 'cache-snapshot',
            'actions': [{
                'type': 'copy-cluster-tags',
                'tags': ['tag_new']}]},
            config={'region': 'us-west-2'},
            session_factory=session_factory)
        resources = policy.run()
        arn = policy.resource_manager.generate_arn(
            resources[0]['SnapshotName'])
        # After the action the snapshot carries the cluster's tag.
        results = session_factory().client(
            'elasticache').list_tags_for_resource(ResourceName=arn)['TagList']
        tags = {t['Key']: t['Value'] for t in results}
        self.assertEqual(tags['tag_new'], 'test_tag')

    def test_elasticache_cluster_available(self):
        session_factory = self.replay_flight_data(
            'test_elasticache_cluster_available')
        p = self.load_policy({
            'name': 'elasticache-cluster-available',
            'resource': 'cache-cluster',
            'filters': [
                {'type': 'value',
                 'key': 'CacheClusterStatus',
                 'value': 'available'}]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['CacheClusterStatus'], "available")

    def test_elasticache_cluster_mark(self):
        session_factory = self.replay_flight_data(
            'test_elasticache_cluster_mark')
        client = session_factory().client('elasticache')
        p = self.load_policy({
            'name': 'elasticache-cluster-mark',
            'resource': 'cache-cluster',
            'filters': [
                {'type': 'value',
                 'key': 'Engine',
                 'value': 'memcached'}],
            'actions': [
                {'type': 'mark-for-op', 'days': 30,
                 'op': 'delete'}]},
            session_factory=session_factory)
        resources = p.run()
        # (the original asserted the length twice; once is enough)
        self.assertEqual(len(resources), 1)
        arn = p.resource_manager.generate_arn(
            resources[0]['CacheClusterId'])
        tags = client.list_tags_for_resource(ResourceName=arn)
        tag_map = {t['Key']: t['Value'] for t in tags['TagList']}
        self.assertTrue('maid_status' in tag_map)

    def test_elasticache_cluster_unmark(self):
        session_factory = self.replay_flight_data(
            'test_elasticache_cluster_unmark')
        client = session_factory().client('elasticache')
        p = self.load_policy({
            'name': 'elasticache-cluster-unmark',
            'resource': 'cache-cluster',
            'filters': [
                {'type': 'value',
                 'key': 'Engine',
                 'value': 'memcached'}],
            'actions': [
                {'type': 'unmark'}]},
            session_factory=session_factory)
        resources = p.run()
        arn = p.resource_manager.generate_arn(
            resources[0]['CacheClusterId'])
        self.assertEqual(len(resources), 1)
        tags = client.list_tags_for_resource(ResourceName=arn)
        # Bug fix: the original asserted "'maid_status' in tags", but
        # list_tags_for_resource returns {'TagList': [...]} so the check was
        # vacuously true. Inspect the actual tag keys instead.
        tag_keys = {t['Key'] for t in tags['TagList']}
        self.assertFalse('maid_status' in tag_keys)

    def test_elasticache_cluster_delete(self):
        session_factory = self.replay_flight_data(
            'test_elasticache_cluster_delete')
        p = self.load_policy({
            'name': 'elasticache-cluster-delete',
            'resource': 'cache-cluster',
            'filters': [
                {'type': 'value',
                 'key': 'Engine',
                 'value': 'memcached'}],
            'actions': [
                {'type': 'delete'}]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)

    def test_elasticache_cluster_snapshot(self):
        session_factory = self.replay_flight_data(
            'test_elasticache_cluster_snapshot')
        p = self.load_policy({
            'name': 'elasticache-cluster-snapshot',
            'resource': 'cache-cluster',
            'actions': [{'type': 'snapshot'}]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 4)
class TestElastiCacheSubnetGroup(BaseTest):
    """Smoke test for the `cache-subnet-group` resource query."""

    def test_elasticache_subnet_group(self):
        factory = self.replay_flight_data('test_elasticache_subnet_group')
        policy = self.load_policy(
            {'name': 'elasticache-subnet-group',
             'resource': 'cache-subnet-group'},
            session_factory=factory)
        found = policy.run()
        self.assertEqual(len(found), 1)
class TestElastiCacheSnapshot(BaseTest):
    """Tests for the `cache-snapshot` resource against recorded AWS data."""

    def test_elasticache_snapshot(self):
        session_factory = self.replay_flight_data('test_elasticache_snapshot')
        p = self.load_policy({
            'name': 'elasticache-snapshot',
            'resource': 'cache-snapshot'},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 3)

    def test_elasticache_snapshot_age_filter(self):
        factory = self.replay_flight_data('test_elasticache_snapshot')
        p = self.load_policy({
            'name': 'elasticache-snapshot-age-filter',
            'resource': 'cache-snapshot',
            'filters': [{'type': 'age', 'days': 2}]},
            session_factory=factory)
        resources = p.run()
        self.assertEqual(len(resources), 3)

    def test_elasticache_snapshot_mark(self):
        session_factory = self.replay_flight_data(
            'test_elasticache_snapshot_mark')
        client = session_factory().client('elasticache')
        p = self.load_policy({
            'name': 'elasticache-snapshot-mark',
            'resource': 'cache-snapshot',
            'filters': [
                {'type': 'value',
                 'key': 'SnapshotName',
                 'value': 'redis-snapshot-1'}],
            'actions': [
                {'type': 'mark-for-op', 'days': 30,
                 'op': 'delete'}]},
            session_factory=session_factory)
        resources = p.run()
        # (the original asserted the length twice; once is enough)
        self.assertEqual(len(resources), 1)
        arn = p.resource_manager.generate_arn(
            resources[0]['SnapshotName'])
        tags = client.list_tags_for_resource(ResourceName=arn)
        tag_map = {t['Key']: t['Value'] for t in tags['TagList']}
        self.assertTrue('maid_status' in tag_map)

    def test_elasticache_snapshot_unmark(self):
        session_factory = self.replay_flight_data(
            'test_elasticache_snapshot_unmark')
        client = session_factory().client('elasticache')
        p = self.load_policy({
            'name': 'elasticache-snapshot-unmark',
            'resource': 'cache-snapshot',
            'filters': [
                {'type': 'value',
                 'key': 'SnapshotName',
                 'value': 'redis-snapshot-1'}],
            'actions': [
                {'type': 'unmark'}]},
            session_factory=session_factory)
        resources = p.run()
        arn = p.resource_manager.generate_arn(
            resources[0]['SnapshotName'])
        self.assertEqual(len(resources), 1)
        tags = client.list_tags_for_resource(ResourceName=arn)
        # Bug fix: the original asserted "'maid_status' in tags", but
        # list_tags_for_resource returns {'TagList': [...]} so the check was
        # vacuously true. Inspect the actual tag keys instead.
        tag_keys = {t['Key'] for t in tags['TagList']}
        self.assertFalse('maid_status' in tag_keys)

    def test_elasticache_snapshot_delete(self):
        factory = self.replay_flight_data('test_elasticache_snapshot_delete')
        p = self.load_policy({
            'name': 'elasticache-snapshot-delete',
            'resource': 'cache-snapshot',
            'actions': ['delete']},
            session_factory=factory)
        resources = p.run()
        self.assertEqual(len(resources), 3)
class TestModifyVpcSecurityGroupsAction(BaseTest):
    """Tests for the `modify-security-groups` action on cache clusters.

    These replay recorded flight data, so the exact sequence of API calls
    (including the waiter) must be preserved.
    """

    def test_elasticache_remove_matched_security_groups(self):
        #
        # Test conditions:
        #   - running 2 Elasticache replication group in default VPC with 3 clusters
        #       - translates to 6 clusters
        #   - a default security group with id 'sg-7a3fcb13' exists
        #   - security group named PROD-ONLY-Test-Security-Group exists in VPC and is attached to one replication
        #     group
        #       - translates to 3 clusters marked non-compliant
        #
        # Results in 6 clusters with default Security Group attached
        session_factory = self.replay_flight_data('test_elasticache_remove_matched_security_groups')
        client = session_factory().client('elasticache', region_name='ca-central-1')

        p = self.load_policy(
            {'name': 'elasticache-remove-matched-security-groups',
             'resource': 'cache-cluster',
             'filters': [
                 {'type': 'security-group', 'key': 'GroupName', 'value': '(.*PROD-ONLY.*)', 'op': 'regex'}],
             'actions': [
                 {'type': 'modify-security-groups', 'remove': 'matched', 'isolation-group': 'sg-7a3fcb13'}]
             },
            session_factory=session_factory)
        # Companion policy used to verify the replacement took effect.
        clean_p = self.load_policy(
            {'name': 'elasticache-verifyremove-matched-security-groups',
             'resource': 'cache-cluster',
             'filters': [
                 {'type': 'security-group', 'key': 'GroupName', 'value': 'default'}]
             },
            session_factory=session_factory)

        resources = p.run()
        # Wait for the modification to settle before re-querying.
        waiter = client.get_waiter('replication_group_available')
        waiter.wait()
        clean_resources = clean_p.run()

        # clusters autoscale across AZs, so they get -001, -002, etc appended
        self.assertIn('sg-test-base', resources[0]['CacheClusterId'])
        self.assertEqual(len(resources), 3)
        self.assertEqual(len(resources[0]['SecurityGroups']), 1)
        # show that it was indeed a replacement of security groups
        self.assertEqual(len(clean_resources[0]['SecurityGroups']), 1)
        self.assertEqual(len(clean_resources), 6)

    def test_elasticache_add_security_group(self):
        # Test conditions:
        #  - running Elasticache replication group in default VPC with 3 clusters
        #  - a default security group with id 'sg-7a3fcb13' exists
        #  - security group named PROD-ONLY-Test-Security-Group exists in VPC and is not attached
        #      - translates to 3 clusters marked to get new group attached
        #
        # Results in 3 clusters with default Security Group and PROD-ONLY-Test-Security-Group
        session_factory = self.replay_flight_data('test_elasticache_add_security_group')
        client = session_factory().client('elasticache', region_name='ca-central-1')

        p = self.load_policy({
            'name': 'add-sg-to-prod-elasticache',
            'resource': 'cache-cluster',
            'filters': [
                {'type': 'security-group', 'key': 'GroupName', 'value': 'default'}
            ],
            'actions': [
                {'type': 'modify-security-groups', 'add': 'sg-6360920a'}
            ]
        },
            session_factory=session_factory)
        # Companion policy: clusters that now carry both groups.
        clean_p = self.load_policy({
            'name': 'validate-add-sg-to-prod-elasticache',
            'resource': 'cache-cluster',
            'filters': [
                {'type': 'security-group', 'key': 'GroupName', 'value': 'default'},
                {'type': 'security-group', 'key': 'GroupName', 'value': 'PROD-ONLY-Test-Security-Group'}
            ]
        },
            session_factory=session_factory)

        resources = p.run()
        # Wait for the modification to settle before re-querying.
        waiter = client.get_waiter('replication_group_available')
        waiter.wait()
        clean_resources = clean_p.run()

        self.assertEqual(len(resources), 3)
        self.assertIn('sg-test-base', resources[0]['CacheClusterId'])
        self.assertEqual(len(resources[0]['SecurityGroups']), 1)
        self.assertEqual(len(clean_resources[0]['SecurityGroups']), 2)
        self.assertEqual(len(clean_resources), 3)
| StarcoderdataPython |
1634705 | import pytest
from river.adapters.progression_counter import InMemoryProgressionCounter
from river.adapters.topics import InMemoryTopicsManager
from river.topicleaner.service import clean
# Every test in this module touches the Django ORM.
pytestmark = pytest.mark.django_db
def test_done_batch_is_cleaned(batch_factory, resource_factory):
    """A batch whose resources are fully loaded gets all its topics removed."""
    first, second = resource_factory.create_batch(2)
    batch = batch_factory.create(resources=[first, second])
    progress = {}
    for resource in batch.resources.all():
        progress[f"{batch.id}:{resource.id}"] = {"extracted": 10, "loaded": 10}
    counters = InMemoryProgressionCounter(counts=progress)
    topics = InMemoryTopicsManager(
        topics=[f"{stage}.{batch.id}"
                for stage in ("batch", "extract", "transform", "load")]
    )

    clean(counters, topics)

    assert topics._topics == set()
def test_done_batch_is_cleaned_with_failed(batch_factory, resource_factory):
    """Failed records count toward completion: loaded + failed == extracted."""
    first, second = resource_factory.create_batch(2)
    batch = batch_factory.create(resources=[first, second])
    progress = {}
    for resource in batch.resources.all():
        progress[f"{batch.id}:{resource.id}"] = {
            "extracted": 10, "loaded": 6, "failed": 4}
    counters = InMemoryProgressionCounter(counts=progress)
    topics = InMemoryTopicsManager(
        topics=[f"{stage}.{batch.id}"
                for stage in ("batch", "extract", "transform", "load")]
    )

    clean(counters, topics)

    assert topics._topics == set()
def test_ongoing_batch_is_not_cleaned(batch_factory, resource_factory):
    """Topics survive while some records are still being loaded."""
    first, second = resource_factory.create_batch(2)
    batch = batch_factory.create(resources=[first, second])
    progress = {}
    for resource in batch.resources.all():
        progress[f"{batch.id}:{resource.id}"] = {"extracted": 10, "loaded": 9}
    counters = InMemoryProgressionCounter(counts=progress)
    topics = InMemoryTopicsManager(
        topics=[f"{stage}.{batch.id}"
                for stage in ("batch", "extract", "transform", "load")]
    )

    clean(counters, topics)

    assert topics._topics != set()
def test_ongoing_batch_is_not_cleaned_with_failed(batch_factory, resource_factory):
    """Topics survive when loaded + failed still falls short of extracted."""
    first, second = resource_factory.create_batch(2)
    batch = batch_factory.create(resources=[first, second])
    progress = {}
    for resource in batch.resources.all():
        progress[f"{batch.id}:{resource.id}"] = {
            "extracted": 10, "loaded": 6, "failed": 2}
    counters = InMemoryProgressionCounter(counts=progress)
    topics = InMemoryTopicsManager(
        topics=[f"{stage}.{batch.id}"
                for stage in ("batch", "extract", "transform", "load")]
    )

    clean(counters, topics)

    assert topics._topics != set()
def test_none_counter_prevents_cleaning(batch_factory, resource_factory):
    """An unknown (None) extracted count means the batch cannot be declared done."""
    first, second = resource_factory.create_batch(2)
    batch = batch_factory.create(resources=[first, second])
    progress = {}
    for resource in batch.resources.all():
        progress[f"{batch.id}:{resource.id}"] = {"extracted": None, "loaded": 10}
    counters = InMemoryProgressionCounter(counts=progress)
    topics = InMemoryTopicsManager(
        topics=[f"{stage}.{batch.id}"
                for stage in ("batch", "extract", "transform", "load")]
    )

    clean(counters, topics)

    assert topics._topics != set()
def test_missing_counter_prevents_cleaning(batch_factory, resource_factory):
    """A resource with no counter entry at all keeps the batch alive."""
    first, second = resource_factory.create_batch(2)
    batch = batch_factory.create(resources=[first, second])
    progress = {}
    # Intentionally skip the first resource so its counter is missing.
    for resource in batch.resources.all()[1:]:
        progress[f"{batch.id}:{resource.id}"] = {"extracted": 10, "loaded": 10}
    counters = InMemoryProgressionCounter(counts=progress)
    topics = InMemoryTopicsManager(
        topics=[f"{stage}.{batch.id}"
                for stage in ("batch", "extract", "transform", "load")]
    )

    clean(counters, topics)

    assert topics._topics != set()
| StarcoderdataPython |
4826485 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Collect statistics on transactions in a public bitcoin dataset that was
exported to avro
Usage:
export GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json
python -m apache_beam.examples.bitcoin \
--compress --fastavro --output fastavro-compressed
"""
from __future__ import absolute_import
import argparse
import logging
import apache_beam as beam
from apache_beam.io.avroio import ReadFromAvro
from apache_beam.io.avroio import WriteToAvro
from apache_beam.metrics import Metrics
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
# pylint: disable=wrong-import-order, wrong-import-position
try:
from avro.schema import Parse # avro-python3 library for python3
except ImportError:
from avro.schema import parse as Parse # avro library for python2
# pylint: enable=wrong-import-order, wrong-import-position
class BitcoinTxnCountDoFn(beam.DoFn):
    """Count inputs and outputs per transaction."""

    def __init__(self):
        # Explicit two-argument super() keeps python2 compatibility,
        # matching the rest of this module.
        super(BitcoinTxnCountDoFn, self).__init__()
        owner = self.__class__
        self.txn_counter = Metrics.counter(owner, 'txns')
        self.inputs_dist = Metrics.distribution(owner, 'inputs_per_txn')
        self.outputs_dist = Metrics.distribution(owner, 'outputs_per_txn')
        self.output_amts_dist = Metrics.distribution(owner, 'output_amts')
        self.txn_amts_dist = Metrics.distribution(owner, 'txn_amts')

    def process(self, elem):
        """Update counters and distributions, then emit a summarized record."""
        inputs = elem['inputs']
        outputs = elem['outputs']
        self.txn_counter.inc()
        self.inputs_dist.update(len(inputs))
        self.outputs_dist.update(len(outputs))

        # Record every output amount and accumulate the transaction total.
        total = 0
        for output in outputs:
            amt = output['output_satoshis']
            self.output_amts_dist.update(amt)
            total += amt
        self.txn_amts_dist.update(total)

        return [{
            "transaction_id": elem["transaction_id"],
            "timestamp": elem["timestamp"],
            "block_id": elem["block_id"],
            "previous_block": elem["previous_block"],
            "num_inputs": len(inputs),
            "num_outputs": len(outputs),
            "sum_output": total,
        }]
SCHEMA = Parse('''
{
"namespace": "example.avro",
"type": "record",
"name": "Transaction",
"fields": [
{"name": "transaction_id", "type": "string"},
{"name": "timestamp", "type": "long"},
{"name": "block_id", "type": "string"},
{"name": "previous_block", "type": "string"},
{"name": "num_inputs", "type": "int"},
{"name": "num_outputs", "type": "int"},
{"name": "sum_output", "type": "long"}
]
}
''')
def run(argv=None):
    """Test Avro IO (backed by fastavro or Apache Avro) on a simple pipeline
    that transforms bitcoin transactions"""
    parser = argparse.ArgumentParser()
    parser.add_argument('--input',
                        dest='input',
                        default='gs://beam-avro-test/bitcoin/txns/*',
                        help='Input file(s) to process.')
    parser.add_argument('--output',
                        dest='output',
                        required=True,
                        help='Output file to write results to.')
    parser.add_argument('--compress',
                        dest='compress',
                        required=False,
                        action='store_true',
                        help='When set, compress the output data')
    parser.add_argument('--fastavro',
                        dest='use_fastavro',
                        required=False,
                        action='store_true',
                        help='When set, use fastavro for Avro I/O')

    # Unrecognized options are forwarded to Beam as pipeline/runner options.
    opts, pipeline_args = parser.parse_known_args(argv)

    # We use the save_main_session option because one or more DoFn's in this
    # workflow rely on global context (e.g., a module imported at module level).
    pipeline_options = PipelineOptions(pipeline_args)
    pipeline_options.view_as(SetupOptions).save_main_session = True
    p = beam.Pipeline(options=pipeline_options)

    # Read the avro file[pattern] into a PCollection.
    records = \
        p | 'read' >> ReadFromAvro(opts.input, use_fastavro=opts.use_fastavro)

    # Summarize each transaction while updating metrics.
    measured = records | 'scan' >> beam.ParDo(BitcoinTxnCountDoFn())

    # pylint: disable=expression-not-assigned
    measured | 'write' >> \
        WriteToAvro(
            opts.output,
            schema=SCHEMA,
            codec=('deflate' if opts.compress else 'null'),
            use_fastavro=opts.use_fastavro
        )

    result = p.run()
    result.wait_until_finish()

    # Do not query metrics when creating a template which doesn't run
    if (not hasattr(result, 'has_job')    # direct runner
            or result.has_job):           # not just a template creation
        metrics = result.metrics().query()

        for counter in metrics['counters']:
            logging.info("Counter: %s", counter)

        for dist in metrics['distributions']:
            logging.info("Distribution: %s", dist)
if __name__ == '__main__':
    # Script entry point: enable INFO logging so metric summaries are visible.
    logging.getLogger().setLevel(logging.INFO)
    run()
| StarcoderdataPython |
4826023 | <filename>mnist-collection/siamese.py
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from six.moves import range
from contextlib import contextmanager
import numpy as np
import os
import nnabla as nn
import nnabla.logger as logger
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
from _checkpoint_nnp_util import save_checkpoint, load_checkpoint, save_nnp
from args import get_args
from mnist_data import data_iterator_mnist
import nnabla.utils.save as save
def mnist_lenet_feature(image, test=False):
    """
    LeNet-style feature extractor for MNIST: two conv+pool stages followed
    by three affine layers, ending in a 2-D embedding.
    """
    h = F.elu(PF.convolution(image, 20, (5, 5), name='conv1'))
    h = F.average_pooling(h, (2, 2))
    h = F.elu(PF.convolution(h, 50, (5, 5), name='conv2'))
    h = F.average_pooling(h, (2, 2))
    h = F.elu(PF.affine(h, 500, name='fc3'))
    h = PF.affine(h, 10, name='fc4')
    return PF.affine(h, 2, name='fc_embed')
def mnist_lenet_siamese(x0, x1, test=False):
    """Squared L2 distance between the embeddings of two images.

    Both branches use the same parameter names, so weights are shared.
    """
    f0 = mnist_lenet_feature(x0, test)
    f1 = mnist_lenet_feature(x1, test)  # shared weights via parameter scope
    return F.sum(F.squared_error(f0, f1), axis=1)
def contrastive_loss(sd, l, margin=1.0, eps=1e-4):
    """
    Contrastive loss from squared distances `sd` and labels `l` in {0, 1}:

        f(sd, l) = l * sd + (1 - l) * max(0, margin - sqrt(sd))^2

    Composed from NNabla's basic arithmetic functions; a dedicated C++
    Function would be faster, but composition keeps this simple.
    """
    dist = (sd + eps) ** 0.5
    hinge = F.maximum_scalar(margin - dist, 0) ** 2
    return l * sd + (1 - l) * hinge
class MnistSiameseDataIterator(object):
    """Pairs two MNIST iterators into (image0, image1, same_digit) batches."""

    def __init__(self, itr0, itr1):
        self.itr0 = itr0
        self.itr1 = itr1

    def next(self):
        """Return two [0, 1]-normalized image batches and a 0/1 similarity label."""
        x0, l0 = self.itr0.next()
        x1, l1 = self.itr1.next()
        # Bug fix: the deprecated `np.int` alias was removed in NumPy 1.24;
        # the builtin `int` is the documented, behaviorally identical
        # replacement for astype().
        sim = (l0 == l1).astype(int).flatten()
        return x0 / 255., x1 / 255., sim
def siamese_data_iterator(batch_size, train, rng=None):
    """Build a siamese iterator from two independently shuffled MNIST streams."""
    first = data_iterator_mnist(batch_size, train=train, rng=rng, shuffle=True)
    second = data_iterator_mnist(batch_size, train=train, rng=rng, shuffle=True)
    return MnistSiameseDataIterator(first, second)
def train(args):
    """
    Main script.

    Builds the siamese training/validation graphs, optionally resumes from a
    checkpoint, runs the training loop with periodic validation and
    checkpointing, and saves the final parameters.
    """
    # Get context.
    from nnabla.ext_utils import get_extension_context
    logger.info("Running in %s" % args.context)
    ctx = get_extension_context(
        args.context, device_id=args.device_id, type_config=args.type_config)
    nn.set_default_context(ctx)

    # Create CNN network for both training and testing.
    margin = 1.0  # Margin for contrastive loss.

    # TRAIN
    # Create input variables.
    image0 = nn.Variable([args.batch_size, 1, 28, 28])
    image1 = nn.Variable([args.batch_size, 1, 28, 28])
    label = nn.Variable([args.batch_size])
    # Create prediction graph.
    pred = mnist_lenet_siamese(image0, image1, test=False)
    # Create loss function.
    loss = F.mean(contrastive_loss(pred, label, margin))

    # TEST
    # Create input variables.
    vimage0 = nn.Variable([args.batch_size, 1, 28, 28])
    vimage1 = nn.Variable([args.batch_size, 1, 28, 28])
    vlabel = nn.Variable([args.batch_size])
    # Create prediction graph.
    vpred = mnist_lenet_siamese(vimage0, vimage1, test=True)
    vloss = F.mean(contrastive_loss(vpred, vlabel, margin))

    # Create Solver.
    solver = S.Adam(args.learning_rate)
    solver.set_parameters(nn.get_parameters())

    start_point = 0
    if args.checkpoint is not None:
        # load weights and solver state info from specified checkpoint file.
        start_point = load_checkpoint(args.checkpoint, solver)

    # Create monitor.
    import nnabla.monitor as M
    monitor = M.Monitor(args.monitor_path)
    monitor_loss = M.MonitorSeries("Training loss", monitor, interval=10)
    monitor_time = M.MonitorTimeElapsed("Training time", monitor, interval=100)
    monitor_vloss = M.MonitorSeries("Test loss", monitor, interval=10)

    # Initialize DataIterator for MNIST.
    rng = np.random.RandomState(313)
    data = siamese_data_iterator(args.batch_size, True, rng)
    vdata = siamese_data_iterator(args.batch_size, False, rng)

    # Training loop (resumes at start_point when a checkpoint was loaded).
    for i in range(start_point, args.max_iter):
        if i % args.val_interval == 0:
            # Validation
            ve = 0.0
            for j in range(args.val_iter):
                vimage0.d, vimage1.d, vlabel.d = vdata.next()
                vloss.forward(clear_buffer=True)
                ve += vloss.d
            monitor_vloss.add(i, ve / args.val_iter)
        if i % args.model_save_interval == 0:
            # save checkpoint file
            save_checkpoint(args.model_save_path, i, solver)
        image0.d, image1.d, label.d = data.next()
        solver.zero_grad()
        # Training forward, backward and update
        loss.forward(clear_no_need_grad=True)
        loss.backward(clear_buffer=True)
        solver.weight_decay(args.weight_decay)
        solver.update()
        monitor_loss.add(i, loss.d.copy())
        monitor_time.add(i)

    # Persist the final parameters after the last iteration.
    parameter_file = os.path.join(
        args.model_save_path, 'params_%06d.h5' % args.max_iter)
    nn.save_parameters(parameter_file)
def visualize(args):
    """
    Visualizing embedded digits onto 2D space.

    Loads the trained parameters, embeds the 10k-image MNIST test set and
    scatter-plots the embedding, one color per digit class.
    """
    import matplotlib
    matplotlib.use('Agg')  # headless backend; output is a PNG file only
    import matplotlib.pyplot as plt

    batch_size = 500

    # Load parameters
    nn.load_parameters(os.path.join(args.model_save_path,
                                    'params_%06d.h5' % args.max_iter))

    # Create embedded network
    image = nn.Variable([batch_size, 1, 28, 28])
    feature = mnist_lenet_feature(image, test=True)

    # Process all images
    features = []
    labels = []

    # Prepare MNIST data iterator
    rng = np.random.RandomState(313)
    data = data_iterator_mnist(batch_size, train=False, rng=rng, shuffle=True)
    for i in range(10000 // batch_size):
        image_data, label_data = data.next()
        image.d = image_data / 255.
        feature.forward(clear_buffer=True)
        features.append(feature.d.copy())
        labels.append(label_data.copy())
    features = np.vstack(features)
    # NOTE(review): assumes each label batch is a (batch, 1) column vector so
    # vstack yields (10000, 1); `.flat` below flattens it -- confirm shape.
    labels = np.vstack(labels)

    # Visualize.  Assumes the learned embedding is 2-D (columns 0 and 1).
    f = plt.figure(figsize=(16, 9))
    for i in range(10):
        c = plt.cm.Set1(i / 10.)  # one fixed colormap color per digit
        plt.plot(features[labels.flat == i, 0].flatten(), features[
            labels.flat == i, 1].flatten(), '.', c=c)
    plt.legend(list(map(str, range(10))))
    plt.grid()
    plt.savefig(os.path.join(args.monitor_path, "embed.png"))
def save_siamese_nnp(args):
    """Export the trained feature extractor as an NNP deployment bundle.

    The bundle maps input 'x' (one 28x28 grayscale image) to output 'y'
    (the learned embedding).
    """
    image = nn.Variable([1, 1, 28, 28])
    feature = mnist_lenet_feature(image, test=True)
    contents = save_nnp({'x': image}, {'y': feature}, args.batch_size)
    save.save(os.path.join(args.model_save_path,
                           '{}_result.nnp'.format(args.net)), contents)
if __name__ == '__main__':
    # Train, then plot the learned 2-D embedding, then export the model.
    monitor_path = 'tmp.monitor.siamese'
    args = get_args(monitor_path=monitor_path,
                    model_save_path=monitor_path, max_iter=5000)
    train(args)
    visualize(args)
    save_siamese_nnp(args)
| StarcoderdataPython |
90759 | import unittest
import congressperson
from datasources import propublica
class TestCongress(unittest.TestCase):
    """Smoke tests for Congressperson; these hit the live ProPublica API."""

    def setUp(self):
        # "H001075" is a fixed ProPublica member id; fresh instance per test.
        self.cp = congressperson.Congressperson("H001075")

    def test_get_name(self):
        # NOTE(review): "<NAME>" looks like a redacted dataset placeholder;
        # the real expected member name should be restored here.
        self.assertEqual(self.cp.get_name(), "<NAME>")

    def test_get_recent_votes(self):
        # Doesn't error
        self.cp.get_recent_votes()
        self.assertEqual(len(self.cp.get_recent_votes(10)), 10)
if __name__ == '__main__':
    # Load API credentials before the suite runs; the tests call the live API.
    propublica.ProPublica.load_api_key()
    unittest.main()
| StarcoderdataPython |
3330839 | import sys
from niveristand import nivs_rt_sequence
from niveristand import realtimesequencetools
from niveristand.clientapi import BooleanValue, ChannelReference, DoubleValue, I32Value
from niveristand.clientapi import RealTimeSequence
from niveristand.errors import TranslateError, VeristandError
from niveristand.library.primitives import localhost_wait
import pytest
from testutilities import rtseqrunner, validation
a = 1
b = 2
@nivs_rt_sequence
def _return_constant():
    # Helper RT sequence used throughout the tests below; always yields 5.0.
    # NOTE: function bodies in this module are parsed by the VeriStand
    # RT-sequence translator, so the exact statement shapes are deliberate.
    a = DoubleValue(5)
    return a.value
@nivs_rt_sequence
def greater_eq_simple_numbers():
    # `5 >= 1` between plain literals; expected True both when run as
    # Python and after RT-sequence translation (see run_tests below).
    a = BooleanValue(False)
    a.value = 5 >= 1
    return a.value
@nivs_rt_sequence
def greater_eq_nivsdatatype_num():
a = BooleanValue(False)
a.value = DoubleValue(5) >= 2
return a.value
@nivs_rt_sequence
def greater_eq_num_nivsdatatype():
a = BooleanValue(False)
a.value = 5 >= DoubleValue(2)
return a.value
@nivs_rt_sequence
def greater_eq_nivsdatatype_nivsdatatype():
a = BooleanValue(False)
a.value = DoubleValue(5) >= DoubleValue(1)
return a.value
@nivs_rt_sequence
def greater_eq_nivsdatatype_nivsdatatype1():
a = BooleanValue(False)
a.value = DoubleValue(5) >= I32Value(1)
return a.value
@nivs_rt_sequence
def greater_eq_nivsdatatype_nivsdatatype2():
a = BooleanValue(False)
a.value = I32Value(5) >= DoubleValue(1)
return a.value
@nivs_rt_sequence
def greater_eq_nivsdatatype_nivsdatatype3():
a = BooleanValue(False)
a.value = I32Value(5) >= I32Value(2)
return a.value
@nivs_rt_sequence
def greater_eq_multiple_types():
a = BooleanValue(False)
a.value = DoubleValue(5) >= 2 >= 1.0
return a.value
@nivs_rt_sequence
def greater_eq_multiple_types1():
a = BooleanValue(False)
a.value = I32Value(5) >= DoubleValue(4) >= 3 >= 2.0
return a.value
@nivs_rt_sequence
def greater_eq_use_rtseq():
a = BooleanValue(False)
a.value = 6 >= _return_constant()
return a.value
@nivs_rt_sequence
def greater_eq_use_rtseq1():
a = BooleanValue(False)
a.value = _return_constant() >= 4
return a.value
@nivs_rt_sequence
def greater_eq_use_rtseq2():
a = BooleanValue(False)
a.value = DoubleValue(6) >= _return_constant()
return a.value
@nivs_rt_sequence
def greater_eq_use_rtseq3():
a = BooleanValue(False)
a.value = _return_constant() >= DoubleValue(4)
return a.value
@nivs_rt_sequence
def greater_eq_use_rtseq4():
a = BooleanValue(False)
a.value = I32Value(6) >= _return_constant()
return a.value
@nivs_rt_sequence
def greater_eq_use_rtseq5():
a = BooleanValue(False)
a.value = _return_constant() >= I32Value(1)
return a.value
@nivs_rt_sequence
def greater_eq_with_parentheses():
a = BooleanValue(False)
a.value = 5 >= (3 >= 2)
return a.value
@nivs_rt_sequence
def greater_eq_variables():
a = DoubleValue(5)
b = BooleanValue(False)
b.value = a >= 1
return b.value
@nivs_rt_sequence
def greater_eq_variables1():
a = DoubleValue(1)
b = BooleanValue(False)
b.value = 5 >= a.value
return b.value
@nivs_rt_sequence
def greater_eq_variable_variable():
a = DoubleValue(2)
b = DoubleValue(1)
c = BooleanValue(False)
c.value = a.value >= b.value
return c.value
@nivs_rt_sequence
def greater_eq_variable_variable1():
a = DoubleValue(2)
b = DoubleValue(1)
c = BooleanValue(False)
c.value = a >= b
return c.value
@nivs_rt_sequence
def greater_eq_variable_rtseq():
a = DoubleValue(6.0)
b = BooleanValue(False)
b.value = a.value >= _return_constant()
return b.value
@nivs_rt_sequence
def greater_eq_variable_rtseq1():
a = DoubleValue(1)
b = BooleanValue(False)
b.value = _return_constant() >= a.value
return b.value
@nivs_rt_sequence
def greater_eq_to_channel_ref():
    # Compare a literal against a live channel value; localhost_wait()
    # gives the gateway time to propagate the write before reading back.
    a = BooleanValue(True)
    b = ChannelReference("Aliases/DesiredRPM")
    b.value = 5.0
    localhost_wait()
    a.value = 1 >= b.value
    return a.value
@nivs_rt_sequence
def greater_eq_binary_unary():
a = BooleanValue(False)
a.value = 2 >= - 1
return a.value
@nivs_rt_sequence
def greater_eq_with_multiple_comparators():
a = BooleanValue(False)
a.value = 5 >= 4 >= 3 >= 2
return a.value
@nivs_rt_sequence
def greater_eq_complex_expr():
a = BooleanValue(False)
a.value = 2 >= (1 if 2 < 3 else 4)
return a.value
# <editor-fold desc=Invalid tests>
@nivs_rt_sequence
def greater_eq_invalid_variables():
return a.value >= b
@nivs_rt_sequence
def greater_eq_invalid_variables1():
return a.value >= b.value
@nivs_rt_sequence
def greater_eq_to_None():
a = BooleanValue(False)
a.value = None >= 1
return a.value
@nivs_rt_sequence
def greater_eq_invalid_rtseq_call():
a = BooleanValue(False)
a.value = _return_constant >= 1
return a.value
# </editor-fold>
@nivs_rt_sequence
def gt_equal_simple_numbers():
a = BooleanValue(False)
a.value = 1 >= 1
return a.value
@nivs_rt_sequence
def gt_equal_num_nivsdatatype():
a = BooleanValue(True)
a.value = DoubleValue(1) >= 2
return a.value
@nivs_rt_sequence
def gt_equal_nivsdatatype_nivsdatatype():
a = BooleanValue(False)
a.value = DoubleValue(1) >= DoubleValue(1)
return a.value
@nivs_rt_sequence
def gt_equal_nivsdatatype_nivsdatatype1():
a = BooleanValue(0)
a.value = DoubleValue(1) >= I32Value(1)
return a.value
@nivs_rt_sequence
def gt_equal_nivsdatatype_nivsdatatype2():
a = BooleanValue(0)
a.value = I32Value(1) >= DoubleValue(1)
return a.value
@nivs_rt_sequence
def gt_equal_nivsdatatype_nivsdatatype3():
a = BooleanValue(0)
a.value = I32Value(1) >= I32Value(2)
return a.value
@nivs_rt_sequence
def gt_equal_multiple_types():
a = BooleanValue(0)
a.value = DoubleValue(1) >= 1 >= 1.0
return a.value
@nivs_rt_sequence
def gt_equal_multiple_types1():
a = BooleanValue(0)
a.value = I32Value(1) >= DoubleValue(2) >= 3.0 >= 4
return a.value
@nivs_rt_sequence
def gt_equal_use_rtseq():
a = BooleanValue(0)
a.value = 5 >= _return_constant()
return a.value
@nivs_rt_sequence
def gt_equal_use_rtseq1():
a = BooleanValue(0)
a.value = _return_constant() >= 5
return a.value
@nivs_rt_sequence
def gt_equal_use_rtseq2():
a = BooleanValue(0)
a.value = DoubleValue(5) >= _return_constant()
return a.value
@nivs_rt_sequence
def gt_equal_use_rtseq3():
a = BooleanValue(0)
a.value = _return_constant() >= DoubleValue(5)
return a.value
@nivs_rt_sequence
def gt_equal_use_rtseq4():
a = BooleanValue(0)
a.value = I32Value(5) >= _return_constant()
return a.value
@nivs_rt_sequence
def gt_equal_use_rtseq5():
a = BooleanValue(0)
a.value = _return_constant() >= I32Value(5)
return a.value
@nivs_rt_sequence
def gt_equal_with_parentheses():
a = BooleanValue(True)
a.value = 1 >= (2 >= 3)
return a.value
@nivs_rt_sequence
def gt_equal_with_parentheses1():
a = BooleanValue(True)
a.value = 0 >= (DoubleValue(2) >= I32Value(2))
return a.value
@nivs_rt_sequence
def gt_equal_variables():
a = DoubleValue(1)
b = BooleanValue(0)
b.value = a >= 1
return b.value
@nivs_rt_sequence
def gt_equal_variables1():
a = DoubleValue(1)
b = BooleanValue(0)
b.value = a.value >= 1
return b.value
@nivs_rt_sequence
def gt_equal_variable_variable():
a = DoubleValue(1)
b = DoubleValue(2)
c = BooleanValue(True)
c.value = a.value >= b.value
return c.value
@nivs_rt_sequence
def gt_equal_variable_variable1():
a = DoubleValue(2)
b = DoubleValue(2)
c = BooleanValue(False)
c.value = a.value >= b.value
return c.value
@nivs_rt_sequence
def gt_equal_variable_variable2():
a = DoubleValue(2)
b = DoubleValue(2)
c = BooleanValue(False)
c.value = a >= b
return c.value
@nivs_rt_sequence
def gt_equal_variable_rtseq():
a = DoubleValue(5)
b = BooleanValue(False)
b.value = a.value >= _return_constant()
return b.value
@nivs_rt_sequence
def gt_equal_variable_rtseq1():
a = DoubleValue(5)
b = BooleanValue(False)
b.value = _return_constant() >= a.value
return b.value
@nivs_rt_sequence
def gt_equal_to_channel_ref():
a = BooleanValue(False)
b = ChannelReference("Aliases/DesiredRPM")
b.value = 1.0
localhost_wait()
a.value = 1 >= b.value
return a.value
@nivs_rt_sequence
def gt_equal_binary_unary():
a = BooleanValue(0)
a.value = -1 >= - 1
return a.value
@nivs_rt_sequence
def gt_equal_with_multiple_comparators():
a = BooleanValue(True)
a.value = 1 >= 2 >= 3 >= 4
return a.value
@nivs_rt_sequence
def gt_equal_complex_expr():
a = BooleanValue(0)
a.value = 1 >= (1 if 2 < 3 else 4)
return a.value
# <editor-fold desc=Invalid tests>
@nivs_rt_sequence
def gt_equal_invalid_variables():
return a.value >= b
@nivs_rt_sequence
def gt_equal_invalid_variables1():
return a.value >= b.value
@nivs_rt_sequence
def gt_equal_to_None():
a = BooleanValue(0)
a.value = None >= 1 # noqa: E711 the identity operator "is" is not being tested here.
return a.value
@nivs_rt_sequence
def gt_equal_invalid_rtseq_call():
a = BooleanValue(0)
a.value = _return_constant >= 1
return a.value
# </editor-fold>
# (sequence function, call arguments, expected result) triples.  Every entry
# must translate cleanly AND produce the given value when run as plain
# Python, as a host RT sequence, and inside the VM.
run_tests = [
    (greater_eq_simple_numbers, (), True),
    (greater_eq_nivsdatatype_num, (), True),
    (greater_eq_nivsdatatype_nivsdatatype, (), True),
    (greater_eq_nivsdatatype_nivsdatatype1, (), True),
    (greater_eq_nivsdatatype_nivsdatatype2, (), True),
    (greater_eq_nivsdatatype_nivsdatatype3, (), True),
    (greater_eq_with_parentheses, (), True),
    (greater_eq_variables, (), True),
    (greater_eq_variables1, (), True),
    (greater_eq_variable_variable, (), True),
    (greater_eq_variable_variable1, (), True),
    (greater_eq_complex_expr, (), True),
    (greater_eq_binary_unary, (), True),
    (gt_equal_simple_numbers, (), True),
    (gt_equal_num_nivsdatatype, (), False),
    (gt_equal_nivsdatatype_nivsdatatype, (), True),
    (gt_equal_nivsdatatype_nivsdatatype1, (), True),
    (gt_equal_nivsdatatype_nivsdatatype2, (), True),
    (gt_equal_nivsdatatype_nivsdatatype3, (), False),
    (gt_equal_with_parentheses, (), True),
    (gt_equal_with_parentheses1, (), False),
    (gt_equal_variables, (), True),
    (gt_equal_variables1, (), True),
    (gt_equal_variable_variable, (), False),
    (gt_equal_variable_variable1, (), True),
    (gt_equal_variable_variable2, (), True),
    (gt_equal_complex_expr, (), True),
    (gt_equal_binary_unary, (), True),
    (greater_eq_use_rtseq, (), True),
    (greater_eq_use_rtseq1, (), True),
    (greater_eq_use_rtseq2, (), True),
    (greater_eq_use_rtseq3, (), True),
    (greater_eq_use_rtseq4, (), True),
    (greater_eq_use_rtseq5, (), True),
    (greater_eq_variable_rtseq, (), True),
    (greater_eq_variable_rtseq1, (), True),
    (gt_equal_use_rtseq, (), True),
    (gt_equal_use_rtseq1, (), True),
    (gt_equal_use_rtseq2, (), True),
    (gt_equal_use_rtseq3, (), True),
    (gt_equal_use_rtseq4, (), True),
    (gt_equal_use_rtseq5, (), True),
    (gt_equal_variable_rtseq, (), True),
    (gt_equal_variable_rtseq1, (), True),
    (greater_eq_to_channel_ref, (), False),
    (gt_equal_to_channel_ref, (), True),
    (greater_eq_num_nivsdatatype, (), True),
]
# Sequences that must raise: TranslateError for constructs the RT
# translator rejects (module globals, None comparisons, chained or
# mixed-type comparisons), VeristandError for comparing against an
# uncalled RT-sequence function object.
fail_transform_tests = [
    (greater_eq_invalid_variables, (), TranslateError),
    (greater_eq_invalid_variables1, (), TranslateError),
    (gt_equal_invalid_variables, (), TranslateError),
    (gt_equal_invalid_variables1, (), TranslateError),
    (greater_eq_to_None, (), TranslateError),
    (gt_equal_to_None, (), TranslateError),
    (greater_eq_invalid_rtseq_call, (), VeristandError),
    (gt_equal_invalid_rtseq_call, (), VeristandError),
    (greater_eq_multiple_types, (), TranslateError),
    (greater_eq_multiple_types1, (), TranslateError),
    (greater_eq_with_multiple_comparators, (), TranslateError),
    (gt_equal_multiple_types, (), TranslateError),
    (gt_equal_multiple_types1, (), TranslateError),
    (gt_equal_with_multiple_comparators, (), TranslateError),
]
def idfunc(val):
    """pytest id helper: a callable's ``__name__``, else its ``str()`` form."""
    if hasattr(val, "__name__"):
        return val.__name__
    return str(val)
@pytest.mark.parametrize("func_name, params, expected_result", run_tests, ids=idfunc)
def test_transform(func_name, params, expected_result):
    # Translation alone must succeed; results are checked by the run tests.
    RealTimeSequence(func_name)
@pytest.mark.parametrize("func_name, params, expected_result", run_tests, ids=idfunc)
def test_runpy(func_name, params, expected_result):
    # Execute the sequence as ordinary Python and compare with the oracle.
    actual = func_name(*params)
    assert actual == expected_result
@pytest.mark.parametrize("func_name, params, expected_result", run_tests, ids=idfunc)
def test_run_py_as_rts(func_name, params, expected_result):
    # Run through the host-side RT-sequence engine; must match plain Python.
    actual = realtimesequencetools.run_py_as_rtseq(func_name)
    assert actual == expected_result
@pytest.mark.parametrize("func_name, params, expected_result", run_tests, ids=idfunc)
def test_run_in_VM(func_name, params, expected_result):
    # Execute the translated sequence inside the VeriStand VM.
    actual = rtseqrunner.run_rtseq_in_VM(func_name)
    assert actual == expected_result
@pytest.mark.parametrize("func_name, params, expected_result", fail_transform_tests, ids=idfunc)
def test_failures(func_name, params, expected_result):
    # Invalid sequences must fail both translation and plain-Python execution.
    with pytest.raises(expected_result):
        RealTimeSequence(func_name)
    with pytest.raises(expected_result):
        func_name(*params)
def test_check_all_tested():
    # Guard: every @nivs_rt_sequence in this module must appear in one of
    # the run_tests / fail_transform_tests lists above.
    validation.test_validate(sys.modules[__name__])
| StarcoderdataPython |
1621049 | # File: simple_cipher.py
# Purpose: Implement a simple shift cipher like Caesar and a more secure substitution cipher
# Programmer: <NAME>
# Course: Exercism
# Date: Monday 26 September 2016, 02:00 AM
import random
from string import ascii_lowercase
letters = ascii_lowercase
class Cipher():
    """Vigenere-style substitution cipher over lowercase ASCII letters.

    Non-letter characters are silently dropped from both input and output.
    """

    def __init__(self, key=None):
        """Store *key*, generating a random 150-letter key when omitted.

        Raises:
            ValueError: if a supplied key contains anything other than
                lowercase ASCII letters.
        """
        if not key:
            key = ''.join(random.SystemRandom().choice(ascii_lowercase)
                          for _ in range(150))
        elif not key.isalpha() or not key.islower():
            raise ValueError('Invalid key')
        self.key = key

    def encode(self, text):
        """Encode *text*, consuming one key letter per *encoded* letter.

        Indexing the key by the count of letters emitted (instead of by the
        raw text position, as before) keeps encode/decode symmetric even
        when the input contains spaces or punctuation; for letter-only
        input the output is unchanged.  Cycling with ``%`` replaces the old
        key-extension loop.
        """
        cipher = ""
        for letter in text.lower():
            if letter in ascii_lowercase:
                shift = ascii_lowercase.index(self.key[len(cipher) % len(self.key)])
                cipher += ascii_lowercase[(ascii_lowercase.index(letter) + shift) % 26]
        return cipher

    def decode(self, ciph):
        """Invert :meth:`encode` for a ciphertext made with the same key."""
        txt = ""
        for letter in ciph.lower():
            if letter in ascii_lowercase:
                shift = ascii_lowercase.index(self.key[len(txt) % len(self.key)])
                txt += ascii_lowercase[(ascii_lowercase.index(letter) - shift) % 26]
        return txt
class Caesar():
    """Classic Caesar cipher: a fixed shift of three positions."""

    _SHIFT = 3

    def _convert(self, text, sign):
        # Shared worker: shift every letter by sign * _SHIFT, drop the rest.
        out = []
        for ch in text.lower():
            if ch in ascii_lowercase:
                pos = (ascii_lowercase.index(ch) + sign * self._SHIFT) % 26
                out.append(ascii_lowercase[pos])
        return ''.join(out)

    def encode(self, text):
        """Shift each letter forward by three; non-letters are dropped."""
        return self._convert(text, 1)

    def decode(self, ciph):
        """Shift each letter back by three; non-letters are dropped."""
        return self._convert(ciph, -1)
| StarcoderdataPython |
1601415 | #!/usr/bin/env python3
# Puzzle input: one route regex such as "^ENWWW(NEEE|SSE(EE|N))$".
with open('20_input.txt', 'r') as f:
    data = f.read()
def get_example(n):
    """Return the *n*-th worked sample route from the puzzle description."""
    return {
        0: "^WNE$",
        1: "^ENWWW(NEEE|SSE(EE|N))$",
        2: "^ENNWSWW(NEWS|)SSSEEN(WNSE|)EE(SWEN|)NNN$",
        3: "^ESSWWN(E|NNENN(EESS(WNSE|)SSS|WWWSSSSE(SW|NNNE)))$",
        4: "^WSSEESWWWNW(S|NENNEEEENN(ESSSSW(NWSW|SSEN)|WSWWN(E|WWS(E|SS))))$",
    }[n]
# Compass step per direction letter, encoded on the complex plane
# (real axis = east/west, imaginary axis = north/south).
# NOTE(review): the name shadows the built-in dir(); left unchanged here.
dir = {'N': 1j, 'S': -1j, 'E': 1, 'W': -1}
def traverse(it, map=None, pos=0, depth=0):
    """Walk the route regex, recording the door-count to each visited room.

    Args:
        it: character iterator over the route (leading '^' already removed).
        map: dict {complex position: door distance}.  Created fresh per
            top-level call -- the previous mutable default ``{0: 0}`` was
            shared between calls and leaked rooms from earlier runs.
        pos: current position on the complex plane.
        depth: doors walked to reach *pos*.

    Returns:
        The position -> distance dict once the terminating '$' is reached
        (recursive group calls return None on ')').
    """
    steps = {'N': 1j, 'S': -1j, 'E': 1, 'W': -1}
    if map is None:
        map = {0: 0}
    start_pos, start_depth = pos, depth
    for c in it:
        if c in steps:
            pos += steps[c]
            if pos in map:
                # Revisiting a known room: keep its recorded (shorter) distance.
                depth = map[pos]
            else:
                depth += 1
                map[pos] = depth
        elif c == '|':
            # Alternative branch: rewind to where this group started.
            pos, depth = start_pos, start_depth
        elif c == '(':
            traverse(it, map, pos, depth)
        elif c == ')':
            return
        elif c == '$':
            return map
        else:
            print(f'Unknown character: {c}')
# data = get_example(4)
# NOTE(review): `map` shadows the builtin; it is only used in this script tail.
map = traverse(iter(data[1:]))  # data[1:] drops the leading '^'
print('Part 1:', max(map.values()))  # doors to the furthest room
print('Part 2:', len([n for n in map.values() if n >= 1000]))  # rooms >= 1000 doors away
| StarcoderdataPython |
4822214 | <reponame>ofrik/Seq2Seq
from numpy.random import seed
seed(1)  # pin NumPy's global RNG for reproducible preprocessing
from tensorflow import set_random_seed
set_random_seed(2)  # pin TensorFlow's graph-level seed (TF 1.x API)
import pandas as pd
from nltk import word_tokenize
from tqdm import tqdm
from nltk import FreqDist
import re
import numpy as np
tqdm.pandas()  # register DataFrame.progress_apply with a tqdm progress bar
def read_data():
    """
    Read the english and hebrew translations into a dataframe
    :return: Dataframe with the matched sentences
    """
    columns = ["hebrew_sentences", "english_sentences"]
    frame = pd.read_csv("data/eng_heb_sentences.csv", encoding="utf8",
                        names=columns, header=0)
    frame["english_sentences"] = frame["english_sentences"].apply(lambda s: s.lower())
    return frame
def decontracted(phrase):
    """
    Change words abbreviations into their actual word meaning
    :param phrase: sentence we want to change
    :return: modified sentence
    """
    # Ordered: the specific whole-word rules must run before the generic
    # suffix rules (e.g. "won't" before the bare "n't" expansion).
    replacements = (
        (r"won't", "will not"),
        (r"can\'t", "can not"),
        (r"n\'t", " not"),
        (r"\'re", " are"),
        (r"\'s", " is"),
        (r"\'d", " would"),
        (r"\'ll", " will"),
        (r"\'t", " not"),
        (r"\'ve", " have"),
        (r"\'m", " am"),
    )
    for pattern, expansion in replacements:
        phrase = re.sub(pattern, expansion, phrase)
    return phrase
def get_vocab(series, addtional_tokens=[], top=None):
    """
    extract the vocabulary out of an array, allow to add additional tokens to the vocabulary and choose only the top n frequent words.
    :param series: array of sentences
    :param addtional_tokens: additional tokens we want to include in the vocabulary
    :param top: top n frequent words we want to include in the vocabulary
    :return: map from a word to its numeric representation and the opposite map
    """
    # Copy instead of aliasing: `rev_vocab += ...` below would otherwise
    # mutate the caller's list -- and, worse, the shared mutable default
    # argument, so tokens from earlier calls would accumulate forever.
    rev_vocab = list(addtional_tokens)
    freq_vocab = FreqDist()
    for s in tqdm(series):
        freq_vocab.update(word_tokenize(decontracted(s)))
    print("Original vocab size %s" % len(freq_vocab))
    # Most-frequent-first; enumeration order below therefore gives small
    # ids to frequent words (after the special tokens).
    all_words_sorted = sorted(freq_vocab, key=freq_vocab.get, reverse=True)
    top_words = all_words_sorted[:top]
    rev_vocab += top_words
    vocab = {word: index for index, word in enumerate(rev_vocab)}
    return vocab, rev_vocab
def vectorize_sentences(sentences, vocab, add_prefix_token=None, add_suffix_token=None, encode=False, reverse=False):
    """
    create a vector out of a sentence
    :param sentences: array of sentences
    :param vocab: the vocabulary that will map the words into numeric representation
    :param add_prefix_token: a token to add as prefix to the sentence vector
    :param add_suffix_token: a token to add as suffix to the sentence vector
    :param encode: if to encode the words into numeric representation
    :param reverse: if to reverse the sentence vector
    :return: array of vectorized sentences
    """
    # NOTE(review): building np.array over ragged (unequal-length) lists
    # yields an object array on old NumPy but raises on NumPy >= 1.24
    # unless dtype=object is passed -- confirm the pinned NumPy version.
    vectorized_sentences = []
    for s in tqdm(sentences):
        sentence = []
        for word in word_tokenize(decontracted(s)):
            if word in vocab:
                if encode:
                    sentence.append(vocab[word])
                else:
                    sentence.append(word)
            else:
                # Out-of-vocabulary words collapse to the <UNK> token.
                if encode:
                    sentence.append(vocab["<UNK>"])
                else:
                    sentence.append("<UNK>")
        if add_prefix_token is not None:
            if encode:
                sentence = [vocab[add_prefix_token]] + sentence
            else:
                sentence = [add_prefix_token] + sentence
        if add_suffix_token is not None:
            if encode:
                sentence = sentence + [vocab[add_suffix_token]]
            else:
                sentence = sentence + [add_suffix_token]
        if reverse:
            # Reversed source ordering, as in the original seq2seq paper.
            vectorized_sentences.append(np.array(sentence[::-1]))
        else:
            vectorized_sentences.append(np.array(sentence))
    return np.array(vectorized_sentences)
if __name__ == '__main__':
    # Preprocessing pipeline kept below for reference; intentionally disabled.
    pass
    # df = read_data()
    # # df = clean_english_sentences(df)
    # eng_vocab, rev_eng_vocab = get_vocab(df["english_sentences"], addtional_tokens=["<UNK>"], top=15000)
    # heb_vocab, rev_heb_vocab = get_vocab(df["hebrew_sentences"], addtional_tokens=["<UNK>","<START>","<EOS>"], top=30000)
    # vect_eng_sentences = vectorize_sentences(df["english_sentences"], eng_vocab)
    # vect_heb_sentences = vectorize_sentences(df["hebrew_sentences"], heb_vocab)
    # pass
| StarcoderdataPython |
3200975 | """
davies.math: basic mathematics routines for reduction of survey data
This is "slow math", operating on scalar values without vector math (no `numpy` dependency).
"""
import math
# Names exported via `from davies.math import *`.
__all__ = 'hd', 'vd', 'cartesian_offset', 'angle_delta', \
    'm2ft', 'ft2m'
#
# Unit Conversions
#
def m2ft(m):
    """Convert meters to feet.

    Uses the exact international-foot definition (1 ft = 0.3048 m) so that
    ``m2ft`` and ``ft2m`` are exact inverses; the previous hard-coded
    3.28084 factor was a rounded value that broke round-trips.
    """
    return m / 0.3048
def ft2m(ft):
    """Convert feet to meters (1 ft = 0.3048 m exactly)."""
    meters_per_foot = 0.3048
    return ft * meters_per_foot
#
# Trig Routines
#
def hd(inc, sd):
    """
    Calculate horizontal distance.
    :param inc: (float) inclination angle in degrees
    :param sd: (float) slope distance in any units
    """
    inclination_rad = math.radians(inc)
    return sd * math.cos(inclination_rad)
def vd(inc, sd):
    """
    Calculate vertical distance.
    :param inc: (float) inclination angle in degrees
    :param sd: (float) slope distance in any units
    """
    inclination_rad = math.radians(inc)
    return abs(sd * math.sin(inclination_rad))
def cartesian_offset(azm, inc, sd, origin=(0, 0)):
    """
    Calculate the (X, Y) cartesian coordinate offset.
    :param azm: (float) azimuth angle in degrees (clockwise from +Y/north)
    :param inc: (float) inclination angle in degrees
    :param sd: (float) slope distance in any units
    :param origin: (tuple(float, float)) optional origin coordinate; a falsy
        value (None or empty tuple) returns the raw offset unshifted
    """
    # Renamed from `hd` so the local no longer shadows this module's hd().
    horizontal = sd * math.cos(math.radians(inc))
    x = horizontal * math.sin(math.radians(azm))
    y = horizontal * math.cos(math.radians(azm))
    return (x, y) if not origin else (x + origin[0], y + origin[1])
def angle_delta(a1, a2):
    """
    Calculate the absolute difference between two angles in degrees
    :param a1: (float) angle in degrees
    :param a2: (float) angle in degrees
    """
    raw = abs(a1 - a2)
    # Fold differences past 180 back into [0, 180] (wrap-around shortest arc).
    return 180 - abs(raw - 180)
| StarcoderdataPython |
169860 | <gh_stars>0
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from instanotifier.api.notification import views as notification_views
# DRF v1 API routes for notification resources.
# NOTE(review): django.conf.urls.url was removed in Django 4.0; migrate the
# patterns below to django.urls.re_path when upgrading.
app_name = "api-v1"

router = DefaultRouter()
router.include_root_view = False  # hide the browsable API root view
router.register(
    r'rss-search',
    notification_views.NotificationSearchViewSet,
    basename='rss'
)

urlpatterns = router.urls
urlpatterns += [
    url(
        r"^$",
        notification_views.NotificationListView.as_view(),
        name="rssnotification-list",
    ),
    # url(r'^notifications/(?P<pk>\d+)/rating/(?P<rating>[\w]+)$', notification_views.NotificationVotingView.as_view(), name='rssnotification-rating'),
    url(
        r"^notifications/rating/$",
        notification_views.NotificationVotingView.as_view(),
        name="rssnotification-rating",
    ),
    url(
        r"^dates/$",
        notification_views.NotificationDatesListView.as_view(),
        name="rssnotification-date-list",
    ),
]
| StarcoderdataPython |
3274397 | # -*- coding: utf-8 -*-
"""Application configuration."""
import os
class Config(object):
    """Base configuration."""
    # NOTE(review): hard-coded fallback credentials below (SECRET_KEY,
    # WeChat appid/secret, mail password) should live only in environment
    # variables, never in source control.
    SECRET_KEY = os.environ.get('MALL_SECRET', 'secret-key')  # TODO: Change me
    APP_DIR = os.path.abspath(os.path.dirname(__file__))  # This directory
    PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
    BCRYPT_LOG_ROUNDS = 13
    DEBUG_TB_ENABLED = False  # Disable Debug toolbar
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.
    SQLALCHEMY_TRACK_MODIFICATIONS = False  # modification tracking off (defaults to True)
    '''
    多少秒后自动回收连接。这对 MySQL 是必要的,
    它默认移除闲置多于 8 小时的连接。注意如果 使用了 MySQL ,
    Flask-SQLALchemy 自动设定 这个值为 2 小时。
    '''
    # (Translation of the note above): seconds before a pooled connection is
    # recycled.  Needed for MySQL, which drops connections idle > 8 hours;
    # Flask-SQLAlchemy defaults this to 2 hours when MySQL is used.
    SQLALCHEMY_POOL_RECYCLE = 9000
    # ckeditor config
    CKEDITOR_WIDTH = 500
    CKEDITOR_HEIGHT = 300
    ALLOWED_EXTENSIONS_EXCEL = set(['xlsx'])
    UPLOADED_PATH = 'data/uploads/'
    # disable SQL auto-commit on request teardown
    SQLALCHEMY_COMMIT_ON_TEARDOWN = False
    # image upload settings
    THUMBNAIL_FOLDER = 'data/uploads/thumbnail/'
    ALLOWED_EXTENSIONS_IMAGES = set(['png', 'jpg', 'jpeg', 'gif'])
    # maximum upload size; NOTE(review): value is 3 MB (old comment said 2MB)
    MAX_CONTENT_LENGTH = 3 * 1024 * 1024
    MALL_WECHAT_TOKEN = ''
    # WeChat sandbox/test account
    # WECHAT_TYPE = 0  # 0 = subscription account, 1 = service account
    WECHAT_APPID = os.environ.get('MALL_WECHAT_APPID') or 'wxb27de34ba5055b6b'
    WECHAT_SECRET = os.environ.get('MALL_WECHAT_SECRET') or '1ea339c37b7e356def3d9aea0da65d85'
    WECHAT_TOKEN = os.environ.get('MALL_WECHAT_TOKEN') or 'wx_get_token_1234567890acb'
    # WECHAT_APPID = 'wxb27de34ba5055b6b'
    # WECHAT_SECRET = '1ea339c37b7e356def3d9aea0da65d85'
    # WECHAT_TOKEN = 'wx_get_token_1234567890acb'
    # ckeditor config
    # ckeditor image upload URL
    # ckeditor server-side upload handler
    CKEDITOR_FILE_UPLOADER = '/upload'
    # upload destination path
    CKEDITOR_FILE_UPLOAD_URL = 'data/ckeditor/uploads/'
    # CKEDITOR_FILE_BROWSER_URL = 'data/ckeditor/uploads/'
    MAIL_SERVER = 'smtp.qq.com'
    MAIL_PORT = 465
    MAIL_USE_SSL = True
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME') or '123'
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD') or '<PASSWORD>'
    MAIL_RECIPIENTS_NAME = os.environ.get('MAIL_RECIPIENTS_NAME') or '123'
    MAIL_DEBUG = False
    SUPERADMIN_NAME = 'admin'
class ProdConfig(Config):
    """Production configuration."""
    ENV = 'prod'
    DEBUG = False
    # NOTE(review): passwordless root DSN as production fallback -- confirm
    # the environment variable is always set in real deployments.
    SQLALCHEMY_DATABASE_URI = os.environ.get('prod_mall_database_url') or \
        'mysql://root:@127.0.0.1:3306/mall'
    DEBUG_TB_ENABLED = False  # Disable Debug toolbar
class DevConfig(Config):
    """Development configuration."""
    ENV = 'dev'
    DEBUG = True
    DB_NAME = 'dev.db'
    # Put the db file in project root
    # NOTE(review): despite DB_NAME/the comment above, this uses the same
    # MySQL DSN as production, not a sqlite dev.db file -- confirm intent.
    SQLALCHEMY_DATABASE_URI = os.environ.get('prod_mall_database_url') or \
        'mysql://root:@127.0.0.1:3306/mall'
    DEBUG_TB_ENABLED = True
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.
class TestConfig(Config):
    """Test configuration."""
    TESTING = True
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'sqlite://'  # in-memory sqlite database
    BCRYPT_LOG_ROUNDS = 4  # For faster tests; needs at least 4 to avoid "ValueError: Invalid rounds"
    WTF_CSRF_ENABLED = False  # Allows form testing
| StarcoderdataPython |
def leiaInt(mgn):
    """Prompt with *mgn* until the user types a valid integer; return it.

    Invalid input prints an error message (in Portuguese) and re-prompts.
    """
    while True:
        try:
            n = int(input(mgn))
        except (ValueError, TypeError):
            print('\033[031mErro: por favor, digite um número interio válido.\033[m')
        else:
            # The trailing `break` that used to follow this return was unreachable.
            return n
def leiaFloat(mgn):
    """Prompt with *mgn* until the user types a valid number.

    NOTE(review): returns the value formatted as a two-decimal *string*
    (e.g. "3.14"), not a float.  The caller below only prints it, so that
    behaviour is preserved; return the float if this is ever reused.
    """
    while True:
        try:
            n = float(input(mgn))
        except (ValueError, TypeError):
            print('\033[031mErro: por favor, digite um número real válido.\033[m')
        else:
            # The trailing `break` that used to follow this return was unreachable.
            return f'{n:.2f}'
# Demo: read one integer and one real (the latter comes back pre-formatted
# as a two-decimal string -- see the note on leiaFloat).
nu = leiaInt('Digite um valor inteiro: ')
fl = leiaFloat('Digite um número real: ')
print(f'O valor Inteiro é {nu} e o número real é {fl}')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.