id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
9608954 | <filename>Python/TimeSeries/redeem_prediction.py
import matplotlib.pylab as plt
import pandas as pd
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import numpy as np
import sys
from statsmodels.tsa.arima_model import ARMA
# 差分操作,d代表差分序列,比如[1,1,1]可以代表3阶差分。 [12,1]可以代表第一次差分偏移量是12,第二次差分偏移量是1
def diff_ts(ts, d):
    """Apply successive differencing to a time series.

    d is a list of shift offsets: e.g. [1, 1, 1] means three rounds of
    1-step differencing, while [12, 1] means a 12-step (seasonal) difference
    followed by a 1-step difference.

    Side effects (module globals, consumed by predict_diff_recover):
        shift_ts_list        -- the shifted series used at each round.
        last_data_shift_list -- the last pre-difference value at each round,
                                needed to recover a one-step-ahead forecast.

    Returns the differenced series with NaNs dropped.
    """
    global shift_ts_list
    # Series of values needed to recover a next-day dynamic forecast.
    global last_data_shift_list  # used again during the recovery step
    shift_ts_list = []
    last_data_shift_list = []
    tmp_ts = ts
    for i in d:
        # BUGFIX: use positional .iloc[-i] instead of tmp_ts[-i]. Plain []
        # indexing with a negative int is label-based on integer indexes, and
        # its positional fallback was deprecated/removed in modern pandas.
        last_data_shift_list.append(tmp_ts.iloc[-i])
        print(last_data_shift_list)
        shift_ts = tmp_ts.shift(i)
        shift_ts_list.append(shift_ts)
        tmp_ts = tmp_ts - shift_ts
        tmp_ts.dropna(inplace=True)
    return tmp_ts
def predict_diff_recover(predict_value, d):
    """Invert the differencing applied by diff_ts.

    Relies on the module globals last_data_shift_list / shift_ts_list that
    diff_ts populated; diff_ts(..., d) must have been called with the same d
    beforehand.

    predict_value may be:
      * a float or np.ndarray -- treated as a one-step-ahead forecast; the
        stored last pre-difference values are added back, innermost round
        first.  (For an ndarray only the first element is recovered.)
      * otherwise -- assumed to be a pd.Series of in-sample predictions; each
        stored shifted series is added back element-wise.
    """
    if isinstance(predict_value, float):
        tmp_data = predict_value
        for i in range(len(d)):
            # Add back the last observed value of each differencing round,
            # starting from the most recent (innermost) round.
            tmp_data = tmp_data + last_data_shift_list[-i-1]
    elif isinstance(predict_value, np.ndarray):
        # NOTE(review): only the first forecast element is recovered here.
        tmp_data = predict_value[0]
        for i in range(len(d)):
            tmp_data = tmp_data + last_data_shift_list[-i-1]
    else:
        tmp_data = predict_value
        for i in range(len(d)):
            try:
                # Element-wise add of the shifted series undoes one round.
                tmp_data = tmp_data.add(shift_ts_list[-i-1])
            except:
                raise ValueError('What you input is not pd.Series type!')
        tmp_data.dropna(inplace=True)
    return tmp_data
def test_stationarity(timeseries):
    """Plot rolling mean/std of `timeseries` and print an ADF test report.

    Visual stationarity check (30-sample rolling statistics) plus an
    Augmented Dickey-Fuller test with AIC-selected lag order.
    """
    # Determine rolling statistics.
    # BUGFIX: pd.rolling_mean / pd.rolling_std were removed from pandas
    # (deprecated in 0.18, gone since 0.23); use the Series.rolling API.
    rolmean = timeseries.rolling(window=30).mean()
    rolstd = timeseries.rolling(window=30).std()
    # Plot rolling statistics:
    orig = plt.plot(timeseries, color='blue', label='Original')
    mean = plt.plot(rolmean, color='red', label='Rolling Mean')
    std = plt.plot(rolstd, color='black', label='Rolling Std')
    plt.legend(loc='best')
    plt.title('Rolling Mean & Standard Deviation')
    plt.show(block=False)
    # Perform Dickey-Fuller test:
    print('Results of Dickey-Fuller Test:')
    dftest = adfuller(timeseries, autolag='AIC')
    dfoutput = pd.Series(dftest[0:4], index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])
    # dftest[4] maps confidence levels ('1%', '5%', '10%') to critical values.
    for key, value in dftest[4].items():
        dfoutput['Critical Value (%s)' % key] = value
    print(dfoutput)
def proper_model(ts_log_diff, maxLag):
    """Grid-search ARMA(p, q) orders for p, q in [0, maxLag) by minimum BIC.

    Fits every (p, q) combination on ts_log_diff, skipping orders whose fit
    fails to converge.

    Returns:
        (best_p, best_q, best_model) -- best_model is the fitted ARMA results
        object, or None if every fit failed.
    """
    best_p = 0
    best_q = 0
    best_bic = sys.maxsize
    best_model = None
    for p in np.arange(maxLag):
        for q in np.arange(maxLag):
            model = ARMA(ts_log_diff, order=(p, q))
            # BUGFIX: narrowed the bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; fit failures raise ordinary
            # exceptions (e.g. LinAlgError, ValueError).
            try:
                results_ARMA = model.fit(disp=-1)
            except Exception:
                continue
            bic = results_ARMA.bic
            print(bic, best_bic)
            if bic < best_bic:
                best_p = p
                best_q = q
                best_bic = bic
                best_model = results_ARMA
    print(best_p, best_q, best_model)
    # BUGFIX: the original computed the best order but never returned it;
    # returning it is backward-compatible (the script-level caller ignores
    # the return value).
    return best_p, best_q, best_model
# Load the per-user daily balance table; the first column (user_id) becomes
# the index and column 1 (report_date) is parsed as a date.
df = pd.read_csv('./file/user_balance_table_all.csv', index_col='user_id', names=['user_id', 'report_date', 'tBalance', 'yBalance', 'total_purchase_amt', 'direct_purchase_amt', 'purchase_bal_amt', 'purchase_bank_amt', 'total_redeem_amt', 'consume_amt', 'transfer_amt', 'tftobal_amt', 'tftocard_amt', 'share_amt', 'category1', 'category2', 'category3', 'category4'
], parse_dates=[1])
# Coerce key columns; unparseable values become NaT/NaN instead of raising.
df['report_date'] = pd.to_datetime(df['report_date'], errors='coerce')
df['total_purchase_amt'] = pd.to_numeric(df['total_purchase_amt'], errors='coerce')
df['total_redeem_amt'] = pd.to_numeric(df['total_redeem_amt'], errors='coerce')
df['purchase_bank_amt'] = pd.to_numeric(df['purchase_bank_amt'], errors='coerce')
# Aggregate all users into one daily total per report date.
df = df.groupby('report_date').sum()
ts = df['total_redeem_amt']
# ts = ts['2014-04-01':'2014-06-30']
# print('原数据ADF')
# test_stationarity(ts)
ts.plot()
plt.title('Redeem before April')
plt.show()
# First-order differencing (single offset of 1).
diff_1 = diff_ts(ts, [1])
diff_1.plot()
# plt.title('Total purchase first difference')
plt.title('Total redeem first difference')
plt.show()
# ACF/PACF of the raw series, with +/-1.96/sqrt(N) significance bands.
plt.figure()
plt.axhline(y=-1.96/np.sqrt(len(ts)),linestyle='--',color='gray')
plt.axhline(y=1.96/np.sqrt(len(ts)),linestyle='--',color='gray')
plot_acf(ts, ax=plt.gca(), lags=60)
plt.show()
plt.axhline(y=-1.96/np.sqrt(len(ts)),linestyle='--',color='gray')
plt.axhline(y=1.96/np.sqrt(len(ts)),linestyle='--',color='gray')
plot_pacf(ts, ax=plt.gca(), lags=60)
plt.show()
# ADF on the first difference (printed label means "ADF of first-differenced data").
diff_1 = diff_ts(ts, [1])
print('一阶差分数据ADF')
test_stationarity(diff_1)
plt.figure()
plt.axhline(y=-1.96/np.sqrt(len(diff_1)),linestyle='--',color='gray')
plt.axhline(y=1.96/np.sqrt(len(diff_1)),linestyle='--',color='gray')
plot_acf(diff_1, ax=plt.gca(), lags=60)
plt.show()
plt.axhline(y=-1.96/np.sqrt(len(diff_1)),linestyle='--',color='gray')
plt.axhline(y=1.96/np.sqrt(len(diff_1)),linestyle='--',color='gray')
plot_pacf(diff_1, ax=plt.gca(), lags=60)
plt.show()
# Smooth with a 7-day rolling mean, then difference the smoothed series.
rol_mean = ts.rolling(window=7).mean()
rol_mean.dropna(inplace=True)
rol_mean.plot()
plt.title('Rolling Mean')
plt.show()
ts_diff_1 = diff_ts(rol_mean, [1])
ts_diff_1.plot()
plt.title('First Difference')
plt.show()
# Printed label means "ADF after rolling mean and differencing".
print('移动平均并差分后ADF')
test_stationarity(ts_diff_1)
plt.figure()
plt.axhline(y=-1.96/np.sqrt(len(ts_diff_1)),linestyle='--',color='gray')
plt.axhline(y=1.96/np.sqrt(len(ts_diff_1)),linestyle='--',color='gray')
plot_acf(ts_diff_1, ax=plt.gca(), lags=60)
plt.show()
plt.axhline(y=-1.96/np.sqrt(len(ts_diff_1)),linestyle='--',color='gray')
plt.axhline(y=1.96/np.sqrt(len(ts_diff_1)),linestyle='--',color='gray')
plot_pacf(ts_diff_1, ax=plt.gca(), lags=60)
plt.show()
# Search ARMA orders, then fit the chosen MA(7) model on the stationary series.
proper_model(ts_diff_1, 10)
model = ARMA(ts_diff_1, order=(0, 7))
result_arma = model.fit(disp=-1, method='css')
predict_ts = result_arma.predict()
predict_ts.plot(label='predicted')
ts_diff_1.plot(label='original')
plt.legend(loc='best')
plt.show()
# Undo the differencing to get back to the rolling-mean scale.
recovery_diff_1 = predict_diff_recover(predict_ts, [1])
recovery_diff_1.plot()
plt.show()
# Undo the 7-day rolling mean: x_t = 7 * mean_t - sum(x_{t-6}..x_{t-1}).
rol_sum = ts.rolling(window=6).sum()
rol_recover = recovery_diff_1*7 - rol_sum.shift(1)
rol_recover.plot(label='predicted')
ts.plot(label='original')
plt.legend(loc='best')
plt.show()
# Compare prediction vs. actuals on May 2014 only.
ts = ts['2014-05-01':'2014-05-31']
rol_recover = rol_recover['2014-05-01':'2014-05-31']
print(ts)
print(rol_recover)
| StarcoderdataPython |
4975208 | <gh_stars>0
"""Compute and write Sparameters using Meep in MPI."""
import multiprocessing
import pathlib
import pickle
import shlex
import subprocess
import sys
import time
from pathlib import Path
from typing import Optional
import pydantic
import gdsfactory as gf
from gdsfactory.component import Component
from gdsfactory.config import logger, sparameters_path
from gdsfactory.pdk import get_layer_stack
from gdsfactory.simulation import port_symmetries
from gdsfactory.simulation.get_sparameters_path import (
get_sparameters_path_meep as get_sparameters_path,
)
from gdsfactory.simulation.gmeep.write_sparameters_meep import (
remove_simulation_kwargs,
settings_write_sparameters_meep,
)
from gdsfactory.tech import LayerStack
from gdsfactory.types import ComponentSpec, PathType
ncores = multiprocessing.cpu_count()
temp_dir_default = Path(sparameters_path) / "temp"
@pydantic.validate_arguments
@pydantic.validate_arguments
def write_sparameters_meep_mpi(
    component: ComponentSpec,
    layer_stack: Optional[LayerStack] = None,
    cores: int = ncores,
    filepath: Optional[PathType] = None,
    dirpath: Optional[PathType] = None,
    temp_dir: Path = temp_dir_default,
    temp_file_str: str = "write_sparameters_meep_mpi",
    overwrite: bool = False,
    wait_to_finish: bool = True,
    **kwargs,
) -> Path:
    """Write Sparameters using multiple cores and MPI
    and returns Sparameters CSV filepath.
    Simulates each time using a different input port (by default, all of them)
    unless you specify port_symmetries:
    checks stderror and kills MPI job if there is any stderror message
    port_symmetries = {"o1":
            {
                "s11": ["s22","s33","s44"],
                "s21": ["s21","s34","s43"],
                "s31": ["s13","s24","s42"],
                "s41": ["s14","s23","s32"],
            }
        }
    Args:
        component: gdsfactory Component.
        cores: number of processors.
        filepath: to store pandas Dataframe with Sparameters in CSV format.
            Defaults to dirpath/component_.csv.
        dirpath: directory to store sparameters in CSV.
            Defaults to active Pdk.sparameters_path.
        layer_stack: contains layer to thickness, zmin and material.
            Defaults to active pdk.layer_stack.
        temp_dir: temporary directory to hold simulation files.
        temp_file_str: names of temporary files in temp_dir.
        overwrite: overwrites stored simulation results.
        wait_to_finish: if True makes the function call blocking.
    Keyword Args:
        resolution: in pixels/um (30: for coarse, 100: for fine).
        port_symmetries: Dict to specify port symmetries, to save number of simulations.
        dirpath: directory to store Sparameters.
        port_margin: margin on each side of the port.
        port_monitor_offset: offset between monitor GDS port and monitor MEEP port.
        port_source_offset: offset between source GDS port and source MEEP port.
        filepath: to store pandas Dataframe with Sparameters in CSV format.
        animate: saves a MP4 images of the simulation for inspection, and also
            outputs during computation. The name of the file is the source index.
        lazy_parallelism: toggles the flag "meep.divide_parallel_processes" to
            perform the simulations with different sources in parallel.
        dispersive: use dispersive models for materials (requires higher resolution).
        xmargin: left and right distance from component to PML.
        xmargin_left: west distance from component to PML.
        xmargin_right: east distance from component to PML.
        ymargin: top and bottom distance from component to PML.
        ymargin_top: north distance from component to PML.
        ymargin_bot: south distance from component to PML.
        extend_ports_length: to extend ports beyond the PML.
        zmargin_top: thickness for cladding above core.
        zmargin_bot: thickness for cladding below core.
        tpml: PML thickness (um).
        clad_material: material for cladding.
        is_3d: if True runs in 3D.
        wavelength_start: wavelength min (um).
        wavelength_stop: wavelength max (um).
        wavelength_points: wavelength steps.
        dfcen: delta frequency.
        port_source_name: input port name.
        port_field_monitor_name: for monitor field decay.
        port_margin: margin on each side of the port.
        distance_source_to_monitors: in (um) source goes before.
        port_source_offset: offset between source GDS port and source MEEP port.
        port_monitor_offset: offset between monitor GDS port and monitor MEEP port.
    Returns:
        filepath for sparameters CSV (wavelengths, s11a, s12m, ...)
        where `a` is the angle in radians and `m` the module
    TODO:
        write stdout to file, maybe simulation logs too
    """
    # Reject unknown keyword arguments early, before spawning any MPI job.
    for setting in kwargs.keys():
        if setting not in settings_write_sparameters_meep:
            raise ValueError(f"{setting} not in {settings_write_sparameters_meep}")
    component = gf.get_component(component)
    assert isinstance(component, Component)
    layer_stack = layer_stack or get_layer_stack()
    settings = remove_simulation_kwargs(kwargs)
    filepath = filepath or get_sparameters_path(
        component=component,
        dirpath=dirpath,
        layer_stack=layer_stack,
        **settings,
    )
    filepath = pathlib.Path(filepath)
    # Cached result short-circuit.
    if filepath.exists() and not overwrite:
        logger.info(f"Simulation {filepath!r} already exists")
        return filepath
    if filepath.exists() and overwrite:
        filepath.unlink()
    # Save all the simulation arguments for later retrieval by the MPI script.
    temp_dir.mkdir(exist_ok=True, parents=True)
    tempfile = temp_dir / temp_file_str
    parameters_file = tempfile.with_suffix(".pkl")
    kwargs.update(filepath=str(filepath))
    parameters_dict = {
        "component": component,
        "layer_stack": layer_stack,
        "overwrite": overwrite,
    }
    # Forward every remaining keyword argument to the simulation.
    parameters_dict.update(kwargs)
    with open(parameters_file, "wb") as outp:
        pickle.dump(parameters_dict, outp, pickle.HIGHEST_PROTOCOL)
    # Write the execution script that each MPI rank will run.
    script_lines = [
        "import pickle\n",
        "from gdsfactory.simulation.gmeep import write_sparameters_meep\n\n",
        'if __name__ == "__main__":\n\n',
        f"\twith open(\"{parameters_file}\", 'rb') as inp:\n",
        "\t\tparameters_dict = pickle.load(inp)\n\n" "\twrite_sparameters_meep(\n",
    ]
    script_lines.extend(
        f'\t\t{key} = parameters_dict["{key}"],\n' for key in parameters_dict
    )
    script_lines.append("\t)")
    script_file = tempfile.with_suffix(".py")
    with open(script_file, "w") as script_file_obj:
        script_file_obj.writelines(script_lines)
    command = f"mpirun -np {cores} python {script_file}"
    logger.info(command)
    logger.info(str(filepath))
    with subprocess.Popen(
        shlex.split(command),
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    ) as proc:
        # .read() blocks until the MPI job closes its pipes (normally on exit).
        stdout_text = proc.stdout.read().decode()
        stderr_text = proc.stderr.read().decode()
        print(stdout_text)
        print(stderr_text)
        sys.stdout.flush()
        sys.stderr.flush()
        # BUGFIX: the original tested `not proc.stderr`, which is always False
        # because proc.stderr is a pipe object (truthy even when empty), so
        # the blocking wait never ran. Test the captured stderr *content*
        # instead, matching the documented behavior (only wait when the MPI
        # job reported no errors).
        if wait_to_finish and not stderr_text:
            # Poll until the simulation has written its Sparameters CSV.
            while not filepath.exists():
                time.sleep(1)
    return filepath
# Convenience variants of write_sparameters_meep_mpi preconfigured with
# common port symmetries (1x1 component: two symmetric ports).
write_sparameters_meep_mpi_1x1 = gf.partial(
    write_sparameters_meep_mpi, port_symmetries=port_symmetries.port_symmetries_1x1
)
# 90-degree bend variant: extra clearance to the PML south (ymargin_bot) and
# east (xmargin_right), with symmetric 1x1 ports.
write_sparameters_meep_mpi_1x1_bend90 = gf.partial(
    write_sparameters_meep_mpi,
    ymargin_bot=3,
    ymargin=0,
    xmargin_right=3,
    port_symmetries=port_symmetries.port_symmetries_1x1,
)
if __name__ == "__main__":
    # Smoke test: simulate a short straight waveguide on 2 MPI cores.
    # NOTE(review): `run=True` is only valid if "run" appears in
    # settings_write_sparameters_meep, otherwise the call raises ValueError
    # -- confirm against write_sparameters_meep's signature.
    c1 = gf.components.straight(length=2.1)
    filepath = write_sparameters_meep_mpi(
        component=c1,
        # ymargin=3,
        cores=2,
        run=True,
        overwrite=True,
        # lazy_parallelism=True,
        lazy_parallelism=False,
        # filepath="instance_dict.csv",
    )
| StarcoderdataPython |
9774847 | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class DjangaeConfig(AppConfig):
    """AppConfig for the djangae app; applies contenttypes patches at startup."""
    name = 'djangae'
    verbose_name = _("Djangae")

    def ready(self):
        # Patch django.contrib.contenttypes once the app registry is ready
        # (import is deferred to avoid touching models at config time).
        from .patches.contenttypes import patch
        patch()
| StarcoderdataPython |
8163548 | from django.db import models
from django.conf import settings
from utils.models.abstract import DatetimeCreatedModel
class Todo(DatetimeCreatedModel):
    """A single todo item belonging to one user."""

    # Owning user; deleting the user cascades to their todos.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        related_name="todos"
    )
    title = models.CharField(max_length=100)
    description = models.CharField(max_length=150)
    # Completion state plus the (optional) moment of completion.
    is_completed = models.BooleanField(default=False)
    datetime_completed = models.DateTimeField(blank=True, null=True)

    class Meta:
        app_label = "todo"
        db_table = "todos"

    def __repr__(self) -> str:
        # Developer-facing representation: primary key, title and owner.
        return f"Todo(pk={self.pk}, title={self.title}, user={self.user})"

    def __str__(self) -> str:
        return self.title
| StarcoderdataPython |
12865042 |
import numpy as np
from sklearn.neural_network import MLPRegressor
from sklearn.exceptions import NotFittedError
from inter.interfaces import QLearning
from utility import set_all_args
class NFQ(QLearning):
    """Neural Fitted Q-iteration: Q-learning with an MLP function approximator.

    The MLP maps (state features + action index) -> Q-value. Assumes the
    QLearning interface provides self.actions (list of actions) -- TODO
    confirm against inter.interfaces.QLearning.
    """
    # Discount factor for future rewards.
    gamma = 0.9
    # Inverse temperature of the softmax (Boltzmann) action-selection policy.
    beta = 0.8
    def __init__(self, **kwargs):
        self.mlp = MLPRegressor(
            hidden_layer_sizes=(5,5), activation='logistic', batch_size=400)
        # Allow gamma/beta (and other attributes) to be overridden by kwargs.
        set_all_args(self, kwargs)
    def fit(self, data, max_iter=300, intra_step=50):
        """
        data is the triple (ss, as, rs)
        """
        # Fitted Q-iteration: each outer pass recomputes bootstrap targets
        # from the current Q estimate, then takes intra_step gradient passes.
        for _ in range(max_iter):
            inputs, targets = self.compute_inputs_targets(data)
            for _ in range(intra_step):
                self.mlp.partial_fit(inputs, targets)
    def compute_inputs_targets(self, data):
        # Build (state + action-index) inputs and Q-learning targets
        # r + gamma * max_a' Q(s', a') for every consecutive transition.
        inputs, targets = [], []
        for i in range(len(data[0])-1):
            s, a, r = list(data[0][i]), data[1][i], data[2][i]
            s_next = list(data[0][i+1])
            inputs.append(s + [self.actions.index(a)])
            # Candidate next-state inputs, one per possible action.
            to_prs = [s_next + [act] for act in range(len(self.actions))]
            try:
                q_values = self.mlp.predict(to_prs)
                targets.append(r + self.gamma * np.max(q_values))
            except NotFittedError:
                # Before the first partial_fit the network cannot predict;
                # fall back to the raw reward as the target.
                targets.append(r)
        return np.array(inputs), np.array(targets)
    def score(self, data):
        # R^2 score of the regressor on the current bootstrap targets.
        inputs, targes = self.compute_inputs_targets(data)
        return self.mlp.score(inputs, targes)
    def decision(self, state):
        # Softmax exploration: sample an action with probability
        # proportional to exp(beta * Q(state, action)).
        state = list(state)
        to_prs = [state + [act] for act in range(len(self.actions))]
        q_values = self.mlp.predict(to_prs)
        ps = np.exp(self.beta * q_values)
        a_num = np.random.choice(len(self.actions), p=ps/np.sum(ps))
        return self.actions[a_num]
| StarcoderdataPython |
383563 | """Clowder command line link controller
.. codeauthor:: <NAME> <<EMAIL>>
"""
import argparse
import clowder.util.formatting as fmt
from clowder.clowder_controller import print_clowder_name
from clowder.git.clowder_repo import ClowderRepo, print_clowder_repo_status
from clowder.environment import clowder_repo_required, ENVIRONMENT
from clowder.util.error import ExistingSymlinkError
from clowder.util.yaml import link_clowder_yaml_default, link_clowder_yaml_version
from .util import add_parser_arguments
def add_link_parser(subparsers: argparse._SubParsersAction) -> None: # noqa
    """Add clowder link parser

    Registers the 'link' subcommand, wiring its handler to link() below.

    :param argparse._SubParsersAction subparsers: Subparsers action to add parser to
    """
    parser = subparsers.add_parser('link', help='Symlink clowder yaml version')
    parser.formatter_class = argparse.RawTextHelpFormatter
    parser.set_defaults(func=link)
    # Offer the saved clowder yaml versions as the valid <version> choices.
    versions = ClowderRepo.get_saved_version_names()
    add_parser_arguments(parser, [
        (['version'], dict(metavar='<version>', choices=versions, nargs='?', default=None,
                           help=fmt.version_options_help_message('version to symlink', versions)))
    ])
@print_clowder_name
@clowder_repo_required
@print_clowder_repo_status
def link(args) -> None:
    """Clowder link command private implementation

    Symlinks either the default clowder yaml (when no version is given) or a
    named saved version into place.

    :raise ExistingSymlinkError: when a regular (non-symlink) file already
        occupies the target path
    """
    # Refuse to clobber a real file; only symlinks are managed by clowder.
    if ENVIRONMENT.clowder_yaml is not None and not ENVIRONMENT.clowder_yaml.is_symlink():
        raise ExistingSymlinkError(f"Found non-symlink file {fmt.path(ENVIRONMENT.clowder_yaml)} at target path")
    if args.version is None:
        link_clowder_yaml_default(ENVIRONMENT.clowder_dir)
    else:
        link_clowder_yaml_version(ENVIRONMENT.clowder_dir, args.version)
| StarcoderdataPython |
4973668 | from styx_msgs.msg import TrafficLight
import numpy as np
import tensorflow as tf
import rospy
class TLClassifier(object):
    """Traffic-light classifier backed by a frozen SSD-Inception TensorFlow graph."""
    def __init__(self, site = False):
        #TODO load classifier
        self.loaded = False
        if site:
            # NOTE(review): no site-specific model is wired up yet; with
            # site=True the classifier stays unloaded and always returns
            # UNKNOWN.
            pass
        else:
            self.detection_graph = self.load_graph('ssd_inception.pb')
            self.input_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
            self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
            self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
            self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
            self.sess = tf.Session(graph=self.detection_graph)
            # Warm-up inference on a dummy 300x400 image so the first real
            # frame does not pay the graph-initialization cost.
            self.sess.run([self.detection_boxes, self.detection_scores, self.detection_classes], feed_dict={self.input_tensor: np.zeros((1, 300, 400, 3), np.uint8)})
            self.loaded = True
    def load_graph(self, graph_file):
        """Loads a frozen inference graph"""
        graph = tf.Graph()
        with graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(graph_file, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        return graph
    def get_classification(self, image):
        """Determines the color of the traffic light in the image
        Args:
            image (cv::Mat): image containing the traffic light
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        #TODO implement light color prediction
        if not self.loaded:
            return TrafficLight.UNKNOWN
        ret = TrafficLight.UNKNOWN
        #with tf.Session(graph=self.detection_graph) as sess:
        (boxes, scores, classes) = self.sess.run([self.detection_boxes, self.detection_scores, self.detection_classes], feed_dict={self.input_tensor: image})
        #rospy.loginfo(str(zip(classes, scores)))
        boxes = np.squeeze(boxes)
        scores = np.squeeze(scores)
        classes = np.squeeze(classes)
        #rospy.loginfo('classes:{}, scores:{}'.format(classes, scores))
        if len(classes) > 0:
            # Take the class of the highest-scoring detection.
            # NOTE(review): assumes the model's class ids coincide with the
            # TrafficLight constants -- confirm against the label map.
            ret = int(classes[np.argmax(scores)])
        return ret
| StarcoderdataPython |
377770 | #!/usr/bin/env python3
import math
import time
def main(n: int = 100) -> int:
    """Print and return the sum of the decimal digits of n! (Project Euler 20).

    Generalized from the hard-coded 100 to a parameter with the same default,
    so existing callers are unaffected; the result is also returned so it can
    be used programmatically.
    """
    digit_sum = sum(int(x) for x in str(math.factorial(n)))
    print("Solution:", digit_sum)
    return digit_sum
if __name__ == "__main__":
    # Time the solution and report wall-clock duration with microsecond precision.
    start = time.time()
    main()
    end = time.time()
    print("Duration: {0:0.6f}s".format(end - start))
| StarcoderdataPython |
5025230 | #!/usr/bin/env fontforge -lang=py -script
# -*- coding: utf-8 -*-
#
# converted font format otf to ttf. (add gasp table).
#
# need: mkdir ttf
# all convert: ls -1 | grep .otf | xargs -I{} fontforge --script font_extra.py {}
import fontforge
import sys
import os
def get_gasp():
    """Return the gasp table entries to embed in the generated TTF.

    Each entry is (max_ppem, rendering_flags): antialias only up to 8 ppem,
    antialias + symmetric smoothing up to 16 ppem, and everything (including
    grid fitting) for all larger sizes (65535 = no upper bound).
    """
    gasp_ranges = (
        (8, ('antialias',)),
        (16, ('antialias', 'symmetric-smoothing')),
        (65535, ('gridfit', 'antialias', 'symmetric-smoothing', 'gridfit+smoothing')),
    )
    return gasp_ranges
# Validate CLI usage: fontforge passes the script name plus one font path.
if (2 != len(sys.argv)):
    # BUGFIX: this was a Python 2 print *statement* (a syntax error under
    # Python 3), inconsistent with the print() calls used below.
    print('Usage: # fontforge --script %s fontfile_path' % sys.argv[0])
    quit()
target_font_path = sys.argv[1]
print("target:" + target_font_path)
target_font_name = os.path.basename(target_font_path)
target_font_name, ext = os.path.splitext(target_font_name)
font = fontforge.open(target_font_path)
# Configure grid fitting via the gasp table (comment translated from Japanese).
font.gasp_version = 1
font.gasp = get_gasp()
# Emit the TTF into ./ttf/ (directory must exist; see header: `mkdir ttf`).
font.generate("./ttf/" + target_font_name + ".ttf", '', ('short-post', 'opentype', 'PfEd-lookups'))
font.close()
| StarcoderdataPython |
4981702 | <filename>grocery/products/admin.py
from django.contrib import admin
from .models import Product, Category, Offer
# Register your models here.
# Expose the catalogue models in the Django admin with the default ModelAdmin.
admin.site.register(Product)
admin.site.register(Category)
admin.site.register(Offer)
| StarcoderdataPython |
1634143 | #!/usr/bin/python3
# Author: @AgbaD | @Agba_dr3
def missing(arr):
    """Return the smallest positive integer not present in arr.

    Works for any iterable of ints, including empty input and inputs with
    no positive values (both return 1).

    Fixes two defects in the original: it returned a wrong value (or crashed)
    when arr was empty or contained only non-positive numbers, and its
    `i not in list` membership test made it O(n^2). Using a set gives O(n)
    expected time.
    """
    seen = set(arr)
    candidate = 1
    # The answer is at most len(arr) + 1, so this loop terminates quickly.
    while candidate in seen:
        candidate += 1
    return candidate
if __name__ == "__main__":
    # Expected output: 2 (smallest positive absent from arr1), then 3 (arr2).
    arr1 = [3, 4, -1, 1]
    arr2 = [1, 2, 0]
    print(missing(arr1))
    print(missing(arr2))
| StarcoderdataPython |
8162715 | <gh_stars>1000+
"""
Candlestick Chart
=================
A candlestick chart inspired from `Protovis <http://mbostock.github.io/protovis/ex/candlestick.html>`_.
This example shows the performance of the Chicago Board Options Exchange `Volatility Index <https://en.wikipedia.org/wiki/VIX>`_ (VIX)
in the summer of 2009. The thick bar represents the opening and closing prices,
while the thin bar shows intraday high and low prices; if the index closed higher on a given day, the bars are colored green rather than red.
"""
# category: other charts
import altair as alt
from vega_datasets import data
# VIX open/high/low/close sample data (summer 2009).
source = data.ohlc()
# Green when the index closed at or above its open, red otherwise.
open_close_color = alt.condition("datum.open <= datum.close",
                                 alt.value("#06982d"),
                                 alt.value("#ae1325"))
# Shared x encoding and up/down color for both layers.
base = alt.Chart(source).encode(
    alt.X('date:T',
          axis=alt.Axis(
              format='%m/%d',
              labelAngle=-45,
              title='Date in 2009'
          )
          ),
    color=open_close_color
)
# Thin rule: the intraday low-high price range.
rule = base.mark_rule().encode(
    alt.Y(
        'low:Q',
        title='Price',
        scale=alt.Scale(zero=False),
    ),
    alt.Y2('high:Q')
)
# Thick bar: the open-close range.
bar = base.mark_bar().encode(
    alt.Y('open:Q'),
    alt.Y2('close:Q')
)
# Layer the two marks into the final candlestick chart.
rule + bar
221922 | #-*- coding:utf-8 -*-
__author__ ="https://github.com/mskhrk"
import os
from cfg_api import *
# Path of the config file next to this script (Windows path separator).
my_dir=os.path.dirname(__file__)
cfg_name='test.cfg'
cfg_path=my_dir+'\\'+cfg_name
# Working/target directories to index into the config file.
work_dir='C:/Users/user/Desktop'
teaget_dir='C:/Users/user/Desktop'
teaget_dir2='C:/Users/user/Desktop/ttr'
# Read the files of the specified directory into the cfg (comment translated
# from Chinese); cfg_read_dir is non-recursive, cfg_read_alldir is recursive.
#cfg_read_dir(cfg_path,teaget_dir)
cfg_read_alldir(cfg_path,teaget_dir)
'''
api_example
cfg_add_section(cfg_path,'Swection1')
cfg_remove_section(cfg_path,'Swection1')
cfg_add_data(cfg_path,'Swection2','dDdd','111')
cfg_remove_option(cfg_path,'Swection2','ddd')
cfg_get_data(cfg_path,'filelist2','0')
cfg_control_get_newid(cfg_path)
cfg_remove_id(cfg_path,'5')
cfg_read_dir(cfg_path,teaget_dir)
cfg_read_alldir(cfg_path,teaget_dir)
'''
| StarcoderdataPython |
3582693 | <filename>python/python-odata-try.py
from odata import ODataService
#import pdb;pdb.set_trace()
# Public demo Northwind OData v4 service.
url = 'http://services.odata.org/V4/Northwind/Northwind.svc/'
# reflect_entities introspects the service $metadata to build entity classes.
Service = ODataService(url, reflect_entities=True)
Supplier = Service.entities['Supplier']
# First two suppliers, ordered alphabetically by company name.
query = Service.query(Supplier)
query = query.limit(2)
query = query.order_by(Supplier.CompanyName.asc())
for supplier in query:
    print('Company:', supplier.CompanyName)
    # Navigation property: products offered by this supplier.
    for product in supplier.Products:
        print('- Product:', product.ProductName)
| StarcoderdataPython |
4875344 | # DEPRECATED - this file is dead, and should be removed by the end of the redesign project
from portality.formcontext.formhelper import FormHelperBS3
from portality.formcontext.choices import Choices
from copy import deepcopy
class Renderer(object):
    """Base renderer: turns a form context's fields into HTML via FormHelperBS3.

    Subclasses populate self.FIELD_GROUPS, a dict mapping group name to an
    ordered list of one-key dicts {field_name: render_config}.
    """
    def __init__(self):
        self.FIELD_GROUPS = {}
        self.fh = FormHelperBS3()
        self._error_fields = []
        self._disabled_fields = []
        self._disable_all_fields = False
        self._highlight_completable_fields = False
    def check_field_group_exists(self, field_group_name):
        """ Return true if the field group exists in this form """
        group_def = self.FIELD_GROUPS.get(field_group_name)
        if group_def is None:
            return False
        else:
            return True
    def render_field_group(self, form_context, field_group_name=None, group_cfg=None):
        """Render one field group as an HTML fragment (whole form when no
        group is named; empty string when the group does not exist)."""
        if field_group_name is None:
            return self._render_all(form_context)
        # get the group definition
        group_def = self.FIELD_GROUPS.get(field_group_name)
        if group_def is None:
            return ""
        # build the frag
        frag = ""
        for entry in group_def:
            field_name = list(entry.keys())[0]
            config = entry.get(field_name)
            # Deep-copy so per-render tweaks never leak back into FIELD_GROUPS.
            config = deepcopy(config)
            config = self._rewrite_extra_fields(form_context, config)
            field = form_context.form[field_name]
            if field_name in self.disabled_fields or self._disable_all_fields is True:
                config["disabled"] = "disabled"
            if self._highlight_completable_fields is True:
                # Mark fields that still fail validation as "complete me".
                valid = field.validate(form_context.form)
                config["complete_me"] = not valid
            if group_cfg is not None:
                config.update(group_cfg)
            frag += self.fh.render_field(field, **config)
        return frag
    @property
    def error_fields(self):
        return self._error_fields
    def set_error_fields(self, fields):
        self._error_fields = fields
    @property
    def disabled_fields(self):
        return self._disabled_fields
    def set_disabled_fields(self, fields):
        self._disabled_fields = fields
    def disable_all_fields(self, disable):
        self._disable_all_fields = disable
    def _rewrite_extra_fields(self, form_context, config):
        # Replace option -> field-name references in "extra_input_fields"
        # with the live form field objects so templates can render them inline.
        if "extra_input_fields" in config:
            config = deepcopy(config)
            for opt, field_ref in config.get("extra_input_fields").items():
                extra_field = form_context.form[field_ref]
                config["extra_input_fields"][opt] = extra_field
        return config
    def _render_all(self, form_context):
        # NOTE(review): this calls fh.render_field(form_context, name) while
        # render_field_group calls fh.render_field(field, **config) -- confirm
        # which signature FormHelperBS3.render_field actually supports.
        frag = ""
        for field in form_context.form:
            frag += self.fh.render_field(form_context, field.short_name)
        return frag
    def find_field(self, field, field_group):
        # Index of `field` within the group; implicitly None when absent.
        for index, item in enumerate(self.FIELD_GROUPS[field_group]):
            if field in item:
                return index
    def insert_field_after(self, field_to_insert, after_this_field, field_group):
        # NOTE(review): raises TypeError (None + 1) if after_this_field is
        # not present in the group -- callers must pass an existing field.
        self.FIELD_GROUPS[field_group].insert(
            self.find_field(after_this_field, field_group) + 1,
            field_to_insert
        )
class BasicJournalInformationRenderer(Renderer):
    """Shared renderer for the core journal-information form sections.

    Defines the common field groups (basic_info, editorial_process, openness,
    content_licensing, copyright) plus question numbering and first-error
    tagging across them.
    """
    def __init__(self):
        super(BasicJournalInformationRenderer, self).__init__()
        # allow the subclass to define the order the groups should be considered in. This is useful for
        # numbering questions and determining first errors
        self.NUMBERING_ORDER = ["basic_info", "editorial_process", "openness", "content_licensing", "copyright"]
        self.ERROR_CHECK_ORDER = deepcopy(self.NUMBERING_ORDER)
        # define the basic field groups
        self.FIELD_GROUPS = {
            "basic_info" : [
                {"title" : {"class": "input-xlarge"}},
                {"url" : {"class": "input-xlarge"}},
                {"alternative_title" : {"class": "input-xlarge"}},
                {"pissn" : {"class": "input-small", "size": "9", "maxlength": "9"}},
                {"eissn" : {"class": "input-small", "size": "9", "maxlength": "9"}},
                {"publisher" : {"class": "input-xlarge"}},
                {"society_institution" : {"class": "input-xlarge"}},
                {"platform" : {"class": "input-xlarge"}},
                {"contact_name" : {}},
                {"contact_email" : {}},
                {"confirm_contact_email" : {}},
                {"country" : {"class": "input-large"}},
                {"processing_charges" : {}},
                {"processing_charges_url" : {"class": "input-xlarge"}},
                {"processing_charges_amount" : {"class": "input-mini"}},
                {"processing_charges_currency" : {"class": "input-large"}},
                {"submission_charges" : {}},
                {"submission_charges_url" : {"class": "input-xlarge"}},
                {"submission_charges_amount" : {"class": "input-mini"}},
                {"submission_charges_currency" : {"class": "input-large"}},
                {"waiver_policy" : {}},
                {"waiver_policy_url" : {"class": "input-xlarge"}},
                {
                    "digital_archiving_policy" : {
                        "extra_input_fields" : {
                            Choices.digital_archiving_policy_val("other") : "digital_archiving_policy_other",
                            Choices.digital_archiving_policy_val("library") : "digital_archiving_policy_library"
                        }
                    }
                },
                {"digital_archiving_policy_url" : {"class": "input-xlarge"}},
                {"crawl_permission" : {}},
                {
                    "article_identifiers" : {
                        "extra_input_fields": {
                            Choices.article_identifiers_val("other") : "article_identifiers_other"
                        }
                    }
                },
                {"download_statistics" : {}},
                {"download_statistics_url" : {"class": "input-xlarge"}},
                {"first_fulltext_oa_year" : {"class": "input-mini"}},
                {
                    "fulltext_format" : {
                        "extra_input_fields": {
                            Choices.fulltext_format_val("other") : "fulltext_format_other"
                        }
                    }
                },
                {"keywords" : {"class": "input-xlarge"}},
                {"languages" : {"class": "input-xlarge"}}
            ],
            "editorial_process" : [
                {"editorial_board_url" : {"class": "input-xlarge"}},
                {"review_process" : {"class" : "form-control input-xlarge"}},
                {"review_process_url" : {"class": "input-xlarge"}},
                {"aims_scope_url" : {"class": "input-xlarge"}},
                {"instructions_authors_url" : {"class": "input-xlarge"}},
                {"plagiarism_screening" : {}},
                {"plagiarism_screening_url" : {"class": "input-xlarge"}},
                {"publication_time" : {"class": "input-tiny"}}
            ],
            "openness" : [
                {"oa_statement_url" : {"class": "input-xlarge"}}
            ],
            "content_licensing" : [
                {"license_embedded" : {}},
                {"license_embedded_url" : {"class": "input-xlarge"}},
                {
                    "license" : {
                        "extra_input_fields": {
                            Choices.licence_val("other") : "license_other"
                        }
                    }
                },
                {"license_checkbox" : {}},
                {"license_url" : {"class": "input-xlarge"}},
                {"open_access" : {}},
                {
                    "deposit_policy" : {
                        "extra_input_fields": {
                            Choices.open_access_val("other") : "deposit_policy_other"
                        }
                    }
                }
            ],
            "copyright" : [
                {
                    "copyright" : {}
                },
                {"copyright_url" : {"class": "input-xlarge"}},
                {
                    "publishing_rights" : {}
                },
                {"publishing_rights_url" : {"class": "input-xlarge"}}
            ]
        }
    def check_field_groups(self):
        '''
        Check whether field groups which are being referenced in various renderer lists actually exist.
        Should only be called in self.__init__ by non-abstract classes,
        i.e. the bottom of the inheritance tree, the ones that would
        actually get used to render forms.
        Otherwise the check becomes meaningless (and always fails) as it will check whether
        all groups are defined in a class that isn't supposed to have all
        the definitions - being abstract, it may only have a few common ones.
        '''
        for group in self.NUMBERING_ORDER:
            try:
                self.FIELD_GROUPS[group]
            except KeyError as e:
                raise KeyError(
                    'Can\'t number a group which does not exist. '
                    'Field group "{0}" is not defined in self.FIELD_GROUPS '
                    'but is present in self.NUMBERING_ORDER. '
                    'This is in renderer {1}.'.format(str(e), self.__class__.__name__)
                )
        for group in self.ERROR_CHECK_ORDER:
            try:
                self.FIELD_GROUPS[group]
            except KeyError as e:
                raise KeyError(
                    'Can\'t check a group which does not exist for errors. '
                    'Field group "{0}" is not defined in self.FIELD_GROUPS '
                    'but is present in self.ERROR_CHECK_ORDER. '
                    'This is in renderer {1}.'.format(str(e), self.__class__.__name__)
                )
    def number_questions(self):
        # Assign sequential question numbers (as strings) across all groups
        # in NUMBERING_ORDER, mutating each field's render config in place.
        q = 1
        for g in self.NUMBERING_ORDER:
            cfg = self.FIELD_GROUPS.get(g)
            for obj in cfg:
                field = list(obj.keys())[0]
                obj[field]["q_num"] = str(q)
                q += 1
    def question_number(self, field):
        # Look up a field's assigned question number; "" when not numbered.
        for g in self.FIELD_GROUPS:
            cfg = self.FIELD_GROUPS.get(g)
            for obj in cfg:
                f = list(obj.keys())[0]
                if f == field and "q_num" in obj[f]:
                    return obj[f]["q_num"]
        return ""
    def set_error_fields(self, fields):
        super(BasicJournalInformationRenderer, self).set_error_fields(fields)
        # find the first error in the form and tag it
        found = False
        for g in self.ERROR_CHECK_ORDER:
            cfg = self.FIELD_GROUPS.get(g)
            # If a group is specified as part of the error checks but is
            # not defined in self.FIELD_GROUPS then do not try to check
            # it for errors - there are no fields to check.
            if cfg:
                for obj in cfg:
                    field = list(obj.keys())[0]
                    if field in self.error_fields:
                        obj[field]["first_error"] = True
                        found = True
                        break
            if found:
                break
class ApplicationRenderer(BasicJournalInformationRenderer):
    """Adds the submitter (suggester) section and application-only questions
    to the basic journal form."""
    def __init__(self):
        super(ApplicationRenderer, self).__init__()
        # allow the subclass to define the order the groups should be considered in. This is useful for
        # numbering questions and determining first errors
        self.NUMBERING_ORDER.append("submitter_info")
        self.ERROR_CHECK_ORDER = deepcopy(self.NUMBERING_ORDER) # in this case these can be the same
        self.FIELD_GROUPS["submitter_info"] = [
            {"suggester_name" : {"label_width" : 5}},
            {"suggester_email" : {"label_width" : 5, "class": "input-xlarge"}},
            {"suggester_email_confirm" : {"label_width" : 5, "class": "input-xlarge"}},
        ]
        # Applications additionally ask about publication volume and metadata
        # provision, slotted into basic_info next to their related questions.
        self.insert_field_after(
            field_to_insert={"articles_last_year" : {"class": "input-mini"}},
            after_this_field="submission_charges_currency",
            field_group="basic_info"
        )
        self.insert_field_after(
            field_to_insert={"articles_last_year_url" : {"class": "input-xlarge"}},
            after_this_field="articles_last_year",
            field_group="basic_info"
        )
        self.insert_field_after(
            field_to_insert={"metadata_provision" : {}},
            after_this_field="article_identifiers",
            field_group="basic_info"
        )
class PublicApplicationRenderer(ApplicationRenderer):
    """Renderer for the public-facing application form.

    Numbers the questions and validates the group configuration at
    construction time (neither is done by default, because other
    renderers may mutate the groups first).
    """

    def __init__(self):
        super(PublicApplicationRenderer, self).__init__()
        # explicitly call number questions, as it is not called by default (because other implementations may want
        # to mess with the group order and field groups first)
        self.number_questions()
        self.check_field_groups()
class PublisherUpdateRequestRenderer(ApplicationRenderer):
    """Renderer for a publisher's update request.

    Identical to the application form except that the submitter details
    are dropped (the publisher is already known).
    """

    def __init__(self):
        super(PublisherUpdateRequestRenderer, self).__init__()
        # The submitter group is removed from numbering, error checks and
        # the group definitions themselves.
        self.NUMBERING_ORDER.remove("submitter_info")
        self.ERROR_CHECK_ORDER = deepcopy(self.NUMBERING_ORDER)
        del self.FIELD_GROUPS["submitter_info"]
        # explicitly call number questions, as it is not called by default (because other implementations may want
        # to mess with the group order and field groups first)
        self.number_questions()
        self.check_field_groups()
        self._highlight_completable_fields = True
class PublisherUpdateRequestReadOnlyRenderer(ApplicationRenderer):
    """Read-only view of an update request: no error checking at all."""

    def __init__(self):
        super(PublisherUpdateRequestReadOnlyRenderer, self).__init__()
        # No errors can occur on a read-only form.
        self.ERROR_CHECK_ORDER = []
        self.number_questions()
        self.check_field_groups()
class ManEdApplicationReviewRenderer(ApplicationRenderer):
    """Managing editor's application review form.

    Adds the administrative groups (status, account, subject, editorial,
    notes, seal, continuations) on top of the public application fields.
    """

    def __init__(self):
        super(ManEdApplicationReviewRenderer, self).__init__()
        # extend the list of field groups
        self.FIELD_GROUPS["status"] = [
            {"application_status" : {"class" : "form-control input-large"}}
        ]
        self.FIELD_GROUPS["account"] = [
            {"owner" : {"class" : "input-large"}}
        ]
        self.FIELD_GROUPS["subject"] = [
            {"subject" : {}}
        ]
        self.FIELD_GROUPS["editorial"] = [
            {"editor_group" : {"class" : "input-large"}},
            {"editor" : {"class" : "form-control input-large"}},
        ]
        self.FIELD_GROUPS["notes"] = [
            {
                "notes" : {
                    "render_subfields_horizontal" : True,
                    "container_class" : "deletable",
                    "subfield_display-note" : "8",
                    "subfield_display-date" : "3",
                    "label_width" : 0
                }
            }
        ]
        self.FIELD_GROUPS["seal"] = [
            {"doaj_seal" : {}}
        ]
        self.FIELD_GROUPS["continuations"] = [
            {"replaces" : {"class": "input-xlarge"}},
            {"is_replaced_by" : {"class": "input-xlarge"}},
            {"discontinued_date" : {}}
        ]
        # The extra admin groups take part in error checking but are kept
        # out of self.NUMBERING_ORDER on purpose: they must not be numbered.
        self.ERROR_CHECK_ORDER = ["status", "account", "editorial", "continuations", "subject"] + self.ERROR_CHECK_ORDER + ["notes"]
        self.number_questions()
        self.check_field_groups()
        self._highlight_completable_fields = True
class EditorApplicationReviewRenderer(ApplicationRenderer):
    """Editor's application review form.

    Like the managing editor's form but without account, seal and
    continuations groups, and with non-deletable notes.
    """

    def __init__(self):
        super(EditorApplicationReviewRenderer, self).__init__()
        # extend the list of field groups
        self.FIELD_GROUPS["status"] = [
            {"application_status" : {"class" : "form-control input-large"}}
        ]
        self.FIELD_GROUPS["subject"] = [
            {"subject" : {}}
        ]
        self.FIELD_GROUPS["editorial"] = [
            {"editor_group" : {"class" : "input-large"}},
            {"editor" : {"class" : "form-control input-large"}},
        ]
        self.FIELD_GROUPS["notes"] = [
            {
                "notes" : {
                    "render_subfields_horizontal" : True,
                    "subfield_display-note" : "8",
                    "subfield_display-date" : "3",
                    "label_width" : 0
                }
            }
        ]
        self.ERROR_CHECK_ORDER = ["status", "editorial", "subject"] + self.ERROR_CHECK_ORDER + ["notes"]
        # don't want the extra groups numbered so not added to self.NUMBERING_ORDER
        self.number_questions()
        self.check_field_groups()
        self._highlight_completable_fields = True
class AssEdApplicationReviewRenderer(ApplicationRenderer):
    """Associate editor's application review form.

    The slimmest of the review forms: only status, subject and notes are
    added on top of the public application fields.
    """

    def __init__(self):
        super(AssEdApplicationReviewRenderer, self).__init__()
        # extend the list of field groups
        self.FIELD_GROUPS["status"] = [
            {"application_status" : {"class" : "form-control input-large"}}
        ]
        self.FIELD_GROUPS["subject"] = [
            {"subject" : {}}
        ]
        self.FIELD_GROUPS["notes"] = [
            {
                "notes" : {
                    "render_subfields_horizontal" : True,
                    "subfield_display-note" : "8",
                    "subfield_display-date" : "3",
                    "label_width" : 0
                }
            }
        ]
        # Extra groups are error-checked but deliberately not numbered.
        self.ERROR_CHECK_ORDER = ["status", "subject"] + self.ERROR_CHECK_ORDER + ["notes"]
        self.number_questions()
        self.check_field_groups()
        self._highlight_completable_fields = True
class JournalRenderer(BasicJournalInformationRenderer):
    """Base renderer for journal forms.

    Adds the subject classification group and the legacy ("old journal")
    fields, which are only rendered when at least one of them still
    carries data.
    """

    def __init__(self):
        super(JournalRenderer, self).__init__()
        self.FIELD_GROUPS["subject"] = [
            {"subject" : {}}
        ]
        # Deprecated fields kept around for journals that still hold
        # legacy data in them.
        self.FIELD_GROUPS["old_journal_fields"] = [
            {"author_pays": {}},
            {"author_pays_url": {"class": "input-xlarge"}},
            {"oa_end_year": {"class": "input-mini"}},
        ]

    def render_field_group(self, form_context, field_group_name=None, **kwargs):
        """Render a field group, suppressing the empty legacy group.

        The "old_journal_fields" group is only rendered when at least one
        of its fields has real data (the string 'None' counts as empty);
        otherwise an empty string is returned.  All other groups are
        delegated to the superclass unchanged.
        """
        if field_group_name == "old_journal_fields":
            display_old_journal_fields = False
            for old_field_def in self.FIELD_GROUPS["old_journal_fields"]:
                old_field_name = list(old_field_def.keys())[0]
                old_field = getattr(form_context.form, old_field_name)
                if old_field:
                    if old_field.data and old_field.data != 'None':
                        display_old_journal_fields = True
                        # One populated field is enough to show the group.
                        break
            if not display_old_journal_fields:
                return ""
            # otherwise let it fall through and render the old journal fields
        return super(JournalRenderer, self).render_field_group(form_context, field_group_name, **kwargs)
class ManEdJournalReviewRenderer(JournalRenderer):
    """Managing editor's journal review form.

    Adds account, editorial, notes, seal, continuations and the
    "make all fields optional" switch on top of the base journal fields.
    """

    def __init__(self):
        super(ManEdJournalReviewRenderer, self).__init__()
        # extend the list of field groups
        self.FIELD_GROUPS["account"] = [
            {"owner" : {"class" : "input-large"}}
        ]
        self.FIELD_GROUPS["editorial"] = [
            {"editor_group" : {"class" : "input-large"}},
            {"editor" : {"class" : "form-control input-large"}},
        ]
        self.FIELD_GROUPS["notes"] = [
            {
                "notes" : {
                    "render_subfields_horizontal" : True,
                    "container_class" : "deletable",
                    "subfield_display-note" : "8",
                    "subfield_display-date" : "3",
                    "label_width" : 0
                }
            }
        ]
        self.FIELD_GROUPS["make_all_fields_optional"] = [
            {"make_all_fields_optional": {}}
        ]
        self.FIELD_GROUPS["seal"] = [
            {"doaj_seal" : {}}
        ]
        self.FIELD_GROUPS["continuations"] = [
            {"replaces" : {"class": "input-xlarge"}},
            {"is_replaced_by" : {"class": "input-xlarge"}},
            {"discontinued_date" : {}}
        ]
        # Extra admin groups take part in error checking but are not
        # added to NUMBERING_ORDER, so they stay unnumbered.
        self.ERROR_CHECK_ORDER = ["make_all_fields_optional", "account", "editorial", "continuations", "subject"] + self.ERROR_CHECK_ORDER + ["notes"]
        self.number_questions()
        self.check_field_groups()
        self._highlight_completable_fields = True
class ManEdJournalBulkEditRenderer(Renderer):
    """Managing editor's bulk-edit form.

    A flat, single-group form containing only the fields that may be
    changed across many journals at once.  No numbering or error-order
    handling is needed.
    """

    def __init__(self):
        super(ManEdJournalBulkEditRenderer, self).__init__()
        self.FIELD_GROUPS = {
            "main" : [
                {"publisher" : {"class": "input-xlarge"}},
                {"platform" : {"class": "input-xlarge"}},
                {"country" : {"class": "input-large"}},
                {"owner" : {"class" : "input-large"}},
                {"contact_name" : {"class" : "input-large"}},
                {"contact_email" : {"class" : "input-large"}},
                {"doaj_seal" : {"class" : "form-control input-large"}}
            ]
        }
class EditorJournalReviewRenderer(JournalRenderer):
    """Editor's journal review form: adds editorial and notes groups."""

    def __init__(self):
        # NOTE(review): this flag is assigned *before* super().__init__()
        # runs — presumably so base-class initialisation (or the template)
        # can rely on it existing; confirm before reordering.
        self.display_old_journal_fields = False # an instance var flag for the template
        super(EditorJournalReviewRenderer, self).__init__()
        self.FIELD_GROUPS["editorial"] = [
            {"editor_group" : {"class" : "input-large"}},
            {"editor" : {"class" : "form-control input-large"}},
        ]
        self.FIELD_GROUPS["notes"] = [
            {
                "notes" : {
                    "render_subfields_horizontal" : True,
                    "subfield_display-note" : "8",
                    "subfield_display-date" : "3",
                    "label_width" : 0
                }
            }
        ]
        self.ERROR_CHECK_ORDER = ["editorial", "subject"] + self.ERROR_CHECK_ORDER + ["notes"]
        # don't want the extra groups numbered so not added to self.NUMBERING_ORDER
        self.number_questions()
        self.check_field_groups()
        self._highlight_completable_fields = True
class AssEdJournalReviewRenderer(JournalRenderer):
    """Associate editor's journal review form: base fields plus notes."""

    def __init__(self):
        super(AssEdJournalReviewRenderer, self).__init__()
        # extend the list of field groups
        self.FIELD_GROUPS["notes"] = [
            {
                "notes" : {
                    "render_subfields_horizontal" : True,
                    "subfield_display-note" : "8",
                    "subfield_display-date" : "3",
                    "label_width" : 0
                }
            }
        ]
        # Notes and subject are error-checked but not numbered.
        self.ERROR_CHECK_ORDER = ["subject"] + self.ERROR_CHECK_ORDER + ["notes"]
        self.number_questions()
        self.check_field_groups()
        self._highlight_completable_fields = True
class ReadOnlyJournalRenderer(JournalRenderer):
    """Read-only journal view: notes are shown, error checks disabled."""

    def __init__(self):
        super(ReadOnlyJournalRenderer, self).__init__()
        # extend the list of field groups
        self.FIELD_GROUPS["notes"] = [
            {
                "notes" : {
                    "render_subfields_horizontal" : True,
                    "subfield_display-note" : "8",
                    "subfield_display-date" : "3",
                    "label_width" : 0
                }
            }
        ]
        # No errors can occur on a read-only form.
        self.ERROR_CHECK_ORDER = []
        self.number_questions()
        self.check_field_groups()
| StarcoderdataPython |
1754504 | <reponame>Andre-Williams22/SPD-2.31-Testing-and-Architecture
def binary_search(arr, element, low=0, high=None):
    """Return the index of *element* in the sorted sequence *arr*, or -1.

    Classic recursive binary search over arr[low:high + 1].  *arr* must be
    sorted in ascending order.  When *high* is None it defaults to the
    last index of *arr*, so an empty sequence immediately returns -1.
    """
    if high is None:  # identity check: `== None` is unidiomatic and can misfire
        high = len(arr) - 1
    if high < low:
        return -1
    mid = (high + low) // 2
    if arr[mid] == element:
        return mid
    elif arr[mid] > element:
        return binary_search(arr, element, low, mid-1)
    else:
        return binary_search(arr, element, mid+1, high)
if __name__ == '__main__':
    # Simple demo: locate 7 in a small sorted list (expected index: 4).
    print('### Binary Sort ###')
    print(binary_search([1, 2, 4, 5, 7], 7))
60335 | # Python imports
# Third party imports
import boto3
# Self imports
class DjangoCloudWatchHandler:
    """Builds a Django LOGGING handler entry that ships records to AWS CloudWatch.

    Stores the CloudWatch credentials/configuration and produces a dict
    suitable for insertion into settings.LOGGING["handlers"], backed by a
    watchtower handler with a lazily created boto3 client.
    """

    def __init__(
        self,
        log_level: str,
        log_group_name: str,
        cloud_watch_aws_id: str,
        cloud_watch_aws_key: str,
        cloud_watch_aws_region: str,
    ) -> None:
        # Just remember the settings; the boto3 client is only built when
        # get_handler_data() is called.
        self.log_level = log_level
        self.log_group_name = log_group_name
        self.cloud_watch_aws_id = cloud_watch_aws_id
        self.cloud_watch_aws_key = cloud_watch_aws_key
        self.cloud_watch_aws_region = cloud_watch_aws_region

    def get_handler_data(self) -> dict:
        """Return the watchtower handler configuration dict."""
        return {
            'level': self.log_level,
            'class': 'watchtower.CloudWatchLogHandler',
            'boto3_client': self._get_boto_client(),
            'log_group_name': self.log_group_name,
            'formatter': 'aws',
        }

    def _get_boto_client(self) -> object:
        """Create a boto3 CloudWatch Logs client from the stored credentials."""
        return boto3.client(
            'logs',
            aws_access_key_id=self.cloud_watch_aws_id,
            aws_secret_access_key=self.cloud_watch_aws_key,
            region_name=self.cloud_watch_aws_region,
        )
252498 | import bs4
from ...travparse import parsebuild
from . import building
# Rally point
class RallyPoint(building.Building):
    """Rally point building: inspect troop movements and dispatch troops.

    All page access goes through the village's login helper; parsing of
    the troop tables is delegated to parsebuild.rallypoint.
    """

    def __init__(self, village_part, name, id, level):
        building.Building.__init__(self, village_part, name, id, level)
        self.eng_name = 'rallypoint'

    def post(self, url, params, data):
        # Not implemented; requests are issued via the login helper instead.
        pass

    def get_incoming(self):
        """Return the incoming troop movements (tt=1 is the troops tab)."""
        html = self.village_part.get_html({'id': self.id, 'tt': 1})
        soup = bs4.BeautifulSoup(html, 'html5lib')
        return parsebuild.rallypoint.parse_troops(soup)['incoming']
    incoming = property(get_incoming)

    def get_outgoing(self):
        """Return the outgoing troop movements."""
        html = self.village_part.get_html({'id': self.id, 'tt': 1})
        soup = bs4.BeautifulSoup(html, 'html5lib')
        return parsebuild.rallypoint.parse_troops(soup)['outgoing']
    outgoing = property(get_outgoing)

    def get_in_village(self):
        """Return the troops currently stationed in the village."""
        html = self.village_part.get_html({'id': self.id, 'tt': 1})
        soup = bs4.BeautifulSoup(html, 'html5lib')
        return parsebuild.rallypoint.parse_troops(soup)['in_village']
    in_village = property(get_in_village)

    def reinforcement(self):
        pass

    def attack_normal(self):
        pass

    def attack_raid(self, pos):
        pass

    def step_1(self, pos, troops, c=4):
        """Submit the first send-troops form (target, troop counts, type *c*)."""
        send_troops_page = 2
        html = self.village_part.village.login.get_html('build.php', {'id': self.id, 'tt': send_troops_page})
        soup = bs4.BeautifulSoup(html, 'html5lib')
        div_build = soup.find('div', {'id': 'build'})
        # Collect the hidden anti-CSRF/timestamp fields the form requires.
        data = dict()
        data['x'] = pos[0]
        data['y'] = pos[1]
        data['c'] = c
        data['timestamp'] = div_build.find('input', {'name': 'timestamp'})['value']
        data['timestamp_checksum'] = div_build.find('input', {'name': 'timestamp_checksum'})['value']
        data['b'] = div_build.find('input', {'name': 'b'})['value']
        data['currentDid'] = div_build.find('input', {'name': 'currentDid'})['value']
        data.update(troops)
        # data['t5'] = 5
        data['s1'] = 'ok'
        self.step_2(data, troops)

    def step_2(self, data, troops):
        """Submit the confirmation form that actually dispatches the troops."""
        send_troops_page = 2
        params = {'id': self.id, 'tt': send_troops_page}
        response = self.village_part.village.login.post('build.php', data=data, params=params)
        html = response.text
        soup = bs4.BeautifulSoup(html, 'html5lib')
        div_build = soup.find('div', {'id': 'build'})
        # data = {}
        data['timestamp'] = div_build.find('input', {'name': 'timestamp'})['value']
        data['timestamp_checksum'] = div_build.find('input', {'name': 'timestamp_checksum'})['value']
        input_id = div_build.find('input', {'name': 'id'})
        if not input_id:
            return # some problem
        data['id'] = input_id['value']
        data['a'] = div_build.find('input', {'name': 'a'})['value']
        data['c'] = div_build.find('input', {'name': 'c'})['value']
        data['kid'] = div_build.find('input', {'name': 'kid'})['value']
        for i in range(1, 12):
            data['t%s' % (i,)] = int(div_build.find('input', {'name': 't%s' % (i,)})['value'])
        # workaround: abort when fewer troops are available than requested
        for key in troops:
            if data[key] < troops[key]:
                return
        data['sendReally'] = div_build.find('input', {'name': 'sendReally'})['value']
        data['troopsSent'] = div_build.find('input', {'name': 'troopsSent'})['value']
        data['currentDid'] = div_build.find('input', {'name': 'currentDid'})['value']
        data['b'] = div_build.find('input', {'name': 'b'})['value']
        data['dname'] = div_build.find('input', {'name': 'dname'})['value']
        data['x'] = div_build.find('input', {'name': 'x'})['value']
        data['y'] = div_build.find('input', {'name': 'y'})['value']
        response = self.village_part.village.login.post('build.php', data=data, params=params)
        html = response.text

    def send_troops(self, pos, troops={'t5': 5}, c=4):
        """Send *troops* to map position *pos* with movement type *c*."""
        # c = 2 # Reinforcement
        # c = 3 # Attack: Normal
        # c = 4 # Attack: Raid
        print(self.village_part.village.name, 'raid to', pos)
        self.step_1(pos, troops, c)
6410197 | # coding: utf-8
from __future__ import annotations
import collections.abc
import os
import string
import itertools
import more_itertools
import dateparser
import ebooklib
import ebooklib.epub
import fitz
from contextlib import suppress
from io import StringIO
from functools import cached_property, lru_cache
from pathlib import Path, PurePosixPath
from urllib import parse as urllib_parse
from diskcache import Cache
from lxml import html as lxml_html
from selectolax.parser import HTMLParser
from bookworm.i18n import LocaleInfo
from bookworm.structured_text import TextRange
from bookworm.structured_text.structured_html_parser import StructuredHtmlParser
from bookworm.utils import is_external_url, format_datetime
from bookworm.paths import home_data_path
from bookworm.image_io import ImageIO
from bookworm.logger import logger
from .. import (
SinglePageDocument,
BookMetadata,
Section,
LinkTarget,
DocumentCapability as DC,
TreeStackBuilder,
ChangeDocument,
DocumentError,
SINGLE_PAGE_DOCUMENT_PAGER,
)
# Module-level logger, namespaced under the package logger.
log = logger.getChild(__name__)

# File extensions treated as (X)HTML documents when an EPUB item does not
# declare its type explicitly.
HTML_FILE_EXTS = {
    ".html",
    ".xhtml",
}
class EpubDocument(SinglePageDocument):
    """Renders an EPUB as one concatenated, single-page HTML document.

    All (X)HTML items are joined into a single HTML string (cached on
    disk), parsed for structure/styling, and the EPUB table of contents is
    mapped onto text ranges within that string.
    """

    format = "epub"
    # Translators: the name of a document file format
    name = _("Electronic Publication (EPUB)")
    extensions = ("*.epub",)
    capabilities = (
        DC.TOC_TREE
        | DC.METADATA
        | DC.SINGLE_PAGE
        | DC.STRUCTURED_NAVIGATION
        | DC.TEXT_STYLE
        | DC.ASYNC_READ
        | DC.LINKS
        | DC.INTERNAL_ANCHORS
    )

    def read(self):
        """Open the EPUB, build the combined HTML, and parse the TOC."""
        super().read()
        self.epub = ebooklib.epub.read_epub(self.get_file_system_path())
        # Force evaluation of the cached property now, so any parse errors
        # surface during read() rather than lazily later.
        self.html_content = self.html_content
        self.structure = StructuredHtmlParser.from_string(self.html_content)
        self.toc = self.parse_epub()

    @property
    def toc_tree(self):
        return self.toc

    @cached_property
    def epub_metadata(self):
        """Flatten the per-namespace EPUB metadata into one simple dict."""
        info = {}
        for md in tuple(self.epub.metadata.values()):
            info.update(md)
        # Each value is a list of (value, attributes) tuples; keep the
        # first value only.
        return {k: v[0][0] for k, v in info.items()}

    @cached_property
    def metadata(self):
        """Build a BookMetadata from the EPUB's Dublin Core information."""
        info = self.epub_metadata
        author = info.get("creator", "") or info.get("author", "")
        try:
            desc = HTMLParser(info.get("description", "")).text()
        except Exception:
            # The description is optional; a malformed one is simply dropped.
            desc = None
        if pubdate := dateparser.parse(
            info.get("date", ""),
            languages=[
                self.language.two_letter_language_code,
            ],
        ):
            publish_date = self.language.format_datetime(
                pubdate, date_only=True, format="long", localized=True
            )
        else:
            publish_date = ""
        return BookMetadata(
            title=self.epub.title,
            author=author.removeprefix("By ").strip(),
            description=desc,
            publication_year=publish_date,
            publisher=info.get("publisher", ""),
        )

    @cached_property
    def language(self):
        """Determine the document language: declared tag first, then sniffing."""
        if (epub_lang := self.epub_metadata.get("language")) is not None:
            try:
                return LocaleInfo(epub_lang)
            except Exception:
                # Fix: the original message was a non-f-string template and
                # never interpolated; log.exception already records the
                # traceback.
                log.exception("Failed to parse epub language %r", epub_lang)
        return self.get_language(self.html_content, is_html=True) or LocaleInfo("en")

    def get_content(self):
        return self.structure.get_text()

    def get_document_semantic_structure(self):
        return self.structure.semantic_elements

    def get_document_style_info(self):
        return self.structure.styled_elements

    def resolve_link(self, link_range) -> LinkTarget:
        """Resolve a link at *link_range* to an external URL or internal position."""
        href = urllib_parse.unquote(self.structure.link_targets[link_range])
        if is_external_url(href):
            return LinkTarget(url=href, is_external=True)
        else:
            for (html_id, text_range) in self.structure.html_id_ranges.items():
                if html_id.endswith(href):
                    return LinkTarget(
                        url=href, is_external=False, page=None, position=text_range
                    )

    def get_cover_image(self):
        """Return the cover as an ImageIO, trying several fallbacks."""
        if not (
            cover := more_itertools.first(
                self.epub.get_items_of_type(ebooklib.ITEM_COVER), None
            )
        ):
            # No declared cover item: fall back to any image whose file
            # name contains "cover".
            cover = more_itertools.first(
                filter(
                    lambda item: "cover" in item.file_name.lower(),
                    self.epub.get_items_of_type(ebooklib.ITEM_IMAGE),
                ),
                None,
            )
        if cover:
            return ImageIO.from_bytes(cover.content)
        # Last resort: render the first page via MuPDF.
        try:
            with fitz.open(self.get_file_system_path()) as fitz_document:
                return ImageIO.from_fitz_pixmap(fitz_document.get_page_pixmap(0))
        except Exception:
            log.warning(
                "Failed to obtain the cover image for epub document.", exc_info=True
            )

    @lru_cache(maxsize=10)
    def get_section_at_position(self, pos):
        """Return the deepest TOC section whose text range contains *pos*."""
        for ((start, end), section) in self.start_positions_for_sections:
            if start <= pos < end:
                return section
        return self.toc_tree

    @cached_property
    def epub_html_items(self) -> tuple[str]:
        """All (X)HTML items, falling back to extension sniffing if untyped."""
        if html_items := tuple(self.epub.get_items_of_type(ebooklib.ITEM_DOCUMENT)):
            return html_items
        else:
            return tuple(
                filter(
                    lambda item: os.path.splitext(item.file_name)[1] in HTML_FILE_EXTS,
                    self.epub.items,
                )
            )

    def get_epub_html_item_by_href(self, href):
        """Find the HTML item for *href*, matching by full href then by name."""
        # Bug fix: the original passed the literal string "href" to
        # get_item_with_href, so the direct lookup never succeeded.
        if epub_item := self.epub.get_item_with_href(href):
            return epub_item
        item_name = PurePosixPath(href).name
        return more_itertools.first(
            (
                item
                for item in self.epub_html_items
                if PurePosixPath(item.file_name).name == item_name
            ),
            None,
        )

    def parse_epub(self):
        """Build the Section tree, mapping each TOC entry to a text range."""
        root = Section(
            title=self.metadata.title,
            pager=SINGLE_PAGE_DOCUMENT_PAGER,
            level=1,
            text_range=TextRange(0, len(self.get_content())),
        )
        id_ranges = {
            urllib_parse.unquote(key): value
            for (key, value) in self.structure.html_id_ranges.items()
        }
        stack = TreeStackBuilder(root)
        toc_entries = self.epub.toc
        if not isinstance(toc_entries, collections.abc.Iterable):
            toc_entries = [
                toc_entries,
            ]
        for sect in self.add_toc_entry(toc_entries, root):
            href = urllib_parse.unquote(sect.data["href"])
            try:
                sect.text_range = TextRange(*id_ranges[href])
            except KeyError:
                # Direct lookup failed; try progressively looser matches.
                text_range = None
                # Strip punctuation as ebooklib, for some reason, strips those from html_ids
                for (h_id, t_range) in id_ranges.items():
                    if (href == h_id.strip("/")) or (
                        href == h_id.strip(string.punctuation)
                    ):
                        text_range = t_range
                        break
                if text_range is None and "#" in href:
                    # Fall back to the containing file's range.
                    filename = href.split("#")[0]
                    text_range = id_ranges.get(filename)
                if text_range is None:
                    log.warning(
                        f"Could not determine the starting position for href: {href} and section: {sect!r}"
                    )
                    # Inherit the previous section's range (or the origin).
                    text_range = (
                        stack.top.text_range.astuple()
                        if stack.top is not root
                        else (0, 0)
                    )
                sect.text_range = TextRange(*text_range)
            stack.push(sect)
        return root

    @cached_property
    def start_positions_for_sections(self):
        """Pair each section with its (start, end) span for position lookups."""
        sect_starting_poses = [
            (0, self.toc_tree),
        ] + [(sect.text_range.start, sect) for sect in self.toc_tree.iter_children()]
        data = list(
            more_itertools.zip_offset(
                sect_starting_poses, sect_starting_poses, offsets=(0, 1), longest=True
            )
        )
        # The last pair has no successor; close its span on itself.
        data[-1] = list(data[-1])
        data[-1][1] = data[-1][0]
        return [((i[0], j[0]), i[1]) for i, j in data]

    def add_toc_entry(self, entries, parent):
        """Yield Section objects for TOC *entries* (recursively for subtrees)."""
        for entry in entries:
            current_level = parent.level + 1
            if type(entry) is ebooklib.epub.Link:
                sect = Section(
                    title=entry.title or self._get_title_for_section(entry.href),
                    pager=SINGLE_PAGE_DOCUMENT_PAGER,
                    level=current_level,
                    parent=parent,
                    data=dict(href=entry.href.lstrip("./")),
                )
                yield sect
            else:
                # A (section, children) tuple: yield the section, then recurse.
                epub_sect, children = entry
                sect = Section(
                    title=epub_sect.title
                    or self._get_title_for_section(epub_sect.href),
                    level=current_level,
                    pager=SINGLE_PAGE_DOCUMENT_PAGER,
                    parent=parent,
                    data=dict(href=epub_sect.href.lstrip("./")),
                )
                yield sect
                yield from self.add_toc_entry(
                    children,
                    parent=sect,
                )

    @cached_property
    def html_content(self):
        """Concatenate all HTML items into one document, with disk caching."""
        cache = Cache(
            self._get_cache_directory(), eviction_policy="least-frequently-used"
        )
        cache_key = self.uri.to_uri_string()
        if cached_html_content := cache.get(cache_key):
            return cached_html_content.decode("utf-8")
        html_content_gen = (
            (item.file_name, item.content) for item in self.epub_html_items
        )
        buf = StringIO()
        for (filename, html_content) in html_content_gen:
            buf.write(self.prefix_html_ids(filename, html_content))
            buf.write("\n<br/>\n")
        html_content = self.build_html(
            title=self.epub.title, body_content=buf.getvalue()
        )
        cache.set(cache_key, html_content.encode("utf-8"))
        return html_content

    def prefix_html_ids(self, filename, html):
        """Namespace element ids with *filename* so ids stay unique after merging."""
        tree = lxml_html.fromstring(html)
        tree.make_links_absolute(
            filename, resolve_base_href=False, handle_failures="ignore"
        )
        if os.path.splitext(filename)[1] in HTML_FILE_EXTS:
            for node in tree.xpath("//*[@id]"):
                node.set("id", filename + "#" + node.get("id"))
        try:
            tree.remove(tree.head)
            tree.body.tag = "section"
        except Exception:
            # Some items have no head/body; leave them as parsed.
            pass
        tree.tag = "div"
        # Anchor element so whole-file hrefs resolve to a position.
        tree.insert(0, tree.makeelement("header", attrib={"id": filename}))
        return lxml_html.tostring(tree, method="html", encoding="unicode")

    def build_html(self, title, body_content):
        """Wrap *body_content* in a minimal well-formed HTML5 document."""
        return (
            "<!doctype html>\n"
            '<html class="no-js" lang="">\n'
            "<head>\n"
            '<meta charset="utf-8">'
            f"<title>{title}</title>\n"
            "</head>\n"
            "<body>\n"
            f"{body_content}\n"
            "</body>\n"
            "</html>"
        )

    def _get_title_for_section(self, href):
        """Fallback section title: the <title> of the file *href* points into."""
        filename = href.split("#")[0] if "#" in href else href
        html_doc = self.get_epub_html_item_by_href(filename)
        if html_doc is not None:
            if title_list := lxml_html.fromstring(html_doc.content).xpath(
                "/html/head/title//text()"
            ):
                return title_list[0]
        else:
            log.warning(f"Could not resolve href: {href}")
        return ""

    def _get_cache_directory(self):
        """Directory used by the on-disk cache of parsed EPUB HTML."""
        return os.fspath(home_data_path(".parsed_epub_cache"))
12801486 | # Author: <NAME>
# 14 Feb 2019
# HPCC Brandeis
#This code will
#1) All files inside a given path that have not been accessed/modified/group_changed in the last threshod days
#2) Create a report of user owners and their deleted files
import os, time, datetime
import pwd
import sys
# Clear the terminal before any output (lambda-assignment replaced by a
# plain def; the return value of os.system is preserved).
def clear():
    return os.system('clear')
clear()
# Ownership metadata (user and group names) for a single file path.
class FILE:
    """Resolve and hold the owning user and group names of *filepath*."""

    def __init__(self, filepath):
        import grp  # local import: only needed when ownership is resolved

        stat_result = os.stat(filepath)
        self.path = filepath
        self.user = pwd.getpwuid(stat_result.st_uid).pw_name
        # Bug fix: the group name must come from the gid via the grp
        # database; the original resolved the *user* name a second time.
        self.group = grp.getgrgid(stat_result.st_gid).gr_name
def create_report(database, time_thresh, time_now, filepath):
    """Write a per-user summary of removed files into the current directory.

    The report file is named "work_file_removal-YYYY-M-D" (no zero
    padding, matching the raw datetime attributes) and lists one
    "user count" line per entry in *database*.
    """
    date = str(time_now.year) + "-" + str(time_now.month) + "-" + str(time_now.day)
    # `with` guarantees the file is closed even if a write fails.
    with open("work_file_removal-" + date, "w") as report:
        report.write("#This file reports user/group owners of files on " + str(filepath) + " that have not been accessed in the last " + str(time_thresh) + " days.\n")
        report.write("#Report Date: " + date + "\n")
        report.write("#Format: user_owner total#_removed_files\n\n")
        for key in database:
            report.write("%s %d\n" % (key, database[key]))
# This function will walk through all files in a given path recursively
def file_search(filepath, time_thresh, time_now):
    """Delete files under *filepath* untouched for more than *time_thresh* days.

    A file is removed when the most recent of its access / modification /
    inode-change timestamps is older than *time_thresh* days relative to
    *time_now*.  Hidden files (leading '.') and anything inside a
    '.snapshot' directory are never touched.

    Returns a dict mapping owning user name -> number of files removed.
    """
    database = {}
    # Reference time in epoch seconds; hoisted out of the walk loop.
    stamp_now = datetime.datetime.timestamp(time_now)
    seconds_per_day = 24 * 3600
    for (dirpath, dirnames, filenames) in os.walk(filepath):
        if dirpath.find('.snapshot') != -1:
            continue  # never touch snapshot copies
        for f in filenames:
            if f[0] == '.':
                continue  # skip hidden files
            # get the absolute path of the file
            file = dirpath + '/' + f
            st = os.stat(file)
            # Age in days since the most recent of atime/mtime/ctime
            # (equivalent to the minimum of the three time differences).
            newest = max(st.st_atime, st.st_mtime, st.st_ctime)
            age_days = (stamp_now - newest) / seconds_per_day
            if age_days > time_thresh:
                # Resolve ownership only for files actually being removed.
                owner = FILE(file).user
                database[owner] = database.get(owner, 0) + 1
                os.remove(file)
    return database
def main():
    """Parse CLI args (threshold in days, root path), purge stale files, report."""
    now = datetime.datetime.now()
    # argv[1]: age threshold in days; argv[2]: directory tree to scan.
    threshold_days = int(sys.argv[1])
    root = str(sys.argv[2])
    removed_per_user = file_search(root, threshold_days, now)
    create_report(removed_per_user, threshold_days, now, root)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
8032842 | <reponame>1kko/ezDHLoader<gh_stars>0
import PySimpleGUI as sg
import pafy
from urllib.parse import urlparse, parse_qs
import time
import random
import configparser
import os
def output_callback(total, recvd, ratio, rate, eta):
    """pafy download progress hook: mirror progress into the GUI.

    Updates the progress bar with *ratio* (0..1) and the ETA text element.
    GUI errors (e.g. the window was closed mid-download) are deliberately
    swallowed so the download itself is not interrupted.
    """
    global progress_bar
    try:
        progress_bar.UpdateBar(ratio)
        etaText = window['eta']
        etaText.update("ETA: " + str(eta) + "s")
    except Exception:
        # Best-effort UI update only.  Narrowed from a bare `except:` so
        # SystemExit/KeyboardInterrupt are no longer swallowed.
        pass
if __name__ == "__main__":
with_apikey = ""
try:
config = configparser.ConfigParser()
config.read('config.ini')
if config['DEFAULT']['YOUTUBE_API_KEY'] not in [None, '']:
pafy.set_api_key(config['DEFAULT']['YOUTUBE_API_KEY'])
with_apikey = " (API Key Enabled)"
except:
pass
layout = [
[sg.Text("Youtube URL:")],
[sg.InputText(size=(80, 20), key='url')],
[sg.Submit("OK"), sg.Cancel()],
[sg.ProgressBar(1, orientation='h', size=(
45, 5), key='progressbar'),
sg.Text(size=(12, 1), key='eta', justification='r')],
# [sg.Output(size=(80, 20))],
[sg.Text("Destination", size=(15, 1)), sg.InputText(os.getcwd(),
key='dstPath'), sg.FolderBrowse()],
]
window = sg.Window('ezDHLoader v0.6' + with_apikey, layout)
progress_bar = window['progressbar']
youtubeId = ""
event, values = window.read()
url = values['url']
try:
if url.startswith("http"):
res = urlparse(url)
if res.netloc == "www.youtube.com" or res.netloc == "youtube.com":
# Url starts with www.youtube.com
youtubeId = parse_qs(res.query)['v'][0]
if res.netloc == "youtu.be":
# Url starts with youtu.be
youtubeId = res.path[1:]
# download
y = pafy.new(youtubeId)
video = y.getbest()
vfilename = video.download(filepath=os.path.join(values['dstPath'], video.title+"."+video.extension),
quiet=True, callback=output_callback, remux_audio=True)
sg.Popup("Done")
except Exception as e:
sg.Popup("Oops", e)
| StarcoderdataPython |
3295070 | #-*- coding: utf-8 -*-
import json, ast
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect, HttpResponse
from django.template import RequestContext
from uni_form.helpers import FormHelper, Submit, Reset
from django.contrib.auth.decorators import login_required
from oauth2app.authorize import Authorizer, MissingRedirectURI, AuthorizationException
from oauth2app.authorize import UnvalidatedRequest, UnauthenticatedUser
from apps.oauth2.forms import AuthorizeForm
from apps.oauth2.authorization import SchemeAgnosticAuthorizer
from oauth2app.models import AccessRange
from oauth2app.authenticate import Authenticator, AuthenticationException, JSONAuthenticator
import pdb
@login_required
def missing_redirect_uri(request):
    """Render the error page shown when a client omits its redirect URI."""
    template_name = 'oauth2/missing_redirect_uri.html'
    context = {}
    return render_to_response(template_name, context, RequestContext(request))
def userinfo(request):
    """Return the authenticated user's profile as JSON.

    The request must carry an OAuth2 token with the "funf_write" scope;
    otherwise a JSON error response is returned.
    """
    scope = AccessRange.objects.get(key="funf_write")
    authenticator = JSONAuthenticator(scope=scope)
    try:
        # Validate the request.
        authenticator.validate(request)
    except AuthenticationException as e:
        # Return an error response.
        print e
        return authenticator.error_response("You didn't authenticate.")
    profile = authenticator.user.get_profile()
    # Assemble the public profile fields for the client.
    response_dict = {}
    response_dict['id'] = profile.uuid
    response_dict['email'] = profile.user.email
    response_dict['name'] = profile.user.username
    # Normalise the PDS location to a single http:// prefix.
    response_dict['pds_location'] = 'http://'+str(profile.pds_location).replace("http://", "")
    return HttpResponse(json.dumps(response_dict), content_type='application/json')
@login_required
def authorize(request):
    """OAuth2 authorization endpoint (code + token flow).

    GET renders the consent form for the requesting client; POST processes
    the user's Yes/No answer and redirects back to the client with either
    a grant or an error.
    """
    CODE_AND_TOKEN = 3
    authorizer = Authorizer(response_type=CODE_AND_TOKEN)
    try:
        authorizer.validate(request)
    # `except X as e` (consistent with grant(), valid since Python 2.6)
    # replaces the legacy `except X, e` comma syntax.
    except MissingRedirectURI as e:
        return HttpResponseRedirect("/oauth2/missing_redirect_uri")
    except AuthorizationException as e:
        # The request is malformed or invalid. Automatically
        # redirects to the provided redirect URL.
        return authorizer.error_redirect()
    if request.method == 'GET':
        # Make sure the authorizer has validated before requesting the client
        # or access_ranges as otherwise they will be None.
        template = {
            "client":authorizer.client,
            "access_ranges":authorizer.access_ranges}
        template["form"] = AuthorizeForm()
        helper = FormHelper()
        no_submit = Submit('connect','No')
        helper.add_input(no_submit)
        yes_submit = Submit('connect', 'Yes')
        helper.add_input(yes_submit)
        helper.form_action = '/oauth2/authorize?%s' % authorizer.query_string
        helper.form_method = 'POST'
        template["helper"] = helper
        return render_to_response(
            'oauth2/authorize.html',
            template,
            RequestContext(request))
    elif request.method == 'POST':
        form = AuthorizeForm(request.POST)
        if form.is_valid():
            if request.POST.get("connect") == "Yes":
                return authorizer.grant_redirect()
            else:
                return authorizer.error_redirect()
    return HttpResponseRedirect("/")
@login_required
def grant(request):
    """Validate and immediately grant an authorization request.

    Unlike authorize(), no consent form is shown: a valid request is
    redirected straight back to the client with the grant.
    """
    CODE_AND_TOKEN=3
    authorizer = SchemeAgnosticAuthorizer(response_type=CODE_AND_TOKEN)
    try:
        authorizer.validate(request)
    except MissingRedirectURI as e:
        print e
        return HttpResponseRedirect("/oauth2/missing_redirect_uri")
    except AuthorizationException, e:
        # The request is malformed or invalid. Automatically
        # redirects to the provided redirect URL.
        return authorizer.error_redirect()
    return authorizer.grant_redirect()
| StarcoderdataPython |
9662219 | # -*- coding: utf-8 -*-
import hoaxlyHelpers
import scrapyelasticsearch
# Scrapy settings for Hoaxlyspiders project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'Hoaxlyspiders'
SPIDER_MODULES = ['Hoaxlyspiders.spiders']
NEWSPIDER_MODULE = 'Hoaxlyspiders.spiders'
# Obey robots.txt rules (single authoritative assignment; a duplicate
# `ROBOTSTXT_OBEY = True` further down in the "Polite Scraping" section
# was removed)
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'Hoaxlyspiders.middlewares.HoaxlyspidersSpiderMiddleware': 543,
#}
SPIDER_MIDDLEWARES = {
    'hoaxlyHelpers.mymiddleware.MicrodataExtruction': 643,
    # 'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'Hoaxlyspiders.middlewares.HoaxlyspidersDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
#    'Hoaxlyspiders.pipelines.HoaxlyspidersPipeline': 300,
#}
# Scraped items are shipped straight to Elasticsearch (settings below).
ITEM_PIPELINES = {
    #'hoaxlyHelpers.mypipelines.TypePipeline': 400,
    #'hoaxlyHelpers.indexpipeline.IndexPipeline': 500,
    #'slybot.dupefilter.DupeFilterPipeline': 600,
    #'scrapy.dupefilters.DupeFilterPipeline': 100,
    'scrapyelasticsearch.scrapyelasticsearch.ElasticSearchPipeline': 900
}
# Polite Scraping
# see https://blog.scrapinghub.com/2016/08/25/how-to-crawl-the-web-politely-with-scrapy/
#
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# NOTE(review): '<EMAIL>' looks like a redacted placeholder -- restore the
# real contact address before deploying.
USER_AGENT = 'Hoaxly Factchecking Search engine bot (+<EMAIL>)'
# 55 second delay
DOWNLOAD_DELAY = 55.0
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# https://doc.scrapy.org/en/latest/topics/autothrottle.html?
AUTOTHROTTLE_ENABLED = True
HTTPCACHE_ENABLED = False
# limit concurrent requests per domain
CONCURRENT_REQUESTS_PER_DOMAIN = 7
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# Target Elasticsearch cluster for the ElasticSearchPipeline above.
# NOTE(review): '<PASSWORD>' is a redacted placeholder -- supply the secret
# via environment/config, not source control.
ELASTICSEARCH_SERVERS = ['http://hoaxly-storage-container:9200']
ELASTICSEARCH_INDEX = 'hoaxly'
ELASTICSEARCH_TYPE = 'items'
ELASTICSEARCH_UNIQ_KEY = 'url'
ELASTICSEARCH_INDEX_DATE_FORMAT = '%Y-%m'
ELASTICSEARCH_PASSWORD = '<PASSWORD>'
ELASTICSEARCH_USERNAME = 'elastic'
| StarcoderdataPython |
# From repository DarkSkull777/Anime-DL-Bot
# Copyright © 2021 BaraniARR
# Encoding = 'utf-8'
# Licensed under MIT License
# Special Thanks for gogoanime
from pyrogram import *
from pyrogram.types import *
import requests
from requests_html import HTMLSession
from bs4 import BeautifulSoup
# Splits Inline buttons into ranges of episodes when Episodes counts is greater than 120
def get_epIndex(client, callback_query):
    """Edit the callback message into an episode-selection keyboard.

    The anime slug is the second '_'-separated field of the callback data.
    The gogoanime category page is scraped for the total episode count; when
    it exceeds 120 the episodes are offered as 40-wide range buttons,
    otherwise one button per episode is shown (5 per row).
    """
    global list_more_anime
    query = callback_query
    data = query.data
    query.answer("Mengambil Episode...")
    data_spl = data.split("_")
    animelink = f'https://gogoanime.ai/category/{data_spl[1]}'
    response = requests.get(animelink)
    plainText = response.text
    soup = BeautifulSoup(plainText, "lxml")
    tit_url = soup.find("div", {"class": "anime_info_body_bg"}).h1.string
    lnk = soup.find(id="episode_page")
    def _show_per_episode_buttons():
        # Small-series / fallback path: one button per episode, 5 per row.
        source_url = lnk.find("li").a
        ep_num_tot = source_url.get("ep_end")
        ep_num_tot_range = int(ep_num_tot) + 1
        keyb_eps = [InlineKeyboardButton(f'{i}', callback_data=f"eps_{i}_{data_spl[1]}")
                    for i in range(1, ep_num_tot_range)]
        keybrd_inline_butt = [keyb_eps[i:i + 5] for i in range(0, len(keyb_eps), 5)]
        reply_markups = InlineKeyboardMarkup(keybrd_inline_butt)
        query.edit_message_text(text=f"""Anda memilih **{tit_url}**,
Pilih Episode yang Anda inginkan :-""", reply_markup=reply_markups, parse_mode="markdown")
    try:
        source_url = lnk.findAll("li")
        for link in source_url:
            # NOTE(review): the list is re-created every iteration, so only the
            # last <li> (which appears to carry the overall 'ep_end') survives.
            # Presumably intentional -- confirm against the site markup.
            list_more_anime = []
            list_more_anime.append(link.a)
        ep_num_tot = list_more_anime[0].get("ep_end")
        if int(ep_num_tot) > 120:
            # Too many episodes for one keyboard: offer 40-wide range buttons.
            listInitial = list(range(1, int(ep_num_tot) + 1))
            n = 40
            listOrganisedInitial = [listInitial[i:i + n] for i in range(0, len(listInitial), n)]
            listIndex = []
            for item in listOrganisedInitial:
                # NOTE(review): item.pop() mutates the chunk while labelling it;
                # kept as-is because the callback indices depend on it.
                listIndex.append(
                    (InlineKeyboardButton(f"{item[0]}-{item.pop()}",
                                          callback_data=f"eplink_{data_spl[1]}_{listOrganisedInitial.index(item)}")))
            o = 3
            listIndexFinal = [listIndex[i:i + o] for i in range(0, len(listIndex), o)]
            listIndexFinal.append([InlineKeyboardButton("◀️ Kembali", callback_data=f"dt_{data_spl[1]}")])
            repl = InlineKeyboardMarkup(listIndexFinal)
            query.edit_message_text(text=f"""Anda memilih **{tit_url}**,
Pilih Episode yang Anda inginkan :-""", reply_markup=repl, parse_mode="markdown")
        else:
            # BUG FIX: was `elif int(ep_num_tot) < 120`, which silently did
            # nothing for exactly 120 episodes.
            _show_per_episode_buttons()
    except Exception:
        # Single-page episode list or unexpected markup: fall back to the
        # per-episode keyboard.  (Narrowed from a bare `except:` so Ctrl-C
        # is not swallowed.)
        _show_per_episode_buttons()
from pylab import *
import numpy as np
from scipy.io.netcdf import netcdf_file
def make_plot(DIR_INP, FILTER, CUTS, nBin):
    """Average every non-bookkeeping variable of a '_stuff_*.nc' file.

    The input file name is reconstructed from the filter/cut settings,
    mirroring the naming scheme of the producer script.

    Parameters
    ----------
    DIR_INP : str
        Directory holding the input NetCDF file.
    FILTER : dict
        Needs 'MCwant', 'fgap', 'WangFlag' plus the boolean switches
        'vsw_filter', 'z_filter_on', 'B_filter', 'filter_dR.icme'.
    CUTS : dict
        The matching lo/hi cut values used in the file name.
    nBin : dict
        'before'/'after' bin counts used in the file name.

    Returns
    -------
    dict
        Maps variable name -> mean value of that variable.
    """
    MCwant = FILTER['MCwant']
    fgap = FILTER['fgap']
    WangFlag = FILTER['WangFlag']
    # Rebuild the file name exactly as it was written.
    FNAME = 'MCflag%s_%dbefore.%dafter_fgap%1.1f' % (MCwant, nBin['before'], nBin['after'], fgap)
    FNAME += '_Wang%s' % (WangFlag)
    if FILTER['vsw_filter']:
        FNAME += '_vlo.%03.1f.vhi.%04.1f' % (CUTS['v_lo'], CUTS['v_hi'])
    if FILTER['z_filter_on']:
        FNAME += '_zlo.%2.2f.zhi.%2.2f' % (CUTS['z_lo'], CUTS['z_hi'])
    if FILTER['B_filter']:
        FNAME += '_Blo.%2.2f.Bhi.%2.2f' % (CUTS['B_lo'], CUTS['B_hi'])
    if FILTER['filter_dR.icme']:
        FNAME += '_dRlo.%2.2f.dRhi.%2.2f' % (CUTS['dR_lo'], CUTS['dR_hi'])
    fname_inp = DIR_INP + '/' + '_stuff_' + FNAME + '.nc'
    finp = netcdf_file(fname_inp, 'r')
    prom = {}
    for varname, var in finp.variables.items():
        # Skip the time-delta and ID bookkeeping variables; only data
        # variables are averaged.
        if varname[:2] == 'dt' or varname[:2] == 'ID':
            continue
        mvs = var.data
        prom[varname] = np.mean(mvs)
        del mvs  # drop the reference into the (mmap'ed) file before closing
    finp.close()
    return prom
| StarcoderdataPython |
6466055 | <reponame>benjohnsonnlp/robosquirt
import logging
from django.apps import AppConfig
class GeoConfig(AppConfig):
    """Django application configuration for the ``geo`` app."""
    # NOTE(review): the logger is named "moistmaster" rather than the app
    # name -- presumably the project-wide logger; confirm this is intentional.
    logger = logging.getLogger("moistmaster")
    name = 'geo'
| StarcoderdataPython |
from __future__ import absolute_import, print_function, division
from functools import partial
from itertools import product
import numpy as np
from six.moves import xrange
from theano import tensor as T
import theano
import theano.tensor.tests.test_extra_ops
from theano.tensor.extra_ops import CumOp
from theano.tests.unittest_tools import SkipTest
from theano.tests import unittest_tools as utt
from .config import mode_with_gpu, test_ctx_name
from ..extra_ops import GpuCumOp
from ..type import get_context
# Run every decorated test once in cumulative-product and once in
# cumulative-sum mode.
cum_modes = utt.parameterized.expand([('mul',), ('add',)])
class TestGpuCumOp(theano.tensor.tests.test_extra_ops.TestCumOp):
    """Run the CumOp test-suite against the GPU implementation (GpuCumOp).

    Inherits the CPU tests and forces the GPU compilation mode; tests are
    skipped unless the configured context is a CUDA device.
    """
    mode = mode_with_gpu
    def setUp(self):
        super(TestGpuCumOp, self).setUp()
        test_ctx = get_context(test_ctx_name)
        if test_ctx.kind != b'cuda':
            raise SkipTest("Cuda specific tests")
        # Device limits used to size inputs so that specific kernel code
        # paths (multi-block, multi-grid, recursive) are exercised.
        self.max_threads_dim0 = test_ctx.maxlsize0
        self.max_grid_size1 = test_ctx.maxgsize2
        self.op_class = CumOp
    @cum_modes
    def test_infer_shape(self, mode):
        # GpuCumOp is only defined for float32 for now, so we skip it
        # in the unsupported cases
        op_class = partial(self.op_class, mode=mode)
        gpucumop_supported_dtypes = ('float32',)
        if theano.config.floatX not in gpucumop_supported_dtypes:
            raise SkipTest('Gpucumop not implemented for dtype %s'
                           % theano.config.floatX)
        x = T.tensor3('x')
        a = np.random.random((3, 5, 2)).astype(theano.config.floatX)
        for axis in range(-len(a.shape), len(a.shape)):
            self._compile_and_check([x],
                                    [op_class(axis=axis)(x)],
                                    [a],
                                    GpuCumOp)
    @cum_modes
    def test_grad(self, mode):
        # no grad for GpuCumOp
        pass
    @cum_modes
    def test_Strides1D(self, mode):
        # 1-D inputs with normal/stepped/negative strides must match numpy.
        op_class = partial(self.op_class, mode=mode)
        np_func = dict(add=np.cumsum, mul=np.cumprod)[mode]
        x = T.fvector('x')
        for axis in [0, None, -1]:
            a = np.random.random((42,)).astype("float32")
            cumop_function = theano.function(
                [x], op_class(axis=axis)(x), mode=self.mode)
            slicings = [slice(None, None, None),  # Normal strides
                        slice(None, None, 2),  # Stepped strides
                        slice(None, None, -1),  # Negative strides
                        ]
            # Cartesian product of all slicings to test.
            for slicing in product(slicings, repeat=x.ndim):
                f = theano.function([x], op_class(axis=axis)(x[slicing]),
                                    mode=self.mode)
                assert [n for n in f.maker.fgraph.toposort()
                        if isinstance(n.op, GpuCumOp)]
                utt.assert_allclose(np_func(a[slicing], axis=axis), f(a))
                utt.assert_allclose(np_func(a[slicing], axis=axis),
                                    cumop_function(a[slicing]))
    @cum_modes
    def test_Strides2D(self, mode):
        # Same stride coverage as test_Strides1D, for matrices.
        np_func = dict(add=np.cumsum, mul=np.cumprod)[mode]
        op_class = partial(self.op_class, mode=mode)
        x = T.fmatrix('x')
        for axis in [0, 1, None, -1, -2]:
            a = np.random.random((42, 30)).astype("float32")
            cumop_function = theano.function(
                [x], op_class(axis=axis)(x), mode=self.mode)
            slicings = [slice(None, None, None),  # Normal strides
                        slice(None, None, 2),  # Stepped strides
                        slice(None, None, -1),  # Negative strides
                        ]
            # Cartesian product of all slicings to test.
            for slicing in product(slicings, repeat=x.ndim):
                f = theano.function([x], op_class(axis=axis)(x[slicing]),
                                    mode=self.mode)
                assert [n for n in f.maker.fgraph.toposort()
                        if isinstance(n.op, GpuCumOp)]
                utt.assert_allclose(np_func(a[slicing], axis=axis), f(a))
                utt.assert_allclose(np_func(a[slicing], axis=axis),
                                    cumop_function(a[slicing]))
    @cum_modes
    def test_Strides3D(self, mode):
        # Same stride coverage as test_Strides1D, for 3-D tensors.
        np_func = dict(add=np.cumsum, mul=np.cumprod)[mode]
        op_class = partial(self.op_class, mode=mode)
        x = T.ftensor3('x')
        for axis in [0, 1, 2, None, -1, -2, -3]:
            a = np.random.random((42, 30, 25)).astype("float32")
            cumop_function = theano.function(
                [x], op_class(axis=axis)(x), mode=self.mode)
            slicings = [slice(None, None, None),  # Normal strides
                        slice(None, None, 2),  # Stepped strides
                        slice(None, None, -1),  # Negative strides
                        ]
            # Cartesian product of all slicings to test.
            for slicing in product(slicings, repeat=x.ndim):
                f = theano.function(
                    [x], op_class(axis=axis)(x[slicing]), mode=self.mode)
                assert [n for n in f.maker.fgraph.toposort()
                        if isinstance(n.op, GpuCumOp)]
                utt.assert_allclose(np_func(a[slicing], axis=axis), f(a))
                utt.assert_allclose(np_func(a[slicing], axis=axis),
                                    cumop_function(a[slicing]))
    @cum_modes
    def test_GpuCumOp1D(self, mode):
        # Exercise every kernel path on vectors: small sizes, multi-block,
        # and the recursive (two-level) scan.
        np_func = dict(add=np.cumsum, mul=np.cumprod)[mode]
        op_class = partial(self.op_class, mode=mode)
        block_max_size = self.max_threads_dim0 * 2
        x = T.fvector('x')
        f = theano.function([x], op_class(axis=0)(x), mode=self.mode)
        assert [n for n in f.maker.fgraph.toposort()
                if isinstance(n.op, GpuCumOp)]
        # Extensive testing for the first 1025 sizes
        a = np.random.random(1025).astype("float32")
        for i in xrange(a.shape[0]):
            utt.assert_allclose(np_func(a[:i]), f(a[:i]))
        # Use multiple GPU threadblocks
        a = np.random.random((block_max_size + 2, )).astype("float32")
        utt.assert_allclose(np_func(a), f(a))
        # Use recursive cumop
        a = np.ones((block_max_size * (block_max_size + 1) + 2,),
                    dtype="float32")
        utt.assert_allclose(np_func(a), f(a))
    @cum_modes
    def test_GpuCumOp2D(self, mode):
        # As test_GpuCumOp1D, plus the multi-gridblock path on matrices.
        np_func = dict(add=np.cumsum, mul=np.cumprod)[mode]
        op_class = partial(self.op_class, mode=mode)
        block_max_size = self.max_threads_dim0 * 2
        x = T.fmatrix('x')
        for shape_axis, axis in zip([0, 1, 0, 1, 0], [0, 1, None, -1, -2]):
            f = theano.function([x], op_class(axis=axis)(x), mode=self.mode)
            assert [n for n in f.maker.fgraph.toposort()
                    if isinstance(n.op, GpuCumOp)]
            # Extensive testing for the first 1025 sizes
            a_shape = [5, 5]
            a_shape[shape_axis] = 1025
            a = np.random.random(a_shape).astype("float32")
            slices = [slice(None), slice(None)]
            for i in xrange(a.shape[shape_axis]):
                slices[shape_axis] = slice(i)
                fa = f(a[slices])
                npa = np_func(a[slices], axis=axis)
                utt.assert_allclose(npa, fa)
            # Use multiple GPU threadblocks
            a_shape = [5, 5]
            a_shape[shape_axis] = block_max_size + 2
            a = np.random.random(a_shape).astype("float32")
            utt.assert_allclose(np_func(a, axis=axis), f(a))
            # Use multiple GPU gridblocks
            a_shape = [4, 4]
            a_shape[1 - shape_axis] = self.max_grid_size1 + 1
            a = np.random.random(a_shape).astype("float32")
            utt.assert_allclose(np_func(a, axis=axis), f(a), rtol=5e-5)
            # Use recursive cumop
            a_shape = [3, 3]
            a_shape[shape_axis] = block_max_size * (block_max_size + 1) + 2
            a = np.random.random(a_shape).astype("float32")
            a = np.sign(a - 0.5).astype("float32")  # Avoid floating point error
            utt.assert_allclose(np_func(a, axis=axis), f(a))
    @cum_modes
    def test_GpuCumOp3D(self, mode):
        # As test_GpuCumOp2D, with grid limits tested on both non-scan axes.
        np_func = dict(add=np.cumsum, mul=np.cumprod)[mode]
        op_class = partial(self.op_class, mode=mode)
        block_max_size = self.max_threads_dim0 * 2
        x = T.ftensor3('x')
        for shape_axis, axis in zip([0, 1, 2, 0, 2, 1, 0], [0, 1, 2, None, -1, -2, -3]):
            f = theano.function([x], op_class(axis=axis)(x), mode=self.mode)
            assert [n for n in f.maker.fgraph.toposort()
                    if isinstance(n.op, GpuCumOp)]
            # Extensive testing for the first 1025 sizes
            a_shape = [5, 5, 5]
            a_shape[shape_axis] = 1025
            a = np.random.rand(*a_shape).astype("float32")
            slices = [slice(None), slice(None), slice(None)]
            for i in xrange(a.shape[shape_axis]):
                slices[shape_axis] = slice(i)
                fa = f(a[slices])
                npa = np_func(a[slices], axis=axis)
                utt.assert_allclose(npa, fa)
            # Use multiple GPU threadblocks (along accumulation axis)
            a_shape = [2, 2, 2]
            a_shape[shape_axis] = block_max_size + 2
            a = np.random.random(a_shape).astype("float32")
            utt.assert_allclose(np_func(a, axis=axis), f(a))
            # Use multiple GPU gridblocks (not along accumulation axis)
            a_shape = [5, 5, 5]
            a_shape[(shape_axis + 1) % 3] = self.max_grid_size1 + 1
            a = np.random.random(a_shape).astype("float32")
            if axis is None:
                # Avoid floating point error
                a = np.sign(a - 0.5).astype("float32")
            utt.assert_allclose(np_func(a, axis=axis), f(a))
            a_shape = [5, 5, 5]
            a_shape[(shape_axis + 2) % 3] = self.max_grid_size1 + 1
            a = np.random.random(a_shape).astype("float32")
            if axis is None:
                # Avoid floating point error
                a = np.sign(a - 0.5).astype("float32")
            utt.assert_allclose(np_func(a, axis=axis), f(a))
            # Use recursive cumop (along accumulation axis)
            a_shape = [3, 3, 3]
            a_shape[shape_axis] = block_max_size * (block_max_size + 1) + 2
            a = np.random.random(a_shape).astype("float32")
            a = np.sign(a - 0.5).astype("float32")  # Avoid floating point error
            utt.assert_allclose(np_func(a, axis=axis), f(a))
    @cum_modes
    def test_GpuCumOp4D(self, mode):
        op_class = partial(self.op_class, mode=mode)
        # Should not use the GPU version.
        x = T.ftensor4('x')
        f = theano.function([x], op_class(axis=1)(x), mode=self.mode)
        assert [n for n in f.maker.fgraph.toposort()
                if isinstance(n.op, CumOp)]
| StarcoderdataPython |
class Config(object):
    """Base configuration object; presumably subclassed per environment."""
    # Name of the active environment; concrete subclasses are expected to
    # set this (None here marks "unset").
    ENVIRONMENT = None
    DEBUG = False
    TESTING = False
| StarcoderdataPython |
__author__ = 'frederico'
# exercise 8: a list with ten elements
from random import randint
lista1 = ['e1', 'e2', 'e3', 'e4', 'e5', 'e6', 'e7', 'e8', 'e9', 'e10']
# exercise 9: print a random index in [0, 9]
print(randint(0, 9))
# exercise 10: a second list with ten more elements
lista2 = ['e11', 'e12', 'e13', 'e14', 'e15', 'e16', 'e17', 'e18', 'e19', 'e20']
# exercise 11: append randomly-picked, non-repeated elements of lista2 to
# lista1 until lista1 holds all twenty elements (rejection sampling).
while len(lista1) < 20:
    y = randint(0, 9)
    if lista2[y] not in lista1:
        lista1.append(lista2[y])
print(lista1)
| StarcoderdataPython |
import numpy
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
# Build the `truncated_norm_C` extension from the Cython wrapper plus the
# plain-C implementation, with NumPy headers on the include path.
# NOTE(review): `cythonize` is imported but unused here, and distutils is
# deprecated (removed in Python 3.12) -- consider migrating to setuptools.
ext = Extension('truncated_norm_C', sources=['truncated_norm_C.pyx', 'truncated_normal.c'],
                include_dirs=[numpy.get_include()])
setup(ext_modules=[ext], cmdclass={'build_ext': build_ext})
6515818 | from .jointsl2loss import JointsL2Loss | StarcoderdataPython |
# Question 3
# Given an undirected graph G, find the minimum spanning tree within G.
# A minimum spanning tree connects all vertices in a graph with the smallest possible total weight of edges.
# Your function should take in and return an adjacency list structured like this:
# {'A': [('B', 2)],
# 'B': [('A', 2), ('C', 5)],
# 'C': [('B', 5)]}
# Vertices are represented as unique strings. The function definition should be question3(G)
# This question is answered by finding the minimum spanning tree of the graph.
# To do this, Kruskal's algorithm (sorted edges + union-find) is used.
#
# class Graph:
# def __init__(self, vertices):
# self.V = vertices # No. of vertices
# self.graph = []
# Global union-find state shared by make_set/find/union below.  question3()
# re-initialises the entry for every vertex it sees, so repeated calls are
# safe (though not thread-safe).
parent = dict()
rank = dict()
def make_set(v):
    """Create a singleton union-find set for vertex *v*."""
    parent[v] = v
    rank[v] = 0
def find(vertex):
    """Return the representative (root) of *vertex*, compressing the path."""
    if parent[vertex] != vertex:
        parent[vertex] = find(parent[vertex])
    return parent[vertex]
def union(v1, v2):
    """Merge the sets containing *v1* and *v2*, using union by rank."""
    root1 = find(v1)
    root2 = find(v2)
    if root1 != root2:
        if rank[root1] > rank[root2]:
            parent[root2] = root1
        else:
            parent[root1] = root2
            # Equal ranks: root2 becomes the new root and its rank grows.
            if rank[root1] == rank[root2]:
                rank[root2] += 1
def question3(graph):
    """Return the minimum spanning tree of *graph* via Kruskal's algorithm.

    *graph* is an adjacency list {vertex: [(neighbour, weight), ...]}; the
    returned MST uses the same representation.  Edges are processed in
    ascending (weight, from, to) order and kept whenever they join two
    different union-find components.
    """
    for vertex in graph:
        make_set(vertex)
    min_span_tree = {}
    # Collect every (weight, from, to) edge.  Each undirected edge appears
    # twice; the duplicate is rejected by the component check below.
    edges = []
    for key in graph:
        for neighbour, weight in graph[key]:
            edges.append((weight, key, neighbour))
    edges.sort()
    for wi, v1, v2 in edges:
        if find(v1) != find(v2):
            union(v1, v2)
            min_span_tree.setdefault(v1, []).append((v2, wi))
            min_span_tree.setdefault(v2, []).append((v1, wi))
    return min_span_tree
# Demo / manual checks.  NOTE: key order in the printed dicts follows
# insertion order (CPython 3.7+); adjacency-list order follows the
# deterministic (weight, from, to) edge sort.
print (question3({'A': [('B', 3), ('E', 1)],
                  'B': [('A', 3), ('C', 9), ('D', 2), ('E', 2)],
                  'C': [('B', 9), ('D', 3), ('E', 7)],
                  'D': [('B', 2), ('C', 3)],
                  'E': [('A', 1), ('B', 2), ('C', 7)]}))
# will print (corrected -- the previously documented ordering could not be
# produced by the deterministic edge sort):
# {'A': [('E', 1)], 'E': [('A', 1), ('B', 2)], 'B': [('D', 2), ('E', 2)],
#  'D': [('B', 2), ('C', 3)], 'C': [('D', 3)]}
print (question3({}))
# will print {}
print (question3({'A': [('B', 7), ('D', 5)],
                  'B': [('A', 7), ('C', 8), ('D', 9), ('E', 7)],
                  'C': [('B', 8), ('E', 5)],
                  'D': [('A', 5), ('B', 9), ('E', 15), ('F', 6)],
                  'E': [('B', 7), ('C', 5), ('D', 15), ('F', 8), ('G', 9)],
                  'F': [('D', 6), ('E', 8), ('G', 11)],
                  'G': [('E', 9), ('F', 11)]}))
# will print (same ordering caveat):
# {'A': [('D', 5), ('B', 7)], 'D': [('A', 5), ('F', 6)], 'C': [('E', 5)],
#  'E': [('C', 5), ('B', 7), ('G', 9)], 'F': [('D', 6)],
#  'B': [('A', 7), ('E', 7)], 'G': [('E', 9)]}
# Time complexity: O(E log E) -- the edge sort dominates; the union-find
# operations are near-constant amortised.  Space complexity: O(V + E).
103726 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest import mock
from airflow.models import DAG, TaskInstance
from airflow.providers.microsoft.azure.hooks.adx import AzureDataExplorerHook
from airflow.providers.microsoft.azure.operators.adx import AzureDataExplorerQueryOperator
from airflow.utils import timezone
from airflow.utils.timezone import datetime
# Shared fixtures for the operator tests below.
TEST_DAG_ID = 'unit_tests'
DEFAULT_DATE = datetime(2019, 1, 1)
# Constructor arguments used to build the operator in every test.
MOCK_DATA = {
    'task_id': 'test_azure_data_explorer_query_operator',
    'query': 'Logs | schema',
    'database': 'Database',
    'options': {'option1': 'option_value'},
}
# Canned ADX "getschema" primary result returned by the mocked hook.
MOCK_RESULT = {
    'name': 'getschema',
    'kind': 'PrimaryResult',
    'data': [
        {'ColumnName': 'Source', 'ColumnOrdinal': 0, 'DataType': 'System.String', 'ColumnType': 'string'},
        {
            'ColumnName': 'Timestamp',
            'ColumnOrdinal': 1,
            'DataType': 'System.DateTime',
            'ColumnType': 'datetime',
        },
    ],
}
class MockResponse:
    """Minimal stand-in for the response object produced by the ADX hook's
    run_query: only `primary_results` is read by the operator."""
    primary_results = [MOCK_RESULT]
class TestAzureDataExplorerQueryOperator(unittest.TestCase):
    """Unit tests for AzureDataExplorerQueryOperator; the ADX hook is mocked."""
    def setUp(self):
        # One-shot DAG plus an operator built from MOCK_DATA for every test.
        args = {'owner': 'airflow', 'start_date': DEFAULT_DATE, 'provide_context': True}
        self.dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once', default_args=args, schedule_interval='@once')
        self.operator = AzureDataExplorerQueryOperator(dag=self.dag, **MOCK_DATA)
    def test_init(self):
        # The constructor must copy the MOCK_DATA fields and default the conn id.
        self.assertEqual(self.operator.task_id, MOCK_DATA['task_id'])
        self.assertEqual(self.operator.query, MOCK_DATA['query'])
        self.assertEqual(self.operator.database, MOCK_DATA['database'])
        self.assertEqual(self.operator.azure_data_explorer_conn_id, 'azure_data_explorer_default')
    @mock.patch.object(AzureDataExplorerHook, 'run_query', return_value=MockResponse())
    @mock.patch.object(AzureDataExplorerHook, 'get_conn')
    def test_run_query(self, mock_conn, mock_run_query):
        # execute() should forward query/database/options to the hook untouched.
        self.operator.execute(None)
        mock_run_query.assert_called_once_with(
            MOCK_DATA['query'], MOCK_DATA['database'], MOCK_DATA['options']
        )
    @mock.patch.object(AzureDataExplorerHook, 'run_query', return_value=MockResponse())
    @mock.patch.object(AzureDataExplorerHook, 'get_conn')
    def test_xcom_push_and_pull(self, mock_conn, mock_run_query):
        # A full TaskInstance run should push the primary result to XCom.
        ti = TaskInstance(task=self.operator, execution_date=timezone.utcnow())
        ti.run()
        self.assertEqual(ti.xcom_pull(task_ids=MOCK_DATA['task_id']), MOCK_RESULT)
| StarcoderdataPython |
# src/tasks.py
# https://www.cloudamqp.com/docs/celery.html
import celery, os, requests, json, tasks, fb, states, apiai
from intents import *
#_post_msg_url = 'https://graph.facebook.com/v2.6/me/messages?access_token='+os.environ['FBOT_ACCESS_TOKEN']
# Celery application wired to the CloudAMQP broker (see the docs link above).
app = celery.Celery('demo')
# Results and event messages are disabled and the broker pool is capped --
# presumably to stay within CloudAMQP connection limits (TODO confirm).
# NOTE(review): these look like legacy UPPERCASE setting names; verify they
# are still honoured by the Celery version in use.
app.conf.update(
    BROKER_URL=os.environ['CLOUDAMQP_URL'],
    BROKER_POOL_LIMIT=20,
    BROKER_HEARTBEAT = None,
    CELERY_RESULT_BACKEND = None,
    CELERY_SEND_EVENTS = False,
    CELERY_EVENT_QUEUE_EXPIRES = 60)
@app.task
def process(chat_platform, chat_data):
    """Celery task: handle one incoming chat webhook payload.

    Extracts the sender id and message text from the (Facebook-shaped)
    payload, runs the text through API.AI, and lets the resulting state
    object reply to the sender.  Messages without a 'message' element are
    ignored.
    """
    # print() form keeps this valid on both Python 2 and 3 (was `print '...'`).
    print('process( %s )' % (json.dumps(chat_data, indent=4)))
    # Need to refactor below so that messages are extracted by chat_platform
    # instead of assuming the Facebook Messenger payload layout.
    # NOTE(review): only the first entry/messaging element is handled; the
    # 'messaging' array may contain multiple messages. Need fix.
    if 'message' in chat_data['entry'][0]['messaging'][0]:
        sender_id = chat_data['entry'][0]['messaging'][0]['sender']['id']
        message_obj = chat_data['entry'][0]['messaging'][0]['message']
        sender_message = message_obj.get('text')
        payload = message_obj.get('quick_reply')
        apiai_data = apiai.query(sender_id, sender_message)
        print('API.AI Query Result: %s' % (json.dumps(apiai_data, indent = 4)))
        state = states.get_state(chat_platform, sender_id, sender_message, apiai_data)
        state.responds_to_sender(sender_message, apiai_data, payload = payload)
    return
@app.task
def save_conversation(timestamp, sender_id, sender_msg, response_msg):
    """Celery task: persist one chat exchange via the firebase HTTP service.

    Deprecated -- kept only until callers stop scheduling it.
    """
    print('SAVE CONVERSATION...')
    # 1. store message to db by POST to baymax_firebase
    post_url = os.environ['POST_MSG_URL']
    print('store_dialog, POST to ' + post_url)
    # NOTE(review): the key is 'response_message' (not 'response_msg');
    # kept as-is since the receiving service presumably expects it.
    data = {'timestamp':timestamp,'sender_id':sender_id, 'sender_msg':sender_msg, 'response_message':response_msg}
    requests.post(post_url, json=data)
    return
@app.task
def process_user_response(sender_id, intent, parameters):
    """Deprecating: dispatch a recognised intent to its handler.

    Unknown or missing intents fall back to the generic 'Fallback' handler.
    """
    print('process_user_response(%s, %s, %s)'%(sender_id, intent, '{parameters}'))
    state = states.get_state(sender_id)
    # Pick the handler first, then invoke it once.
    if intent and intent in intents.keys():
        handler = intents[intent]
    else:
        handler = intents['Fallback']
    handler(sender_id, parameters)
    return
###########
# Helpers #
###########
def collect_sender_info(sender_id):
    """Look up the sender's FB profile and ask them for contact details.

    NOTE(review): get_fb_profile() is neither defined nor imported in the
    visible part of this module -- confirm where it comes from.
    """
    # print() form keeps this valid on both Python 2 and 3.
    print('actually collect sender info')
    profile = get_fb_profile(sender_id)
    first_name = profile['first_name']
    last_name = profile['last_name']
    print('sender name: '+first_name+' '+last_name)
    msg = first_name+' , what is the best way to contact you? Please provide us your contact preference and information?'
    fb.send_message(sender_id, msg)
    return
| StarcoderdataPython |
1635864 | # This file is part of the Open Data Cube, see https://opendatacube.org for more information
#
# Copyright (c) 2015-2020 ODC Contributors
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
from affine import Affine
from odc.geo import CRS, geom
from odc.geo.geobox import (
GeoBox,
bounding_box_in_pixel_domain,
gbox_boundary,
geobox_intersection_conservative,
geobox_union_conservative,
scaled_down_geobox,
)
from odc.geo.math import apply_affine
from odc.geo.testutils import epsg3577, epsg3857, epsg4326, mkA, xy_from_gbox, xy_norm
# pylint: disable=pointless-statement,too-many-statements
def test_geobox_simple():
    """GeoBox basics: coordinate values, resolution, equality and hashing."""
    t = GeoBox(4000, 4000, Affine(0.00025, 0.0, 151.0, 0.0, -0.00025, -29.0), epsg4326)
    # Expected pixel-centre coordinates for the first ten columns/rows
    # (origin 151E/29S, 0.00025 degree pixels).
    expect_lon = np.asarray(
        [
            151.000125,
            151.000375,
            151.000625,
            151.000875,
            151.001125,
            151.001375,
            151.001625,
            151.001875,
            151.002125,
            151.002375,
        ]
    )
    expect_lat = np.asarray(
        [
            -29.000125,
            -29.000375,
            -29.000625,
            -29.000875,
            -29.001125,
            -29.001375,
            -29.001625,
            -29.001875,
            -29.002125,
            -29.002375,
        ]
    )
    expect_resolution = np.asarray([-0.00025, 0.00025])
    assert t.coordinates["latitude"].values.shape == (4000,)
    assert t.coordinates["longitude"].values.shape == (4000,)
    np.testing.assert_almost_equal(t.resolution, expect_resolution)
    np.testing.assert_almost_equal(t.coords["latitude"].values[:10], expect_lat)
    np.testing.assert_almost_equal(t.coords["longitude"].values[:10], expect_lon)
    # Comparison against a non-GeoBox must be False, not an error.
    assert (t == "some random thing") is False
    # ensure GeoBox accepts string CRS
    assert isinstance(
        GeoBox(
            4000, 4000, Affine(0.00025, 0.0, 151.0, 0.0, -0.00025, -29.0), "epsg:4326"
        ).crs,
        CRS,
    )
    # Check GeoBox class is hashable
    t_copy = GeoBox(t.width, t.height, t.transform, t.crs)
    t_other = GeoBox(t.width + 1, t.height, t.transform, t.crs)
    assert t_copy is not t
    assert t == t_copy
    assert len({t, t, t_copy}) == 1
    assert len({t, t_copy, t_other}) == 2
def test_xy_from_geobox():
    """xy_from_gbox yields pixel-centre grids; xy_norm round-trips via A."""
    gbox = GeoBox(3, 7, Affine.translation(10, 1000), epsg3857)
    xx, yy = xy_from_gbox(gbox)
    assert xx.shape == gbox.shape
    assert yy.shape == gbox.shape
    # Pixel centres: translation + 0.5 in each axis.
    assert (xx[:, 0] == 10.5).all()
    assert (xx[:, 1] == 11.5).all()
    assert (yy[0, :] == 1000.5).all()
    assert (yy[6, :] == 1006.5).all()
    xx_, yy_, A = xy_norm(xx, yy)
    assert xx_.shape == xx.shape
    assert yy_.shape == yy.shape
    # Normalised coordinates span [0, 1] and are not degenerate.
    np.testing.assert_almost_equal((xx_.min(), xx_.max()), (0, 1))
    np.testing.assert_almost_equal((yy_.min(), yy_.max()), (0, 1))
    assert (xx_[0] - xx_[1]).sum() != 0
    assert (xx_[:, 0] - xx_[:, 1]).sum() != 0
    # Applying A to the normalised grid must recover the original grid.
    XX, YY = apply_affine(A, xx_, yy_)
    np.testing.assert_array_almost_equal(xx, XX)
    np.testing.assert_array_almost_equal(yy, YY)
def test_geobox():
    """from_geopolygon bounds, basic properties, slicing and set-like ops."""
    # Rectangles in all four lon/lat sign quadrants (last one is unclosed
    # with a repeated start point).
    points_list = [
        [
            (148.2697, -35.20111),
            (149.31254, -35.20111),
            (149.31254, -36.331431),
            (148.2697, -36.331431),
        ],
        [
            (148.2697, 35.20111),
            (149.31254, 35.20111),
            (149.31254, 36.331431),
            (148.2697, 36.331431),
        ],
        [
            (-148.2697, 35.20111),
            (-149.31254, 35.20111),
            (-149.31254, 36.331431),
            (-148.2697, 36.331431),
        ],
        [
            (-148.2697, -35.20111),
            (-149.31254, -35.20111),
            (-149.31254, -36.331431),
            (-148.2697, -36.331431),
            (148.2697, -35.20111),
        ],
    ]
    for points in points_list:
        polygon = geom.polygon(points, crs=epsg3577)
        resolution = (-25, 25)
        geobox = GeoBox.from_geopolygon(polygon, resolution)
        # check single value resolution equivalence
        assert GeoBox.from_geopolygon(polygon, 25) == geobox
        assert GeoBox.from_geopolygon(polygon, 25.0) == geobox
        assert GeoBox.from_geopolygon(polygon, resolution, crs=geobox.crs) == geobox
        # The geobox must cover the polygon within one pixel on every side.
        assert abs(resolution[0]) > abs(
            geobox.extent.boundingbox.left - polygon.boundingbox.left
        )
        assert abs(resolution[0]) > abs(
            geobox.extent.boundingbox.right - polygon.boundingbox.right
        )
        assert abs(resolution[1]) > abs(
            geobox.extent.boundingbox.top - polygon.boundingbox.top
        )
        assert abs(resolution[1]) > abs(
            geobox.extent.boundingbox.bottom - polygon.boundingbox.bottom
        )
    A = mkA(0, scale=(10, -10), translation=(-48800, -2983006))
    w, h = 512, 256
    gbox = GeoBox(w, h, A, epsg3577)
    assert gbox.shape == (h, w)
    assert gbox.transform == A
    assert gbox.extent.crs == gbox.crs
    assert gbox.geographic_extent.crs == epsg4326
    assert gbox.extent.boundingbox.height == h * 10.0
    assert gbox.extent.boundingbox.width == w * 10.0
    assert gbox.alignment == (4, 0)  # 4 because -2983006 % 10 is 4
    assert isinstance(str(gbox), str)
    assert "EPSG:3577" in repr(gbox)
    assert GeoBox(1, 1, mkA(0), epsg4326).geographic_extent.crs == epsg4326
    assert GeoBox(1, 1, mkA(0), None).dimensions == ("y", "x")
    # 2-D slicing clips the shape without moving remaining pixels.
    g2 = gbox[:-10, :-20]
    assert g2.shape == (gbox.height - 10, gbox.width - 20)
    # step of 1 is ok
    g2 = gbox[::1, ::1]
    assert g2.shape == gbox.shape
    assert gbox[0].shape == (1, gbox.width)
    assert gbox[:3].shape == (3, gbox.width)
    with pytest.raises(NotImplementedError):
        gbox[::2, :]
    # too many slices
    with pytest.raises(ValueError):
        gbox[:1, :1, :]
    assert gbox.buffered(0, 10).shape == (gbox.height + 2 * 1, gbox.width)
    assert gbox.buffered(10).shape == (gbox.height + 2 * 1, gbox.width + 2 * 1)
    assert gbox.buffered(20, 30).shape == (gbox.height + 2 * 3, gbox.width + 2 * 2)
    # Union/intersection with self are identities.
    assert (gbox | gbox) == gbox
    assert (gbox & gbox) == gbox
    assert gbox.is_empty() is False
    assert bool(gbox) is True
    assert (gbox[:3, :4] & gbox[3:, 4:]).is_empty()
    assert (gbox[:3, :4] & gbox[30:, 40:]).is_empty()
    with pytest.raises(ValueError):
        geobox_intersection_conservative([])
    with pytest.raises(ValueError):
        geobox_union_conservative([])
    # can not combine across CRSs
    with pytest.raises(ValueError):
        bounding_box_in_pixel_domain(
            GeoBox(1, 1, mkA(0), epsg4326), GeoBox(2, 3, mkA(0), epsg3577)
        )
def test_gbox_boundary():
    """Boundary sampling of a 2x6 grid at 3 points per side."""
    xx = np.zeros((2, 6))
    bb = gbox_boundary(xx, 3)
    # 4 corners plus (pts-2) interior samples on each of the 4 sides.
    assert bb.shape == (4 + (3 - 2) * 4, 2)
    assert set(bb.T[0]) == {0.0, 3.0, 6.0}
    assert set(bb.T[1]) == {0.0, 1.0, 2.0}
def test_geobox_scale_down():
    """scaled_down_geobox shrinks pixel counts while still covering the box."""
    crs = CRS("EPSG:3857")
    A = mkA(0, (111.2, 111.2), translation=(125671, 251465))
    for s in [2, 3, 4, 8, 13, 16]:
        gbox = GeoBox(233 * s, 755 * s, A, crs)
        gbox_ = scaled_down_geobox(gbox, s)
        assert gbox_.width == 233
        assert gbox_.height == 755
        assert gbox_.crs is crs
        assert gbox_.extent.contains(gbox.extent)
        # NOTE(review): this compares gbox's extent with itself (always
        # area 0); presumably gbox_ was intended -- confirm.
        assert gbox.extent.difference(gbox.extent).area == 0.0
    gbox = GeoBox(1, 1, A, crs)
    for s in [2, 3, 5]:
        # NOTE(review): the loop variable `s` is unused; the literal 3 looks
        # like it should be `s` -- confirm before changing.
        gbox_ = scaled_down_geobox(gbox, 3)
        assert gbox_.shape == (1, 1)
        assert gbox_.crs is crs
        assert gbox_.extent.contains(gbox.extent)
| StarcoderdataPython |
# tensordash/tensordash.py (from pateldevang/TensorDash)
import requests
import json
import tensorflow as tf
import getpass
import time
class FirebaseError(Exception):
    """Raised when sign-in or communication with the Firebase backend fails."""
class SendDataToFirebase(object):
    """Thin client for the TensorDash Firebase REST endpoints.

    Handles email/password sign-in against the Identity Toolkit and pushes
    per-epoch metrics and run status (RUNNING / COMPLETED / CRASHED) into
    the Realtime Database over plain HTTP requests.
    """
    def __init__(self, key = None):
        # NOTE(review): this assigns a throwaway local rather than an
        # attribute (likely meant `self.response`); nothing depends on it.
        response = None
    def signin(self, email = None, password = None):
        """Authenticate with Firebase and return (user key, auth params).

        Prompts interactively for any credential that was not supplied.
        Raises FirebaseError if authentication fails for any reason.
        """
        if(email == None):
            email = input("Enter Email :")
        if(email != None and password == None):
            # getpass keeps the password out of the terminal echo.
            password = getpass.getpass("Enter Tensordash Password :")
        headers = {'Content-Type': 'application/json',}
        # NOTE(review): the Firebase Web API key was redacted in this copy
        # ('<KEY>'); a real key is required for sign-in to succeed.
        params = (('key', '<KEY>'),)
        val = {
            "email" : email,
            "password": password,
            "returnSecureToken": "true"
        }
        # NOTE(review): str(dict) yields single-quoted pseudo-JSON; the
        # endpoint apparently tolerates it, but json.dumps would be stricter.
        data = str(val)
        try:
            response = requests.post('https://identitytoolkit.googleapis.com/v1/accounts:signInWithPassword', headers=headers, params=params, data=data)
            output = response.json()
            key = output['localId']
            token = output['idToken']
            # Shaped as request params so it can be passed straight through
            # to the requests calls below.
            auth_token = (('auth', token),)
        except:
            raise FirebaseError("Authentication Failed. Kindly create an account on the companion app")
        return key, auth_token
    def sendMessage(self, key = None, auth_token = None, params = None, ModelName = 'Sample Model'):
        """Append one epoch's metrics under user_data/<key>/<ModelName>.

        `params` is [epoch, loss, acc, val_loss, val_acc]; metrics that are
        None are left out of the hand-built JSON payload.
        """
        epoch, loss, acc, val_loss, val_acc = params
        if(acc == None and val_loss == None):
            data = '{"Epoch":' + str(epoch+1) + ', "Loss" :' + str(loss) + '}'
        elif(acc == None):
            data = '{"Epoch":' + str(epoch+1) + ', "Loss" :' + str(loss) + ', "Validation Loss":' + str(val_loss) + '}'
        elif(val_loss == None):
            data = '{"Epoch":' + str(epoch+1) + ', "Loss" :' + str(loss) + ', "Accuracy" :' + str(acc) + '}'
        else:
            # NOTE(review): if val_acc is None here, the payload contains the
            # Python literal `None`, which is not valid JSON.
            data = '{"Epoch":' + str(epoch+1) + ', "Loss" :' + str(loss) + ', "Accuracy" :' + str(acc) + ', "Validation Loss":' + str(val_loss) + ', "Validation Accuracy" :' + str(val_acc) + '}'
        response = requests.post('https://cofeeshop-tensorflow.firebaseio.com/user_data/{}/{}.json'.format(key, ModelName), params = auth_token, data=data)
    def model_init(self, key = None, auth_token = None, ModelName = 'Sample Model'):
        """Create (or reset) the node for this model under the user's data."""
        data = '{' + ModelName + ':' + '"null"' + '}'
        response = requests.put('https://cofeeshop-tensorflow.firebaseio.com/user_data/{}.json'.format(key), params = auth_token, data = data)
    def updateRunningStatus(self, key = None, auth_token = None, ModelName = 'Sample Model'):
        """Mark the run RUNNING and queue a notification record."""
        data = '{"Status" : "RUNNING"}'
        response = requests.put('https://cofeeshop-tensorflow.firebaseio.com/user_data/{}/{}.json'.format(key, ModelName), params = auth_token, data = data)
        notif_data = '{"Key":' + '"' + str(key) + '"' + ', "Status" : "Running"}'
        response = requests.post('https://cofeeshop-tensorflow.firebaseio.com/notification.json', params = auth_token, data = notif_data)
    def updateCompletedStatus(self, key = None, auth_token = None, ModelName = 'Sample Model'):
        """Mark the run COMPLETED and queue a notification record."""
        data = '{"Status" : "COMPLETED"}'
        response = requests.patch('https://cofeeshop-tensorflow.firebaseio.com/user_data/{}/{}.json'.format(key, ModelName), params = auth_token, data = data)
        notif_data = '{"Key":' + '"' + str(key) + '"' + ', "Status" : "Completed"}'
        response = requests.post('https://cofeeshop-tensorflow.firebaseio.com/notification.json', params = auth_token, data = notif_data)
    def crashAnalytics(self, key = None, auth_token = None, ModelName = 'Sample Model'):
        """Mark the run CRASHED and queue a notification record."""
        data = '{"Status" : "CRASHED"}'
        response = requests.patch('https://cofeeshop-tensorflow.firebaseio.com/user_data/{}/{}.json'.format(key, ModelName), params = auth_token, data = data)
        notif_data = '{"Key":' + '"' + str(key) + '"' + ', "Status" : "Crashed"}'
        response = requests.post('https://cofeeshop-tensorflow.firebaseio.com/notification.json', params = auth_token, data = notif_data)
# Module-level client shared by the Tensordash and Customdash classes below.
SendData = SendDataToFirebase()
class Tensordash(tf.keras.callbacks.Callback):
    """Keras callback that streams per-epoch metrics to the TensorDash
    Firebase backend so training can be monitored from the companion app.

    Args:
        ModelName: Name under which this run is stored and displayed.
        email: TensorDash account email (prompted for if None).
        password: TensorDash account password (prompted for if None).
    """

    # Firebase ID tokens expire after roughly an hour; re-authenticate once
    # the current session is older than this many seconds.
    _SESSION_MAX_AGE_SECONDS = 3000

    def __init__(self, ModelName='Sample_model', email=None, password=None):
        self.start_time = time.time()
        self.ModelName = ModelName
        self.email = email
        self.password = password
        self.epoch_num = 0
        # BUG FIX: the password argument was lost here (placeholder text in
        # the original); forward the stored credential to the sign-in call.
        self.key, self.auth_token = SendData.signin(email=self.email, password=self.password)

    def _refresh_session_if_stale(self):
        """Re-authenticate if the Firebase token may have expired."""
        if time.time() - self.start_time > self._SESSION_MAX_AGE_SECONDS:
            self.start_time = time.time()
            self.key, self.auth_token = SendData.signin(email=self.email, password=self.password)

    def on_train_begin(self, logs=None):
        """Reset metric history and mark the run RUNNING."""
        self.losses = []
        self.accuracy = []
        self.val_losses = []
        self.val_accuracy = []
        self.num_epochs = []
        SendData.model_init(key=self.key, auth_token=self.auth_token, ModelName=self.ModelName)
        SendData.updateRunningStatus(key=self.key, auth_token=self.auth_token, ModelName=self.ModelName)

    def on_epoch_end(self, epoch, logs=None):
        """Record this epoch's metrics and push them to Firebase."""
        logs = logs or {}
        self._refresh_session_if_stale()
        self.losses.append(logs.get('loss'))
        # Keras has used both 'acc' and 'accuracy' (and the val_ variants)
        # across versions; accept either spelling.
        if logs.get('acc') is not None:
            self.accuracy.append(logs.get('acc'))
        else:
            self.accuracy.append(logs.get('accuracy'))
        self.val_losses.append(logs.get('val_loss'))
        if logs.get('val_acc') is not None:
            self.val_accuracy.append(logs.get('val_acc'))
        else:
            self.val_accuracy.append(logs.get('val_accuracy'))
        self.num_epochs.append(epoch)
        # Round reported values to 6 decimal places; metrics that were not
        # reported stay None so sendMessage can omit them from the payload.
        self.loss = float("{0:.6f}".format(self.losses[-1]))
        self.acc = None if self.accuracy[-1] is None else float("{0:.6f}".format(self.accuracy[-1]))
        self.val_loss = None if self.val_losses[-1] is None else float("{0:.6f}".format(self.val_losses[-1]))
        self.val_acc = None if self.val_accuracy[-1] is None else float("{0:.6f}".format(self.val_accuracy[-1]))
        values = [epoch, self.loss, self.acc, self.val_loss, self.val_acc]
        self.epoch_num = epoch + 1
        SendData.sendMessage(key=self.key, auth_token=self.auth_token, params=values, ModelName=self.ModelName)

    def on_train_end(self, logs=None):
        """Mark the run COMPLETED.

        BUG FIX: the original signature was (self, epoch, logs={}), but Keras
        invokes on_train_end with the logs dict as its sole positional
        argument, so the dict silently bound to `epoch`.
        """
        self._refresh_session_if_stale()
        SendData.updateCompletedStatus(key=self.key, auth_token=self.auth_token, ModelName=self.ModelName)

    def sendCrash(self):
        """Mark the run CRASHED (call this from your except handler)."""
        self._refresh_session_if_stale()
        SendData.crashAnalytics(key=self.key, auth_token=self.auth_token, ModelName=self.ModelName)
class Customdash(object):
    """Manual (non-Keras) TensorDash reporter.

    Call sendLoss() once per epoch from any custom training loop, and
    sendCrash() from an except block to flag a failed run.
    """

    # Firebase ID tokens expire after roughly an hour; re-authenticate once
    # the current session is older than this many seconds.
    _SESSION_MAX_AGE_SECONDS = 3000

    def __init__(self, ModelName='Sample Model', email=None, password=None):
        self.start_time = time.time()
        self.ModelName = ModelName
        self.email = email
        self.password = password
        self.epoch = 0
        self.key, self.auth_token = SendData.signin(email=self.email, password=self.password)

    def _refresh_session_if_stale(self):
        """Re-authenticate if the Firebase token may have expired."""
        if time.time() - self.start_time > self._SESSION_MAX_AGE_SECONDS:
            self.start_time = time.time()
            self.key, self.auth_token = SendData.signin(email=self.email, password=self.password)

    def sendLoss(self, epoch=None, loss=None, acc=None, val_loss=None, val_acc=None, total_epochs=None):
        """Push one epoch's metrics to Firebase.

        Marks the model RUNNING on the first epoch and COMPLETED on the last
        (epoch == total_epochs - 1).  Metrics left as None are omitted.
        """
        self._refresh_session_if_stale()
        if epoch == 0:
            SendData.updateRunningStatus(key=self.key, auth_token=self.auth_token, ModelName=self.ModelName)
        # Guard total_epochs: the original raised TypeError when it was None.
        if total_epochs is not None and epoch == total_epochs - 1:
            SendData.updateCompletedStatus(key=self.key, auth_token=self.auth_token, ModelName=self.ModelName)
        loss = float("{0:.6f}".format(loss))
        if acc is not None:
            acc = float("{0:.6f}".format(acc))
        if val_loss is not None:
            # BUG FIX: the original formatted `loss` here, silently reporting
            # the training loss as the validation loss.
            val_loss = float("{0:.6f}".format(val_loss))
        if val_acc is not None:
            val_acc = float("{0:.6f}".format(val_acc))
        params = [epoch, loss, acc, val_loss, val_acc]
        SendData.sendMessage(key=self.key, auth_token=self.auth_token, params=params, ModelName=self.ModelName)

    def sendCrash(self):
        """Mark the run CRASHED (call this from your except handler)."""
        self._refresh_session_if_stale()
        SendData.crashAnalytics(key=self.key, auth_token=self.auth_token, ModelName=self.ModelName)
12850200 | import argparse
import hail as hl
from gnomad.utils.vep import (
process_consequences,
filter_vep_to_canonical_transcripts,
get_most_severe_consequence_for_summary,
CSQ_CODING_HIGH_IMPACT,
CSQ_CODING_MEDIUM_IMPACT,
CSQ_CODING_LOW_IMPACT,
CSQ_NON_CODING,
)
from hail.genetics import reference_genome
from fm_insights.utils import register_log, annotate_bed
# Hail set expressions of VEP consequence terms, grouped by impact severity;
# consumed by annotate_consequence_category() below.
coding_high = hl.set(CSQ_CODING_HIGH_IMPACT)
coding_medium = hl.set(CSQ_CODING_MEDIUM_IMPACT)
coding_low = hl.set(CSQ_CODING_LOW_IMPACT)
non_coding = hl.set(CSQ_NON_CODING)
# Regulatory-region BED tracks per reference build, consumed by
# annotate_bed() in main().
# BUG FIX: the GRCh37 key was spelled "GRCH37", but main() looks tracks up
# with the exact build name ("GRCh37"), which raised a KeyError.
bed_files = {
    "GRCh37": [
        "gs://finemapping-insights/annotations/baselineLD_v2.2/Promoter_UCSC.bed",
        "gs://finemapping-insights/annotations/Ulirsch_v1.0/DHSmerged_Ulirsch.bed",
        "gs://finemapping-insights/annotations/Ulirsch_v1.0/Roadmap_H3K27ac_Ulirsch.bed",
        "gs://finemapping-insights/annotations/Ulirsch_v1.0/CA_H3K27ac_Ulirsch.bed",
    ],
    "GRCh38": [
        "gs://meta-finemapping-simulation/annotations_hg38/Promoter_UCSC.bed",
        "gs://meta-finemapping-simulation/annotations_hg38/DHSmerged_Ulirsch.bed",
        "gs://meta-finemapping-simulation/annotations_hg38/Roadmap_H3K27ac_Ulirsch.bed",
        "gs://meta-finemapping-simulation/annotations_hg38/CA_H3K27ac_Ulirsch.bed",
    ],
}
# Latest public gnomAD genomes release per reference build.
gnomad_latest_versions = {"GRCh37": "2.1.1", "GRCh38": "3.1.2"}
# Population labels used in the gnomAD v2 (GRCh37) / v3 (GRCh38) frequency
# index dictionaries.
gnomad_v2_pops = ["afr", "amr", "asj", "eas", "fin", "nfe", "nfe_est", "nfe_nwe", "nfe_onf", "nfe_seu"]
gnomad_v3_pops = ["afr", "ami", "amr", "asj", "eas", "mid", "fin", "nfe", "oth", "sas"]
def annotate_consequence_category(csq_expr, annot_location="consequence_category"):
    """Bucket a VEP consequence term into a coarse severity category.

    Args:
        csq_expr: Hail string expression holding the most severe consequence.
        annot_location: Key to use for the annotation in the returned dict.

    Returns:
        A dict mapping ``annot_location`` to a Hail case expression yielding
        one of "coding_high", "coding_medium", "coding_low" or "non_coding"
        (missing if the term is in none of the module-level sets).
    """
    annot_expr = {
        annot_location: hl.case()
        .when(coding_high.contains(csq_expr), "coding_high")
        .when(coding_medium.contains(csq_expr), "coding_medium")
        .when(coding_low.contains(csq_expr), "coding_low")
        .when(non_coding.contains(csq_expr), "non_coding")
        .or_missing()
    }
    return annot_expr
def main(args):
    """Annotate the latest gnomAD genomes release for the requested build.

    Adds per-population allele frequencies, worst-case canonical VEP
    consequences, regulatory BED-track overlaps, and a single collapsed
    `consequence` label, then checkpoints the resulting Hail Table to GCS.

    Args:
        args: Parsed CLI namespace with `reference_genome` ("GRCh37" or
            "GRCh38") and `overwrite` attributes.

    Raises:
        ValueError: If `--reference-genome` is not a supported build.
    """
    reference_genome = args.reference_genome
    if reference_genome == "GRCh37":
        from gnomad.resources.grch37.gnomad import public_release
        ht = public_release("genomes").versions[gnomad_latest_versions[reference_genome]].ht()
        freq_index_dict = ht.freq_index_dict.collect()[0]
        # gnomAD v2 keys population frequencies as "gnomad_<pop>".
        freq_expr = {pop: ht.freq[freq_index_dict[f"gnomad_{pop}"]] for pop in gnomad_v2_pops}
        freq_expr.update({"all": ht.freq[freq_index_dict[f"gnomad"]]})
    elif reference_genome == "GRCh38":
        from gnomad.resources.grch38.gnomad import public_release
        ht = public_release("genomes").versions[gnomad_latest_versions[reference_genome]].ht()
        freq_index_dict = ht.freq_index_dict.collect()[0]
        # gnomAD v3 keys high-quality-genotype frequencies as "<pop>-adj".
        freq_expr = {pop: ht.freq[freq_index_dict[f"{pop}-adj"]] for pop in gnomad_v3_pops}
        freq_expr.update({"all": ht.freq[freq_index_dict[f"adj"]]})
    else:
        raise ValueError("Invalid --reference-genome")
    ht = ht.annotate(freq=hl.struct(**freq_expr))
    # Restrict VEP to canonical transcripts and compute the most severe
    # consequence per variant.
    ht = filter_vep_to_canonical_transcripts(ht)
    ht = process_consequences(ht)
    ht = get_most_severe_consequence_for_summary(ht)
    # extract most severe
    ht = ht.select(
        freq=ht.freq,
        most_severe=hl.if_else(hl.is_defined(ht.most_severe_csq), ht.most_severe_csq, "intergenic_variant"),
        gene_most_severe=ht.vep.worst_csq_for_variant_canonical.gene_symbol,
        lof=ht.vep.worst_csq_for_variant_canonical.lof,
        hgnc_id=ht.vep.worst_csq_for_variant_canonical.hgnc_id,
        hgvsp=ht.vep.worst_csq_for_variant_canonical.hgvsp,
        transcript_id=ht.vep.worst_csq_for_variant_canonical.transcript_id,
        polyphen_prediction=ht.vep.worst_csq_for_variant_canonical.polyphen_prediction,
        polyphen_score=ht.vep.worst_csq_for_variant_canonical.polyphen_score,
        sift_prediction=ht.vep.worst_csq_for_variant_canonical.sift_prediction,
        sift_score=ht.vep.worst_csq_for_variant_canonical.sift_score,
        protein_coding=ht.protein_coding,
    )
    ht = ht.select_globals()
    ht = ht.annotate(**annotate_consequence_category(ht.most_severe))
    # Overlap variants with the promoter/DHS/H3K27ac BED tracks for this build.
    ht = annotate_bed(ht, bed_files=bed_files[reference_genome], reference_genome=reference_genome)
    # Collapse annotations into one prioritized label:
    # pLoF > Missense > Synonymous > UTR3/UTR5 > Promoter > CRE > Non-genic.
    ht = ht.annotate(
        consequence=(
            hl.case(missing_false=True)
            .when(hl.is_defined(ht.lof) & (ht.lof != "LC"), "pLoF")
            .when(
                (ht.lof == "LC")
                | (ht.consequence_category == "coding_high")
                | (ht.consequence_category == "coding_medium"),
                "Missense",
            )
            .when(ht.consequence_category == "coding_low", "Synonymous")
            .when(ht.most_severe == "3_prime_UTR_variant", "UTR3")
            .when(ht.most_severe == "5_prime_UTR_variant", "UTR5")
            .when(ht.Promoter_UCSC == 1, "Promoter")
            .when(
                (ht.DHSmerged_Ulirsch == 1) & ((ht.Roadmap_H3K27ac_Ulirsch == 1) | (ht.CA_H3K27ac_Ulirsch == 1)), "CRE"
            )
            .default("Non-genic")
        )
    )
    ht.describe()
    ht = ht.checkpoint(
        f"gs://meta-finemapping-simulation/gnomad/gnomad.genomes.r{gnomad_latest_versions[args.reference_genome]}.sites.most_severe.ht",
        overwrite=args.overwrite,
    )
if __name__ == "__main__":
    # CLI entry point: choose the reference build and whether to overwrite
    # any existing checkpoint, then set up logging and run the pipeline.
    parser = argparse.ArgumentParser()
    parser.add_argument("--reference-genome", type=str, required=True)
    parser.add_argument("--overwrite", action="store_true")
    args = parser.parse_args()
    register_log()
    main(args)
| StarcoderdataPython |
4989831 | <filename>parsercode/daedcode/process_archive.py
"""
Process all logs in archive
Only need to use this if rebuilding draws.txt
Writes to draws_debug.txt
"""
import glob
import os
import parsercode.utils as utils
if __name__ == '__main__':
    utils.Log('Starting\n')
    # Clear the debug file. Using a context manager guarantees the handle is
    # closed even if truncation fails (the original open()/close() pair
    # leaked the handle on error).
    with open('draws_debug.txt', 'w'):
        pass
    file_list = glob.glob(os.path.join('archive', 'output_log*.txt'))
    for filename in file_list:
        try:
            utils.ProcessFile(filename, debug=True, verbose=False)
        except Exception as ex:
            # Record the failure, then re-raise so the run aborts loudly.
            utils.Log(str(ex))
            raise
| StarcoderdataPython |
3576723 | # disable visual plots
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import pystan
from scipy.stats.distributions import cauchy, norm, t as student_t
import arviz as az
import pickle
import gzip
from datetime import datetime
import os
stime = datetime.now()
print("Starting: ", stime)
# Reference distributions: (scipy frozen dist, Stan RNG snippet template,
# parameter tuples).  NOTE(review): not referenced below — presumably kept
# for parity with the sampling script that produced neff_samples.pickle.gz.
distributions = {
    'cauchy' : (cauchy, "generated quantities {{ real z; z = cauchy_rng({},{});}}", [(0,1)]),
    'student_t' : (student_t, "generated quantities {{ real z; z = student_t_rng({},{},{});}}", [(2,0,1), (3,0,1), (10,0,1)]),
    'normal' : (norm, "generated quantities {{ real z; z = normal_rng({},{});}}", [(0,1)]),
}
print("Reading samples", flush=True)
with gzip.open("./neff_samples.pickle.gz", "rb") as f:
    neffs = pickle.loads(f.read(-1))
print("Starting to plot", flush=True)
# One KDE comparison plot (scipy vs stan effective sample sizes) per entry;
# the outer dict keys are unused, so iterate the values directly.
for eff_ns in neffs.values():
    for key_, (eff_n_scipy, eff_n_stan) in eff_ns.items():
        ax = az.kdeplot(eff_n_scipy, plot_kwargs={'color' : 'k', 'linewidth' : 2}, label='scipy', rug=True)
        ax = az.kdeplot(eff_n_stan, plot_kwargs={'color' : 'r', 'ls' : '--', 'linewidth' : 2}, ax=ax, label='stan', rug=True)
        # Mark the nominal target of 4000 effective samples.
        ax.axvline(4000, color='k', ls='dotted', ymin=0.1)
        ax.legend(fontsize=20)
        ax.set_yticks([])
        x_ticks = list(map(int, ax.get_xticks()))
        ax.set_xticks(x_ticks)
        ax.set_xticklabels(x_ticks, fontsize=15)
        # Distribution name as a large in-axes title.
        ax.text(0.02, 0.93, key_.replace("_", " "), transform=ax.transAxes, fontsize=40, horizontalalignment='left', verticalalignment='center')
        plt.savefig(f"{key_}", dpi=300, bbox_inches='tight')
        plt.close("all")
etime = datetime.now()
duration = etime - stime
print("Finished:", etime)
print("Duration", duration)
| StarcoderdataPython |
306808 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'socker_server_window.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """pyuic5-generated UI layer for the socket-server window
    (socker_server_window.ui).

    Do not edit by hand: regenerate from the .ui file instead — the header
    of this file warns that manual changes will be lost.
    """
    def setupUi(self, Form):
        """Build the widget tree, layouts and styling for *Form*."""
        Form.setObjectName("Form")
        Form.resize(571, 563)
        Form.setStyleSheet("background-color: rgb(40, 40, 40);\n"
"font: 14pt \"Arial\";\n"
"color: rgb(255, 255, 255);")
        self.gridLayout_2 = QtWidgets.QGridLayout(Form)
        self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setSpacing(0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.groupBox = QtWidgets.QGroupBox(Form)
        self.groupBox.setObjectName("groupBox")
        self.gridLayout = QtWidgets.QGridLayout(self.groupBox)
        self.gridLayout.setObjectName("gridLayout")
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        # TCP/UDP checkboxes grouped so they behave as mutually exclusive.
        self.tcp_checkBox = QtWidgets.QCheckBox(self.groupBox)
        self.tcp_checkBox.setObjectName("tcp_checkBox")
        self.buttonGroup = QtWidgets.QButtonGroup(Form)
        self.buttonGroup.setObjectName("buttonGroup")
        self.buttonGroup.addButton(self.tcp_checkBox)
        self.horizontalLayout_3.addWidget(self.tcp_checkBox)
        self.udp_checkBox = QtWidgets.QCheckBox(self.groupBox)
        self.udp_checkBox.setObjectName("udp_checkBox")
        self.buttonGroup.addButton(self.udp_checkBox)
        self.horizontalLayout_3.addWidget(self.udp_checkBox)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem)
        self.verticalLayout.addLayout(self.horizontalLayout_3)
        self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_6.setObjectName("horizontalLayout_6")
        self.label_3 = QtWidgets.QLabel(self.groupBox)
        self.label_3.setObjectName("label_3")
        self.horizontalLayout_6.addWidget(self.label_3)
        # Read-only display of the configured save path.
        self.save_path_lineEdit = QtWidgets.QLineEdit(self.groupBox)
        self.save_path_lineEdit.setMinimumSize(QtCore.QSize(0, 35))
        self.save_path_lineEdit.setMaximumSize(QtCore.QSize(219, 16777215))
        self.save_path_lineEdit.setReadOnly(True)
        self.save_path_lineEdit.setObjectName("save_path_lineEdit")
        self.horizontalLayout_6.addWidget(self.save_path_lineEdit)
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_6.addItem(spacerItem1)
        self.verticalLayout.addLayout(self.horizontalLayout_6)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.label = QtWidgets.QLabel(self.groupBox)
        self.label.setMinimumSize(QtCore.QSize(81, 0))
        self.label.setObjectName("label")
        self.horizontalLayout.addWidget(self.label)
        self.host_lineEdit = QtWidgets.QLineEdit(self.groupBox)
        self.host_lineEdit.setMinimumSize(QtCore.QSize(0, 35))
        self.host_lineEdit.setMaximumSize(QtCore.QSize(219, 16777215))
        self.host_lineEdit.setObjectName("host_lineEdit")
        self.horizontalLayout.addWidget(self.host_lineEdit)
        spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem2)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.label_2 = QtWidgets.QLabel(self.groupBox)
        self.label_2.setMinimumSize(QtCore.QSize(81, 0))
        self.label_2.setObjectName("label_2")
        self.horizontalLayout_2.addWidget(self.label_2)
        self.port_lineEdit = QtWidgets.QLineEdit(self.groupBox)
        self.port_lineEdit.setMinimumSize(QtCore.QSize(0, 35))
        self.port_lineEdit.setMaximumSize(QtCore.QSize(219, 16777215))
        self.port_lineEdit.setObjectName("port_lineEdit")
        self.horizontalLayout_2.addWidget(self.port_lineEdit)
        spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem3)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_5.setObjectName("horizontalLayout_5")
        self.start_server_pushButton = QtWidgets.QPushButton(self.groupBox)
        self.start_server_pushButton.setMinimumSize(QtCore.QSize(0, 40))
        self.start_server_pushButton.setObjectName("start_server_pushButton")
        self.horizontalLayout_5.addWidget(self.start_server_pushButton)
        self.open_folder_pushButton = QtWidgets.QPushButton(self.groupBox)
        self.open_folder_pushButton.setMinimumSize(QtCore.QSize(0, 40))
        self.open_folder_pushButton.setObjectName("open_folder_pushButton")
        self.horizontalLayout_5.addWidget(self.open_folder_pushButton)
        spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_5.addItem(spacerItem4)
        self.verticalLayout.addLayout(self.horizontalLayout_5)
        self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
        spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacerItem5, 0, 1, 1, 1)
        self.verticalLayout_2.addWidget(self.groupBox)
        # Log/output area below the settings group box.
        self.textBrowser = QtWidgets.QTextBrowser(Form)
        self.textBrowser.setStyleSheet("background-color: rgb(40, 40, 40);\n"
"font: 14pt \"Arial\";\n"
"color: rgb(255, 255, 255);")
        self.textBrowser.setObjectName("textBrowser")
        self.verticalLayout_2.addWidget(self.textBrowser)
        self.gridLayout_2.addLayout(self.verticalLayout_2, 0, 0, 1, 1)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Apply the (re)translatable user-visible strings to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.groupBox.setTitle(_translate("Form", "通讯方式"))
        self.tcp_checkBox.setText(_translate("Form", "TCP"))
        self.udp_checkBox.setText(_translate("Form", "UDP"))
        self.label_3.setText(_translate("Form", "存储路径:"))
        self.label.setText(_translate("Form", "主机:"))
        self.host_lineEdit.setText(_translate("Form", "127.0.0.1"))
        self.label_2.setText(_translate("Form", "端口:"))
        self.port_lineEdit.setText(_translate("Form", "1234"))
        self.start_server_pushButton.setText(_translate("Form", "开启服务"))
        self.open_folder_pushButton.setText(_translate("Form", "打开文件夹"))
        self.textBrowser.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Arial\'; font-size:14pt; font-weight:400; font-style:normal;\">\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p></body></html>"))
| StarcoderdataPython |
9753626 |
# Emit a `TOKEN: "TOKEN",` mapping line for every non-empty line in
# tokens.txt (e.g. to paste into a map/dict literal elsewhere).
with open('tokens.txt', 'r') as f:
    # Iterate the file directly instead of materializing readlines().
    for line in f:
        token = line.strip()
        if not token:
            continue
        print('{}: "{}",'.format(token, token))
| StarcoderdataPython |
3273528 | <filename>wsi_3/game/locals.py
"""Game constants: window geometry, board layout, and RGB colour tuples."""
# Window size in pixels.
# NOTE(review): "HEIGTH" is a typo for "HEIGHT", kept as-is because other
# modules may import it under this name.
WIDTH, HEIGTH = 800, 800
# Standard 8x8 board.
ROWS, COLS = 8, 8
# Side length of one board square in pixels.
SQUARE_SIZE = WIDTH // COLS
# Colours as (R, G, B) tuples.
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
YELLOW = (255, 255, 0)
WHITE = (255, 255, 255)
| StarcoderdataPython |
1917697 | import re
# Read the puzzle input, closing the file handle promptly (the original
# open() was never closed), and strip the trailing newline from each line.
with open('./input', 'r') as file:
    lines = file.readlines()
lines = list(map(lambda line: line[:-1], lines))
def list_to_string(l):
    """Concatenate an iterable of string fragments into one string.

    str.join runs in linear time; the original character-by-character `+=`
    loop was quadratic in the worst case.
    """
    return ''.join(l)
ids = []
for line in lines:
    # Boarding pass layout: first 7 chars (F/B) binary-encode the row,
    # last 3 chars (L/R) binary-encode the column.
    row_raw = line[:7]
    col_raw = line[7:]
    # F and L select the lower half (bit 0); B and R the upper half (bit 1).
    row_bin = list_to_string(list(map(lambda c: '0' if c == 'F' else '1', row_raw)))
    col_bin = list_to_string(list(map(lambda c: '0' if c == 'L' else '1', col_raw)))
    # Seat ID = row * 8 + column.
    id = int(row_bin, 2) * 8 + int(col_bin, 2)
    ids.append(id)
ids.sort()
# The answer seat is the single missing ID: print the neighbour pairs around
# any gap in the sorted sequence (each gap is reported from both sides).
for i in range(len(ids)):
    if i > 0:
        if ids[i - 1] != ids[i] - 1:
            print(ids[i - 1:i + 1])
    if i < len(ids) - 1:
        if ids[i + 1] != ids[i] + 1:
            print(ids[i - 1:i + 1])
| StarcoderdataPython |
3213273 | <filename>test/minermedic_002_mining_pools_test.py<gh_stars>1-10
# minermedic_002_mining_pools_test.py, Copyright (c) 2019, Phenome Project - <NAME> <<EMAIL>>
import sys
from phenome.test import BaseTest
from phenome.test.supporting.test_mockobject import MockObject
from minermedic.pools.helper import process_pool_apis
from minermedic.miner_results import MinerResults
from phenome import flask_app
from phenome_core.core.base.logger import root_logger as logger
from phenome_core.core.database.model.api import get_objectmodel_by_name
# Configure the phenome unit-test harness before the framework spins up:
# run in unit-test mode with fake values, and preload the minermedic app's
# data and metadata fixtures.
sys._unit_tests_running = True
sys._unit_tests_use_fake_values = True
sys._unit_tests_load_app_data = True
sys._unit_tests_load_app_meta_data = True
sys._unit_tests_app_name = "minermedic"
class TestMiningPools(BaseTest):
    """Integration-style tests for the minermedic mining-pool adapters.

    Each test stands up a local HTTP simulator that replays canned pool-API
    responses from a fixture file, drives process_pool_apis() against it,
    and checks the parsed algorithm id, accepted hashrate and computed
    profitability.
    """
    def setUp(self):
        super(TestMiningPools, self).setUp()
    def _test_mining_pool(self, watts, coin_address, worker, algo, hashrate_by_algo,
                          pool, sim_urls, sim_file, miner_model_id):
        """Run one pool adapter end-to-end against the simulator.

        Returns (algo_id, accepted hashrate, profitability); failures are
        logged and leave the defaults (0, None, None) in place.
        """
        algo_id = 0
        hashrate = None
        profitability = None
        results = MinerResults()
        miner = MockObject()
        if miner_model_id is not None:
            miner.model = get_objectmodel_by_name(miner_model_id)
        # use an arbitrary number for our power so we can get profitability numbers
        miner.power_usage_watts = watts
        # coin address is needed to query for ethermine stats
        miner.coin_address = coin_address
        worker = worker
        algo = algo
        pool = pool
        # tell the REST API which URLS to simulate, which port to target
        sys._unit_tests_API_SIMULATE_URLS = sim_urls
        sys._unit_tests_API_TARGET_LOC = self.CONST_SIMULATOR_API_TARGET_LOC
        sys._unit_tests_API_TARGET_PORT = self.CONST_SIMULATOR_API_TARGET_PORT
        # do the POOL API and HASHRATE calls
        sys._unit_tests_MINERMEDIC_CALL_POOL_APIS = True
        sys._unit_tests_MINERMEDIC_CALL_HASHRATE_CALCS = True
        # start simulator
        # get path to data file
        simulator_data_path = self.absolute_path_of_test_directory + "/apps/minermedic/resources/mining_pools/" + sim_file
        # start the simulator
        simulator = self.startSimulator(simulator_data_path, 'HTTP', str(self.CONST_SIMULATOR_API_TARGET_PORT))
        try:
            # contact pools, do profitability stuff
            process_pool_apis(results, miner, worker, algo, pool)
            # get the resulting "algo_idx"
            algo_id = results.get_result(miner.id, 'algo')
            # get the accepted hashrate from the POOL
            hashrate = results.get_result(miner.id, 'hashrates_by_algo')[hashrate_by_algo]['accepted']
            profitability = results.get_result(miner.id, 'profitability')
        except Exception as ex:
            logger.error(ex)
        finally:
            # always stop the simulator, even when the adapter call failed
            simulator.stop()
        # return the results
        return algo_id, hashrate, profitability
    def test_001_mining_pool_hub(self):
        """MiningPoolHub adapter: Myriad-Groestl stats via the hub API."""
        # set the needed API and USER_ID keys
        flask_app.config['MINING_POOL_HUB_API_KEY'] = 'af3770711a5bbeed369699c1a0ca793cb2ab8ef49d9ca0d4896eac344f9fffff'
        flask_app.config['MINING_POOL_HUB_USER_ID'] = '123456'
        watts = 800
        worker = "worker1"
        coin_address = None
        algo_in = "myriadcoin-groestl"
        algo_out = 'Myriad-Groestl'
        pool = "hub.miningpoolhub.com:17005"
        sim_urls = ["(.*)(miningpoolhub.com)","(min-api.cryptocompare.com)"]
        sim_file = "miningpoolhub.py"
        algo_id, hashrate, profitability = \
            self._test_mining_pool(watts, coin_address, worker, algo_in, algo_out, pool, sim_urls, sim_file, None)
        # now do the tests
        self.assertEqual(algo_id, 100)
        self.assertEqual(hashrate, 10.536986433000001)
        self.assertEqual(profitability, 0.24463984606636374)
    def test_002_nicehash(self):
        """NiceHash adapter: lbry stats, with speed read off the miner model."""
        watts = 800
        worker = "worker1"
        coin_address = '1J6HNskoH271PVPFvfAmBqUmarMFjwwAAA'
        algo_in = "lbry"
        algo_out = 'lbry'
        pool = "lbry.usa.nicehash.com:3356"
        sim_urls = ["(.*)(nicehash.com)","(min-api.cryptocompare.com)"]
        sim_file = "nicehash.py"
        # for NICEHASH we need to pull the speed information off of the model configuration
        model_id = "ASIC_Baikal_GIANT_B"
        algo_id, hashrate, profitability = \
            self._test_mining_pool(watts, coin_address, worker, algo_in, algo_out, pool, sim_urls, sim_file, model_id)
        # now do the tests
        self.assertEqual(algo_id, 23)
        self.assertEqual(hashrate, 37.04807992343783)
        self.assertEqual(profitability, 0.015007162499999997)
    def test_003_ethermine_eth(self):
        """Ethermine adapter: ETH (ethash) stats keyed by coin address."""
        watts = 800
        sim_urls = ["(.*)(.ethermine.org)","(min-api.cryptocompare.com)"]
        sim_file = "ethermine_eth.py"
        coin_address = '0x41daf079cdefa7800eab2e51748614f0d386b1ff'
        worker = "4814F8FF882B"
        algo_in = "ethash"
        algo_out = "ethash"
        pool = "us1.ethermine.org:4444"
        algo_id, hashrate, profitability = \
            self._test_mining_pool(watts, coin_address, worker, algo_in, algo_out, pool, sim_urls, sim_file, None)
        # now do the tests
        self.assertEqual(algo_id, 20)
        self.assertEqual(hashrate, 35555555.55555555)
        self.assertEqual(profitability, 0.06844435263755579)
    def test_004_ethermine_etc(self):
        """Ethermine adapter: ETC (ethash) stats on the ETC endpoint."""
        watts = 800
        sim_urls = ["(.*)(.ethermine.org)","(min-api.cryptocompare.com)"]
        sim_file = "ethermine_etc.py"
        coin_address = '0x2da4e946c0ee6977bc44fbba9019b3931952cfff'
        worker = "holitics1"
        algo_in = "ethash"
        algo_out = "ethash"
        pool = "us1-etc.ethermine.org:14444"
        algo_id, hashrate, profitability = \
            self._test_mining_pool(watts, coin_address, worker, algo_in, algo_out, pool, sim_urls, sim_file, None)
        # now do the tests
        self.assertEqual(algo_id, 20)
        self.assertEqual(hashrate, 188888888.8888889)
        self.assertEqual(profitability, 0.37976668267087954)
    def test_005_f2pool(self):
        """F2Pool adapter: LTC (scrypt) stats keyed by account name."""
        watts = 800
        sim_urls = ["(.*)(.f2pool.com)(.*)","(min-api.cryptocompare.com)"]
        sim_file = "f2pool.py"
        coin_address = 'holitics'
        worker = "001"
        algo_in = "scrypt"
        algo_out = "scrypt"
        pool = "ltc-us.f2pool.com:8888"
        algo_id, hashrate, profitability = \
            self._test_mining_pool(watts, coin_address, worker, algo_in, algo_out, pool, sim_urls, sim_file, None)
        # now do the tests
        self.assertEqual(algo_id, 0)
        self.assertEqual(hashrate, 0.272014595)
        self.assertEqual(profitability, 0.7613440161264865)
| StarcoderdataPython |
1844062 | #!/usr/bin/env python3.8
# Copyright 2021 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Produces a representation of the dependencies between Banjo libraries.'''
import argparse
import copy
import json
import re
import sys
# Matches a Banjo library label of the form //sdk/banjo/<name>:<name>
# (optionally followed by "..." in gn's tree output); group 1 is the
# library name.
_LIBRARY_LABEL = r'^//sdk/banjo/([^:]+):\1(\.\.\.)?$'
# Placeholder ("dummy") libraries; connected components that touch any of
# these are reported separately as blocked in main().
_DUMMY_LIBRARIES = set(
    [
        'zircon.hw.pci',
        'zircon.hw.usb',
        'zircon.syscalls.pci',
    ])
def _extract_dependencies(label, base_depth, deps, result):
    """Recursively parse gn's indented `deps` tree listing.

    `deps` is a list of lines whose leading indentation (the column of the
    first "//") encodes tree depth.  Direct children of *label* sit at depth
    base_depth + 2 and are collected into result[label]; deeper lines belong
    to the most recent child and are parsed recursively under it.

    Returns the number of lines consumed so the caller can skip past this
    subtree.
    """
    index = 0
    current_label = None
    dep_labels = []
    while index < len(deps):
        # Column of "//" encodes the nesting depth of this line.
        depth = deps[index].index('//')
        if depth <= base_depth:
            # Back at (or above) our own level: this subtree is finished.
            break
        elif depth == base_depth + 2:
            # Direct child; strip gn's "..." marker for repeated subtrees.
            current_label = re.sub(r'\.\.\.', '', deps[index]).strip()
            dep_labels.append(current_label)
            index += 1
        else:
            # Deeper line: recurse to consume current_label's own subtree.
            index += _extract_dependencies(
                current_label, base_depth + 2, deps[index:], result)
    result[label] = dep_labels
    return index
def extract_dependencies(deps):
    """Parse gn's indented deps listing into a {label: [dep labels]} map.

    The synthetic root label 'main' collects the top-level entries.
    """
    tree = {}
    _extract_dependencies('main', -2, deps, tree)
    return tree
def filter_banjo_libraries(deps):
    """Reduce a full dependency map to Banjo libraries only.

    Keeps only entries whose label is a Banjo library, rewrites their dep
    lists to Banjo library names, and adds every standalone library listed
    under the synthetic 'main' root with an empty dep list.

    (Rewritten from three nested lambda/map/filter pipelines into explicit
    comprehensions for readability; behavior is unchanged.)
    """
    def normalize(labels):
        # Map labels to library names, dropping anything that is not a
        # Banjo library (get_library_label returns None for those).
        return [lib for lib in (get_library_label(label) for label in labels) if lib]

    result = {}
    for target, target_deps in deps.items():
        library = get_library_label(target)
        if library:
            result[library] = normalize(target_deps)
    # Add all standalone libraries.
    for library in normalize(deps['main']):
        result.setdefault(library, [])
    return result
def get_library_label(label):
    """Return the Banjo library name encoded in *label*, or None if the
    label is not a Banjo library target."""
    found = re.match(_LIBRARY_LABEL, label)
    if found:
        return found.group(1)
    return None
def remove_library(name, deps):
    """Delete *name* from the dependency map in place: drop its own entry
    (if any) and remove it from every remaining dep list."""
    deps.pop(name, None)
    for dep_list in deps.values():
        if name in dep_list:
            dep_list.remove(name)
def add_back_edges(deps):
    """Return an undirected copy of *deps*: for every edge a -> b, the
    result also contains b -> a.  The input map is left untouched."""
    undirected = {node: list(edges) for node, edges in deps.items()}
    for node, edges in deps.items():
        for neighbour in edges:
            undirected[neighbour].append(node)
    return undirected
def find_connected_components(deps):
    """Group the (undirected) graph *deps* into connected components.

    Consumes *deps*: every visited node is popped, so the map is empty on
    return.  Returns a list of sets of node names.
    """
    components = []
    while deps:
        member_set = set()
        # Iterative depth-first walk starting from the first remaining node.
        pending = [next(iter(deps))]
        while pending:
            node = pending.pop()
            if node not in deps:
                # Already visited (or not a node of this graph) — skip.
                continue
            pending.extend(deps.pop(node))
            member_set.add(node)
        components.append(member_set)
    return components
def main():
    """Load GN dependency info, reduce it to the Banjo library graph, and
    print its connected components (blocked / standalone / grouped).

    Returns 0 on success; the value is passed to sys.exit by the caller.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--gn-deps',
        help='Path to the JSON-formatted files with dependency info',
        required=True)
    args = parser.parse_args()
    with open(args.gn_deps, 'r') as deps_file:
        deps = json.load(deps_file)
    all_deps = extract_dependencies(deps['//sdk/banjo:banjo']['deps'])
    banjo_deps = filter_banjo_libraries(all_deps)
    # These libraries connect nearly everything to everything; drop them so
    # the component analysis stays meaningful.
    remove_library('zx', banjo_deps)
    remove_library('fuchsia.hardware.composite', banjo_deps)
    banjo_graph = add_back_edges(banjo_deps)
    components = find_connected_components(banjo_graph)
    # BUG FIX: the original kept these as filter()/map() iterators, which
    # are always truthy in Python 3, so the `if` guards never skipped a
    # section and empty headers were printed.  Materialize lists instead.
    blocked = [group for group in components if group & _DUMMY_LIBRARIES]
    if blocked:
        print()
        print('Blocked by dummy libraries')
        all_blocked = set()
        for group in blocked:
            all_blocked |= group
        for library in sorted(all_blocked - _DUMMY_LIBRARIES):
            print(' - ' + library)
    standalones = [
        group for group in components
        if len(group) == 1 and not group & _DUMMY_LIBRARIES
    ]
    if standalones:
        print()
        print('Standalone:')
        for singleton in standalones:
            print(' - ' + next(iter(singleton)))
    # Sort multi-library groups by size only: sets have no total order, so
    # the original tuple sort (len, set) compared sets on length ties.
    groups = sorted(
        (group for group in components
         if len(group) > 1 and not group & _DUMMY_LIBRARIES),
        key=len)
    if groups:
        print()
        print('Groups:')
        for index, group in enumerate(groups):
            print('[' + str(index) + ']')
            for library in sorted(group):
                print(' - ' + library)
    return 0
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit code.
    sys.exit(main())
| StarcoderdataPython |
1729890 | import logging
import codecs
import re
from sortedcontainers import SortedSet
from dse.cqlengine import columns
from dse.cqlengine.models import Model
from dse import ConsistencyLevel
from nltk.corpus import stopwords
class SearchVideo():
    """Value object describing a single video in a search result page."""

    def __init__(self, user_id, added_date, video_id, name, preview_image_location):
        """Store the identifying and display fields for one video."""
        (self.user_id,
         self.added_date,
         self.video_id,
         self.name,
         self.preview_image_location) = (
            user_id, added_date, video_id, name, preview_image_location)
class SearchVideoResults():
    """Bundle of a search query, its paging cursor, and the matching videos."""

    def __init__(self, query, paging_state, videos):
        """Record the query text, the opaque paging state, and the hit list."""
        self.query, self.paging_state, self.videos = query, paging_state, videos
class TagsByLetterModel(Model):
    """Model class that maps to the tags_by_letter table"""
    __table_name__ = 'tags_by_letter'
    # Composite primary key: partitioned by the tag's first letter,
    # clustered by the full tag text.
    first_letter = columns.Text(primary_key=True)
    tag = columns.Text(primary_key=True)
class SearchService(object):
    """Provides methods that implement functionality of the Search Service."""
    def __init__(self, session):
        # DSE session used for every query issued by this service.
        self.session = session
        # Prepared statements for search_videos() and get_query_suggestions()
        self.search_videos_prepared = \
            session.prepare('SELECT * FROM videos WHERE solr_query = ?')
        self.get_query_suggestions_prepared = \
            session.prepare('SELECT name, tags, description FROM videos WHERE solr_query = ?')
        # English stop words, filtered out of type-ahead suggestions.
        self.stop_words = set(stopwords.words('english'))

    def search_videos(self, query, page_size, paging_state):
        """Search videos by name/tags/description via a DSE Search (Solr) query.

        Returns a SearchVideoResults whose paging_state is a hex-encoded
        cursor ('' when there is no further page).
        Raises ValueError for an empty query or non-positive page size.
        """
        if not query:
            raise ValueError('No query string provided')
        if page_size <= 0:
            raise ValueError('Page size should be strictly positive for video search')
        results = list()
        next_page_state = ''
        # https://docs.datastax.com/en/dse/6.0/cql/cql/cql_using/search_index/cursorsDeepPaging.html
        # Boost name matches (^4) above tags (^2) above description.
        solr_query = '{"q":"name:(' + query + ')^4 OR tags:(' + query + ')^2 OR description:(' + query + ')", "paging":"driver"}'
        bound_statement = self.search_videos_prepared.bind([solr_query])
        bound_statement.fetch_size = page_size
        bound_statement.consistency_level = ConsistencyLevel.LOCAL_ONE # required for search queries
        result_set = None
        if paging_state:
            # see below where we encode paging state to hex before returning
            result_set = self.session.execute(bound_statement, paging_state=codecs.decode(paging_state, 'hex'))
        else:
            result_set = self.session.execute(bound_statement)
        # deliberately avoiding paging in background
        current_rows = result_set.current_rows
        remaining = len(current_rows)
        for video_row in current_rows:
            logging.debug('next search video is: ' + video_row['name'])
            results.append(SearchVideo(user_id=video_row['userid'], added_date=video_row['added_date'],
                                       video_id=video_row['videoid'], name=video_row['name'],
                                       preview_image_location=video_row['preview_image_location']))
            # ensure we don't continue asking and pull another page
            remaining -= 1
            if (remaining == 0):
                break
        if len(results) == page_size:
            # Use hex encoding since paging state is raw bytes that won't encode to UTF-8
            next_page_state = codecs.encode(result_set.paging_state, 'hex')
        return SearchVideoResults(query=query, paging_state=next_page_state, videos=results)

    def get_query_suggestions(self, query, page_size):
        """Return sorted type-ahead suggestions for *query*: words beginning
        with the query string, harvested from matching videos' names, tags and
        descriptions, with English stop words removed.

        Raises ValueError for an empty query or non-positive page size.
        """
        if not query:
            raise ValueError('No query string provided')
        if page_size <= 0:
            raise ValueError('Page size should be strictly positive for search suggestions')
        # NOTE(review): results/next_page_state are assigned but never used in
        # this method — likely copied from search_videos(); confirm and remove.
        results = list()
        next_page_state = ''
        solr_query = '{"q":"name:(' + query + '*) OR tags:(' + query + '*) OR description:(' + query + '*)", "paging":"driver"}'
        bound_statement = self.get_query_suggestions_prepared.bind([solr_query])
        # TODO: not sure we're interpreting page size correctly here.
        # Should it be a limit on our database query, or a limit on the number of terms returned?
        bound_statement.fetch_size = page_size
        bound_statement.consistency_level = ConsistencyLevel.LOCAL_ONE # required for search queries
        result_set = self.session.execute(bound_statement)
        # deliberately avoiding paging in background
        current_rows = result_set.current_rows
        remaining = len(current_rows)
        suggestions = SortedSet()
        # Whole words that start with the query (e.g. 'cat' matches 'cats').
        pattern = re.compile(r'\b' + re.escape(query) + r'[a-z]*\b')
        for video_row in current_rows:
            logging.debug('next video used for suggestions is: ' + video_row['name'])
            for name_term in re.findall(pattern, video_row['name']):
                logging.debug('Name term: ' + name_term)
                suggestions.add(name_term)
            for tag in video_row['tags']:
                for tag_term in re.findall(pattern, tag):
                    logging.debug('Tag term: ' + tag_term)
                    suggestions.add(tag_term)
            for desc_term in re.findall(pattern, video_row['description']):
                logging.debug('Description term: ' + desc_term)
                suggestions.add(desc_term)
            # ensure we don't continue asking and pull another page
            remaining -= 1
            if (remaining == 0):
                break
        # remove stop words
        suggestions.difference_update(self.stop_words)
        return list(suggestions)
| StarcoderdataPython |
399021 | <gh_stars>0
from rest_framework import serializers
from api.models.device import Device
from api.models.order import Order
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
    """DRF serializer exposing a user's id, username, and the primary keys
    of the user's related devices and orders."""
    devices = serializers.PrimaryKeyRelatedField(many=True, queryset=Device.objects.all())
    orders = serializers.PrimaryKeyRelatedField(many=True, queryset=Order.objects.all())
    class Meta:
        model = User
        fields = ('id', 'username', 'devices', 'orders')
1729052 | <filename>scraper/main.py<gh_stars>0
#principal functions
from data import Amazon, Ebay, Mercado_Libre, Best_Buy, headers
from scrape_funcs import extract_soup, search_boxes
from page_getters import get_names, get_images, get_products_urls, get_price
# Clasification info
from clasificator import add_clasification_info
# Corpus and Params
from statistics import min_with_none
from corpus import create_corpus_points, hit_points_info
# Basics
import requests
import time
import re
def request_products(user_request, Page, header, home=False, country='mx'):
    """Scrape *Page* for *user_request* and return a dict of product info.

    On HTTP 200 the dict holds parallel 'names'/'images'/'urls'/'prices'
    lists plus 'status'. On any other final status, the dict carries store
    metadata and empty lists so callers can detect the failed page.
    503 responses are retried once per second until the status changes.
    """
    page_url = Page.adapt_url(Page, user_request, country)
    # All the HTML of the page
    page_soup, status = extract_soup(page_url, header)
    # BUG FIX: the original retry loop called extract_soup() without the
    # header argument, and after a successful retry it fell through the
    # if/elif/else and implicitly returned None. Retry first, then branch
    # on the final status exactly once.
    while status == 503:
        time.sleep(1)
        page_soup, status = extract_soup(page_url, header)
    if status == 200:
        # HTML divided by products, and stored as elements of an array
        page_boxes = search_boxes(page_soup, Page.boxes)
        page_products = {}
        # Obtain the info of the product
        page_products['names'] = get_names(page_boxes, Page)
        page_products['images'] = get_images(page_boxes, Page)
        page_products['urls'] = get_products_urls(page_boxes, Page)
        page_products['prices'] = get_price(country, page_boxes, Page, home)
        page_products['status'] = status
        return page_products
    # With the empty values, not None, the caller knows that this won't be
    # uploaded. In case of one 'None', it thinks that there was a product box
    # without info. Something that occurs in Amazon.
    page_products = {}
    page_products['store'] = Page.name
    page_products['idx'] = Page.index
    page_products['product'] = user_request
    page_products['names'] = []
    page_products['images'] = []
    page_products['urls'] = []
    page_products['prices'] = []
    page_products['status'] = status
    return page_products
def scrap_product_in_Pages(user_request, test=False, home=False):
    """Scrape every supported store for *user_request* and upload the
    recommended product of each store to the aggregation server.

    Builds a word-point corpus from all pages' product names, scores each
    page's products against it, classifies them, and POSTs the recommended
    product. With test=True the recommended product is also printed.
    """
    # Index of the header to be used; rotated across requests so the same
    # user agent is not reused for every store.
    h = 0
    url = 'https://vsbuy.xyz/scrap/'
    Pages = [Amazon, Mercado_Libre, Ebay, Best_Buy]
    products_from_pages = []
    for Page in Pages:
        products_from_pages.append(request_products(user_request, Page, header=h, home=home))
        # BUG FIX: the branches were swapped (h stayed 0 forever); cycle the
        # header index through 0..3.
        if h < 3:
            h += 1
        else:
            h = 0
    page_names = []
    for page in products_from_pages:
        # The error 503 is considered at the moment of scraping the page. If status
        # turns to be a 400 type, it returns the status and breaks the code for that
        # page
        if page['status'] == 200:
            page_names.append(page['names'])
        else:
            # BUG FIX: the message printed the status twice; name the failing
            # store (error dicts always carry 'store') next to the status.
            print(f"Error: {page['status']} with {page['store']}")
    if len(page_names) > 0:
        # Creates a dictionary with point values of each word by:
        # 1) Aparition order: 1st = highest, last = lowest.
        # 2) Times that shows.
        # Needs to be improved in the tokenizer
        corpus = create_corpus_points(user_request, page_names, header=h)
        for p in range(len(Pages)):
            page_products = products_from_pages[p]
            Page = Pages[p]
            # Error pages carry an int status instead of a price list.
            if isinstance(page_products['prices'], list):
                # Using the corpus info
                page_products['hit points'] = hit_points_info(user_request, page_products, corpus)
                # This makes more sense with the Jupyter/Colab Notebook
                recomended_product = add_clasification_info(page_products)
                if test:
                    print('Info of the cheapest product recomended')
                # Info that completes the DB requirements.
                # Added here because this is the last filter that decides
                # whether the page contributed something to upload.
                recomended_product['store'] = Page.index
                recomended_product['product'] = user_request
                if test:
                    for key, value in recomended_product.items():
                        print(key, ':', value)
                # Posting data in Server
                r = requests.post(url=url, json=recomended_product)
                # In case the server returns an error, retry the upload:
                request_answer = str(r.text)
                if re.match(u"<code>DEBUG", request_answer) is not None:
                    t = 1
                    # BUG FIX: the retry pattern contained a stray space
                    # ("<code> DEBUG"), so the retry loop never matched the
                    # server reply; use the same pattern as the check above.
                    while re.match(u"<code>DEBUG", r.text) is not None and t < 10:
                        print('Server Error. Trying again...')
                        r = requests.post(url=url, json=recomended_product)
                        time.sleep(1)
                        t += 1
                else:
                    print(f'Succes uploading! {user_request}')
    print('\n')
# Interactive entry point: ask for a product and run a full scrape in
# test mode (verbose output), using home-country pricing.
if __name__ == '__main__':
    user_request = input('Enter the product that you want to scrap: ')
    print('\n')
    scrap_product_in_Pages(user_request, test=True, home=True)
| StarcoderdataPython |
12826444 | <filename>bgkube/bg.py
from six import add_metaclass
from time import sleep
from bgkube import cmd, registries
from bgkube.api import KubeApi
from bgkube.run import Runner
from bgkube.errors import ActionFailedError
from bgkube.utils import output, log, timestamp, require, get_loadbalancer_address, is_host_up
class BgKubeMeta(type):
    """Metaclass that declares BgKube's configuration surface.

    Every name in `required` must be supplied by the caller's options;
    names in `optional` fall back to `optional_defaults` (or None).
    """
    # Options that load_options() must find on the options object.
    required = [
        'cluster_zone', 'cluster_name', 'image_name', 'service_name', 'service_config', 'deployment_config'
    ]
    # Options that may be omitted; defaults below (else None).
    optional = [
        'context', 'dockerfile', 'env_file', 'smoke_tests_command', 'smoke_service_config', 'docker_machine_name',
        'db_migrations_job_config_seed', 'db_migrations_status_command', 'db_migrations_apply_command',
        'db_migrations_rollback_command', 'kops_state_store', 'container_registry', 'service_timeout',
        'smoke_service_timeout', 'deployment_timeout', 'db_migrations_job_timeout', 'docker_build_args'
    ]
    optional_defaults = {
        'context': '.',
        'dockerfile': './Dockerfile',
        'container_registry': registries.DEFAULT,
        'service_timeout': 120,
        'smoke_service_timeout': 120,
        'deployment_timeout': 120,
        'db_migrations_job_timeout': 120,
        'docker_build_args': ''
    }
    def __new__(mcs, name, bases, attrs):
        # Expose the option lists on the class and pre-seed every option as a
        # class attribute (default or None) so instances can overwrite them.
        attrs['required'] = mcs.required
        attrs['optional'] = mcs.optional
        for field in mcs.required + mcs.optional:
            attrs[field] = mcs.optional_defaults.get(field, None)
        return super(BgKubeMeta, mcs).__new__(mcs, name, bases, attrs)
@add_metaclass(BgKubeMeta)
class BgKube(object):
    """Blue/green deployment driver for Kubernetes.

    publish() builds and pushes a container image, deploys it to the idle
    color, runs database migrations and smoke tests, then swaps the public
    service to the new color. Option attributes are declared by BgKubeMeta
    and populated from *options* in load_options().
    """
    def __init__(self, options):
        self.load_options(options)
        self.kube_api = KubeApi()
        self.runner = Runner(self.get_docker_daemon())
        self.registry = registries.load(self.runner, options)

    @property
    def is_minikube(self):
        # The 'local' registry value marks a minikube target.
        return self.container_registry == 'local'

    def get_docker_daemon(self):
        # Shell snippet that points the docker CLI at the right daemon.
        return cmd.MINIKUBE_DOCKER_ENV if self.is_minikube else cmd.DOCKERMACHINE_ENV.format(self.docker_machine_name)

    def load_options(self, options):
        """Copy options onto self: required ones must exist, optional ones
        keep the class defaults declared by BgKubeMeta when absent/falsy."""
        for opt in self.required:
            setattr(self, opt, require(options, opt))
        for opt in self.optional:
            setattr(self, opt, getattr(options, opt, None) or getattr(self, opt))

    @log('Building image {image_name} using {dockerfile}...')
    def build(self):
        """Build the image tagged with the current timestamp; return the tag."""
        tag = timestamp()
        command = [cmd.DOCKER_BUILD.format(
            context=self.context,
            dockerfile=self.dockerfile,
            image=self.image_name,
            tag=tag,
        )]
        if self.docker_build_args:
            command.append(' '.join('--build-arg {}'.format(b) for b in self.docker_build_args.split(' ')))
        self.runner.start(' '.join(command))
        return tag

    @log('Pushing image {image_name}:{tag} to {registry}...')
    def push(self, tag):
        """Push image_name:tag to the configured container registry."""
        self.registry.push('{}:{}'.format(self.image_name, tag))

    # NOTE(review): the log template below contains "(unknown)" where a
    # placeholder (presumably {filename}) would be expected — possibly
    # corrupted text; confirm against the upstream source.
    @log('Applying {_} using config: (unknown)...')
    def apply(self, _, filename, tag=None, color=''):
        """Apply a templated Kubernetes config; returns applied object names."""
        return self.kube_api.apply(filename, self.env_file, TAG=tag, COLOR=color, ENV_FILE=self.env_file)

    def pod_find(self, tag):
        """Return the first ready pod for *tag*, or None."""
        results = [pod for pod in self.kube_api.pods(tag=tag) if pod.ready]
        return results[0] if results else None

    def pod_exec(self, tag, command, *args):
        """Run *command* inside a ready pod of *tag* and capture its output.

        NOTE: raises AttributeError if no ready pod exists (pod_find -> None).
        """
        pod = self.pod_find(tag).name
        return self.runner.start(cmd.KUBECTL_EXEC.format(pod=pod, command=command, args=' '.join(args)), capture=True)

    def migrate_initial(self, tag):
        """Run the one-off seed-migration Job and wait for its completions."""
        if self.db_migrations_job_config_seed:
            def job_completions_extractor(job):
                # Ready only when all requested completions have succeeded.
                completions = job.obj['spec']['completions']
                succeeded_completions = job.obj['status']['succeeded']
                return completions if succeeded_completions == completions else None
            applied_objects = self.apply('db migration', self.db_migrations_job_config_seed, tag=tag)
            self.wait_for_resource_running(
                'Job',
                'completions',
                job_completions_extractor,
                self.db_migrations_job_timeout,
                *applied_objects
            )

    def migrate_apply(self, tag):
        """Apply pending migrations; return the pre-migration status output
        (used later as the rollback target), or None if not configured."""
        previous_state = None
        if self.db_migrations_status_command:
            previous_state = self.pod_exec(tag, self.db_migrations_status_command)
        if self.db_migrations_apply_command:
            self.pod_exec(tag, self.db_migrations_apply_command)
        return previous_state

    def migrate_rollback(self, tag, previous_state):
        """Roll migrations back to *previous_state*, if a command is configured."""
        if self.db_migrations_rollback_command:
            self.pod_exec(tag, self.db_migrations_rollback_command, previous_state)

    def migrate(self, tag):
        """Seed on first-ever deploy, otherwise apply incremental migrations.

        Returns (is_initial, previous_state_for_rollback).
        """
        db_migrations_previous_state = None
        is_initial = self.active_env() is None
        if is_initial:
            self.migrate_initial(tag)
        else:
            db_migrations_previous_state = self.migrate_apply(tag)
        return is_initial, db_migrations_previous_state

    def active_env(self):
        """Return the color the public service currently selects, or None."""
        service = self.kube_api.resource_by_name('Service', self.service_name)
        return None if not service else service.obj['spec']['selector'].get('color', None)

    def other_env(self):
        """Return the idle color ('blue'<->'green'), or None if no service yet."""
        return {
            'blue': 'green',
            'green': 'blue'
        }.get(self.active_env(), None)

    def deploy(self, tag):
        """Deploy *tag* to the idle color and wait for it; return that color."""
        color = self.other_env() or 'blue'
        applied_objects = self.apply('deployment', self.deployment_config, tag=tag, color=color)
        self.wait_for_resource_running(
            'Deployment',
            'replicas',
            lambda deployment: deployment.replicas if deployment.ready and self.pod_find(tag) else None,
            self.deployment_timeout,
            *applied_objects
        )
        return color

    @log('Waiting for {resource_type} {prop} to become available')
    def wait_for_resource_running(self, resource_type, prop, prop_extractor, timeout_seconds, *object_names):
        """Poll each named resource once per second (up to *timeout_seconds*)
        until *prop_extractor* yields a truthy value; return one value per
        name. Raises ActionFailedError when the timeout is exhausted."""
        def try_extract_value(resource_name):
            try:
                result = self.kube_api.resource_by_name(resource_type, resource_name)
                return prop_extractor(result or {})
            except (IndexError, KeyError, AttributeError):
                # Resource (or the polled field) not present yet.
                return None
        def extract_value_with_timeout(resource_name):
            value = None
            if timeout_seconds:
                attempts = 0
                while not value and attempts < timeout_seconds:
                    sleep(1)
                    attempts += 1
                    output('.', '', flush=True)
                    value = try_extract_value(resource_name)
            else:
                # timeout 0/None means a single non-blocking check.
                value = try_extract_value(resource_name)
            if value:
                output('\n{} {} {} is: {}'.format(resource_type, resource_name, prop, value))
            elif timeout_seconds:
                raise ActionFailedError(
                    '\nFailed after {} seconds elapsed. For more info try running: $ kubectl describe {} {}'.format(
                        timeout_seconds, resource_type, resource_name))
            return value
        values = [extract_value_with_timeout(name) for name in object_names]
        return values

    @log('Running smoke tests on {color} deployment...')
    def smoke_test(self, color):
        """Stand up the smoke service for *color* and run the smoke command.

        Returns True on success (or when no smoke config exists at all)."""
        if self.smoke_service_config:
            def service_host_extractor(service):
                if self.is_minikube:
                    service_address = self.runner.start(cmd.MINIKUBE_SERVICE_URL.format(service.name), capture=True)
                else:
                    service_address = get_loadbalancer_address(service)
                return service_address if is_host_up(service_address) else None
            applied_objects = self.apply('smoke service', self.smoke_service_config, color=color)
            smoke_service_address = ','.join(self.wait_for_resource_running(
                'Service',
                'host',
                service_host_extractor,
                self.smoke_service_timeout,
                *applied_objects
            ))
            return_code = self.runner.start(self.smoke_tests_command, TEST_HOST=smoke_service_address, silent=True)
            return return_code == 0
        return True

    @log('Promoting {color} deployment...')
    def swap(self, color):
        """Point the public service at *color* and wait until it exists."""
        self.apply('public service', self.service_config, color=color)
        self.wait_for_resource_running(
            'Service',
            'status',
            lambda service: 'ready' if service.exists(ensure=True) else None,
            self.service_timeout,
            self.service_name
        )

    @log('Publishing...')
    def publish(self):
        """Full pipeline: build, push, deploy idle color, migrate, smoke
        test, then swap; migrations are rolled back on smoke failure."""
        next_tag = self.build()
        self.push(next_tag)
        next_color = self.deploy(next_tag)
        is_initial, db_migrations_previous_state = self.migrate(next_tag)
        health_ok = self.smoke_test(next_color)
        if health_ok:
            self.swap(next_color)
        else:
            if not is_initial:
                self.migrate_rollback(next_tag, db_migrations_previous_state)
            raise ActionFailedError('Cannot promote {} deployment because smoke tests failed'.format(next_color))
        output('Done.')

    @log('Rolling back to previous deployment...')
    def rollback(self):
        """Swap the public service back to the other color, if one exists."""
        color = self.other_env()
        if color:
            self.swap(color)
        else:
            raise ActionFailedError('Cannot rollback to a previous environment because one does not exist.')
        output('Done.')
output('Done.')
| StarcoderdataPython |
33763 | from gherkin_to_markdown.expressions.expression import Expression
class SecondHeaderExpression(Expression):
    """Render a Gherkin statement as a markdown second-level header."""

    def to_markdown(self, statement: str):
        """Strip whitespace and the first ':' from *statement*, drop the
        leading keyword, and emit it as an '##' header block."""
        cleaned = statement.strip().replace(':', '', 1)
        without_keyword = cleaned[len(self.keyword):]
        return "##" + without_keyword + "\n\n"
| StarcoderdataPython |
12818368 | <gh_stars>10-100
import cv2
import numpy as np
from python_path import PythonPath
from tqdm import tqdm
with PythonPath("."):
from afy.predictor_local import PredictorLocal
from afy.utils import Once, log, crop, pad_img, resize, TicToc
from afy.utils import load_stylegan_avatar
# First Order Motion Model configuration and pretrained checkpoint.
config_path = "fomm/config/vox-adv-256.yaml"
checkpoint_path = "vox-adv-cpk.pth.tar"
model = PredictorLocal(
    config_path, checkpoint_path, relative=True, adapt_movement_scale=True
)
# Model input resolution and cropping parameters.
IMG_SIZE = 256
size = (IMG_SIZE, IMG_SIZE)
frame_proportion = 0.9
frame_offset_x = 0
frame_offset_y = 0
stream_img_size = None
# Default webcam.
cap = cv2.VideoCapture(0)
# Avatar image; [..., ::-1] reverses the channel order (cv2 loads BGR).
avatar = cv2.imread("avatars/apipe.jpg")[..., ::-1]
avatar = cv2.resize(avatar, size)
video_frames = []
start_recording = False
# Capture initial video
# Keys: 's' toggles recording, ESC stops capture.
while True:
    grabbed, frame = cap.read()
    if not grabbed:
        break
    frame = frame[..., ::-1].copy()
    if start_recording:
        video_frames.append(frame.copy())
        cv2.putText(
            frame, "Recording", (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (209, 80, 0), 3,
        )
    cv2.imshow("original", frame[..., ::-1])
    key = cv2.waitKey(1)
    if key == 27:  # ESC
        break
    elif key == ord("s"):
        start_recording = not start_recording
cap.release()
# Animate the avatar with the recorded frames.
model.set_source_image(avatar)
output = []
# When merge is True the output video shows original and deepfake side by side.
merge = True
# merge = False
for frame in tqdm(video_frames, total=len(video_frames)):
    if stream_img_size is None:
        # First frame: fix output geometry and open the writer once.
        stream_img_size = frame.shape[1], frame.shape[0]
        video_img_size = (
            frame.shape[1] * 2 if merge else frame.shape[1],
            frame.shape[0],
        )
        video_writer = cv2.VideoWriter(
            "out.mp4", cv2.VideoWriter_fourcc(*"MP4V"), 30, video_img_size,
        )
    input_frame, lrudwh = crop(
        frame, p=frame_proportion, offset_x=frame_offset_x, offset_y=frame_offset_y,
    )
    input_frame = cv2.resize(input_frame, size)
    out = model.predict(input_frame)
    out = pad_img(out, stream_img_size)
    out = cv2.resize(out, stream_img_size)
    output.append(out)
    if merge:
        out = np.concatenate([frame, out], axis=1)
    # Writer expects BGR; reverse channels back before writing.
    video_writer.write(out[..., ::-1])
video_writer.release()
# Replay original and generated frames side by side for preview.
for frame, out in zip(video_frames, output):
    cv2.imshow("original", frame[..., ::-1])
    cv2.imshow("deepfake", out[..., ::-1])
    cv2.waitKey(1)
| StarcoderdataPython |
5131579 | from PyQt5 import QtGui
from hackedit.api import events
from hackedit.app import settings
from hackedit.api.widgets import PreferencePage
from hackedit.app.forms import settings_page_notifications_ui
class Notifications(PreferencePage):
    """
    Preference page for the application notifications settings
    """
    def __init__(self):
        # Prefer the dedicated notification icon; fall back to a generic one.
        if QtGui.QIcon.hasThemeIcon('preferences-desktop-notification'):
            icon = QtGui.QIcon.fromTheme('preferences-desktop-notification')
        else:
            icon = QtGui.QIcon.fromTheme('dialog-information')
        super().__init__('Notifications', icon=icon, category='Environment')
        self.ui = settings_page_notifications_ui.Ui_Form()
        self.ui.setupUi(self)
        # Keep the remove/clear buttons in sync with the blacklist selection.
        self.ui.blacklist.currentItemChanged.connect(self.update_buttons)
        self.ui.bt_rm.clicked.connect(self._rm_current_item_from_blacklist)
        self.ui.bt_clear.clicked.connect(self.ui.blacklist.clear)
        self.update_buttons()

    def _rm_current_item_from_blacklist(self):
        """Remove the currently selected entry from the blacklist widget."""
        row = self.ui.blacklist.row(self.ui.blacklist.currentItem())
        self.ui.blacklist.takeItem(row)

    def update_buttons(self, *_):
        """Enable clear/remove buttons only when there is something to act on."""
        self.ui.bt_clear.setEnabled(self.ui.blacklist.count() > 0)
        self.ui.bt_rm.setEnabled(self.ui.blacklist.currentItem() is not None)

    def reset(self):
        """Reload the widgets from the persisted notification settings."""
        self.ui.blacklist.clear()
        self.ui.cb_allow_system_tray.setChecked(
            settings.show_notification_in_sytem_tray())
        self.ui.cb_info.setChecked(settings.auto_open_info_notification())
        self.ui.cb_warning.setChecked(
            settings.auto_open_warning_notification())
        self.ui.cb_errors.setChecked(settings.auto_open_error_notification())
        self.ui.blacklist.addItems(events.get_blacklist())
        self.update_buttons()

    @staticmethod
    def restore_defaults():
        """Persist the factory-default notification settings."""
        settings.set_show_notification_in_sytem_tray(True)
        settings.set_auto_open_info_notification(False)
        settings.set_auto_open_warning_notification(True)
        settings.set_auto_open_error_notification(True)
        events.clear_blacklist()

    def save(self):
        """Persist the current widget state to the application settings."""
        settings.set_show_notification_in_sytem_tray(
            self.ui.cb_allow_system_tray.isChecked())
        settings.set_auto_open_info_notification(self.ui.cb_info.isChecked())
        settings.set_auto_open_warning_notification(
            self.ui.cb_warning.isChecked())
        settings.set_auto_open_error_notification(
            self.ui.cb_errors.isChecked())
        events.set_blacklist([self.ui.blacklist.item(i).text()
                              for i in range(self.ui.blacklist.count())])
| StarcoderdataPython |
1718151 | '''tzinfo timezone information for Pacific/Fiji.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Fiji(DstTzInfo):
    '''Pacific/Fiji timezone definition. See datetime.tzinfo for details'''
    zone = 'Pacific/Fiji'
    # Generated data: UTC instants at which the zone's offset/name changes.
    _utc_transition_times = [
        d(1,1,1,0,0,0),
        d(1915,10,25,12,6,20),
        d(1998,10,31,14,0,0),
        d(1999,2,27,14,0,0),
        d(1999,11,6,14,0,0),
        d(2000,2,26,14,0,0),
    ]
    # (utcoffset seconds, dst seconds, tzname) for each transition above.
    _transition_info = [
        i(42840,0,'LMT'),
        i(43200,0,'FJT'),
        i(46800,3600,'FJST'),
        i(43200,0,'FJT'),
        i(46800,3600,'FJST'),
        i(43200,0,'FJT'),
    ]

# Module-level singleton instance, as in all pytz generated zone modules.
Fiji = Fiji()
| StarcoderdataPython |
3550248 | <filename>src/auspex/parameter.py
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
from auspex.log import logger
class Parameter(object):
    """ Encapsulates the information for an experiment parameter"""
    def __init__(self, name=None, unit=None, default=None,
                 value_range=None, allowed_values=None,
                 increment=None, snap=None):
        self.name = name
        self._value = default
        self.unit = unit
        self.default = default
        # Instrument setter bound later via assign_method().
        self.method = None
        self.instrument_tree = None
        # These are primarily intended for Quince interoperation,
        # but will maybe be useful for Auspex too...
        self.value_range = value_range
        self.allowed_values = allowed_values
        self.increment = increment
        # snap, when given, overrides increment.
        if snap:
            self.increment = snap
        self.snap = snap
        # Hooks to be called before or after updating a sweep parameter
        self.pre_push_hooks = []
        self.post_push_hooks = []

    def add_pre_push_hook(self, hook):
        # Callable invoked by push() before the instrument method runs.
        self.pre_push_hooks.append(hook)

    def add_post_push_hook(self, hook):
        # Callable invoked by push() after the instrument method runs.
        self.post_push_hooks.append(hook)

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        self._value = value

    def __str__(self):
        # "<value>" or "<value> <unit>".
        result = ""
        result += "%s" % str(self.value)
        if self.unit:
            result += " %s" % self.unit
        return result

    def __repr__(self):
        result = "<Parameter(name='%s'" % self.name
        result += ",value=%s" % repr(self.value)
        if self.unit:
            result += ",unit='%s'" % self.unit
        return result + ")>"

    def dict_repr(self):
        """Return a dictionary representation. Intended for Quince interop."""
        param_dict = {}
        param_dict['name'] = self.name
        # Widget type is derived from the concrete Parameter subclass.
        if isinstance(self, FloatParameter):
            param_dict['type'] = 'float'
        elif isinstance(self, IntParameter):
            param_dict['type'] = 'int'
        elif isinstance(self, BoolParameter):
            param_dict['type'] = 'bool'
        elif isinstance(self, FilenameParameter):
            param_dict['type'] = 'filename'
        elif isinstance(self, Parameter):
            if self.allowed_values:
                param_dict['type'] = 'combo'
                param_dict['choices'] = self.allowed_values
            else:
                param_dict['type'] = 'str'
        if isinstance(self, FloatParameter) or isinstance(self, IntParameter):
            if self.value_range:
                param_dict['low'] = min(self.value_range)
                param_dict['high'] = max(self.value_range)
            else:
                # Effectively unbounded defaults for numeric widgets.
                param_dict['low'] = -1e15
                param_dict['high'] = 1e15
                param_dict['increment'] = 2e14
            # NOTE(review): the increment assigned in the else-branch above is
            # always overwritten by the block below — confirm the intent.
            if self.increment:
                param_dict['increment'] = self.increment
            else:
                param_dict['increment'] = 0.05*(param_dict['high']-param_dict['low'])
            param_dict['snap'] = self.snap
        # NOTE(review): a falsy default (0, False, '') is omitted here.
        if self.default:
            param_dict['default'] = self.default
        return param_dict

    def assign_method(self, method):
        """Bind the callable that push() uses to write the value out."""
        logger.debug("Setting method of Parameter %s to %s" % (self.name, str(method)) )
        self.method = method

    def push(self):
        """Send the current value to the instrument via the bound method,
        surrounded by the registered pre/post push hooks. No-op when no
        method has been assigned."""
        if self.method is not None:
            # logger.debug("Calling pre_push_hooks of Parameter %s with value %s" % (self.name, self._value) )
            for pph in self.pre_push_hooks:
                pph()
            # logger.debug("Calling method of Parameter %s with value %s" % (self.name, self._value) )
            self.method(self._value)
            # logger.debug("Calling post_push_hooks of Parameter %s with value %s" % (self.name, self._value) )
            for pph in self.post_push_hooks:
                pph()
class FilenameParameter(Parameter):
    # Marker subclass: dict_repr() reports type 'filename' for it so a GUI
    # can render a file-picker widget; behavior is otherwise Parameter's.
    def __init__(self, *args, **kwargs):
        super(FilenameParameter, self).__init__(*args, **kwargs)
class ParameterGroup(Parameter):
    """ An array of Parameters updated and pushed together as one unit. """
    def __init__(self, params, name=None):
        """Wrap *params*; name and unit default to a '(...)' concatenation
        of the wrapped parameters' names/units."""
        if name is None:
            names = '('
            for param in params:
                names += param.name
            self.name = names + ')'
        else:
            self.name = name
        self.parameters = params
        self._value = [param.value for param in params]
        # BUG FIX: this previously read `param.defaul`, which raised
        # AttributeError on every construction; the attribute is `default`.
        self.default = [param.default for param in params]
        self.method = [param.method for param in params]
        units = '('
        for param in params:
            if param.unit is None:
                units += 'None'
            else:
                units += param.unit
        self.unit = units + ')'

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, values):
        # Record the list and propagate each element to its wrapped Parameter.
        self._value = values
        for param, value in zip(self.parameters, values):
            param.value = value

    def assign_method(self, methods):
        """Bind one push method per wrapped Parameter (parallel lists)."""
        for param, method in zip(self.parameters, methods):
            param.assign_method(method)

    def push(self):
        """Push every wrapped Parameter's current value in order."""
        for param in self.parameters:
            param.push()
class FloatParameter(Parameter):
    """Parameter whose value is coerced to float on assignment."""

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        try:
            coerced = float(value)
        except ValueError:
            raise ValueError("FloatParameter given non-float value of "
                             "type '%s'" % type(value))
        self._value = coerced

    def __repr__(self):
        base = super(FloatParameter, self).__repr__()
        return base.replace("<Parameter", "<FloatParameter", 1)
class IntParameter(Parameter):
    """Parameter whose value is coerced to int on assignment."""

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        try:
            coerced = int(value)
        except ValueError:
            raise ValueError("IntParameter given non-int value of "
                             "type '%s'" % type(value))
        self._value = coerced

    def __repr__(self):
        base = super(IntParameter, self).__repr__()
        return base.replace("<Parameter", "<IntParameter", 1)
class BoolParameter(Parameter):
    # Parameter whose value is coerced with bool() on assignment.
    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        # NOTE(review): bool() applies Python truthiness and essentially never
        # raises ValueError, so the except branch is effectively dead and
        # strings like "False" become True — confirm this is intended.
        try:
            self._value = bool(value)
        except ValueError:
            raise ValueError("BoolParameter given non-bool value of "
                             "type '%s'" % type(value))

    def __repr__(self):
        result = super(BoolParameter, self).__repr__()
        return result.replace("<Parameter", "<BoolParameter", 1)
| StarcoderdataPython |
9726160 | from main.app import db
from flask_login import UserMixin
# The User model stores information about a human user. A user is identified by a user name or email address.
class User(UserMixin, db.Model):
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key = True)
    user_name = db.Column(db.String(50), nullable = True) # optional
    email_address = db.Column(db.String(100), nullable = False, unique = True)
    password_hash = db.Column(db.String(128), nullable = False)
    full_name = db.Column(db.String(100), nullable = False)
    info_status = db.Column(db.String, nullable = False) # JSON string indicating which info messages have been displayed to user
    attributes = db.Column(db.String, nullable = False) # JSON string of additional information about user
    deleted = db.Column(db.Boolean, nullable = False)
    creation_timestamp = db.Column(db.DateTime, nullable = False)
    role = db.Column(db.Integer, nullable = False)

    def __repr__(self):
        return '%d: %s' % (self.id, self.email_address)

    def as_dict(self):
        """Serializable summary of the user; excludes the password hash,
        info_status, and attributes fields."""
        return {
            'id': self.id,
            'user_name': self.user_name,
            'email_address': self.email_address,
            'full_name': self.full_name,
            'role': self.role,
            'creation_timestamp': self.creation_timestamp.isoformat() + 'Z',
            'deleted': self.deleted,
        }

    # roles
    STANDARD_USER = 0
    SYSTEM_ADMIN = 2 # this is an admin role for the entire site; organization admins are specified in OrganizationUser model
# The Key model holds access keys (used for programmatic API access to the server).
class Key(db.Model):
    __tablename__ = 'keys'
    id = db.Column(db.Integer, primary_key = True)
    organization_id = db.Column(db.ForeignKey('resources.id'), nullable = False)
    creation_user_id = db.Column(db.ForeignKey('users.id'), nullable = False)
    revocation_user_id = db.Column(db.ForeignKey('users.id')) # null if not revoked
    creation_timestamp = db.Column(db.DateTime, nullable = False)
    revocation_timestamp = db.Column(db.DateTime) # null if not revoked
    # A key acts either as a user or as a controller: exactly one of the two
    # columns below is expected to be set.
    access_as_user_id = db.Column(db.ForeignKey('users.id')) # null if access as controller
    access_as_controller_id = db.Column(db.ForeignKey('resources.id')) # null if access as user
    key_part = db.Column(db.String(8), nullable = False) # short visible fragment for identifying the key
    key_hash = db.Column(db.String(128), nullable = False) # like a password hash, but for a key
    # fix(soon): remove these and just use key_hash
    key_storage = db.Column(db.String(), nullable = True)
    key_storage_nonce = db.Column(db.String(), nullable = True)

    def as_dict(self):
        """Serializable summary of the key; excludes the hash and stored key
        material, exposing only the short key_part fragment."""
        return {
            'id': self.id,
            'organization_id': self.organization_id,
            'access_as_user_id': self.access_as_user_id,
            'access_as_controller_id': self.access_as_controller_id,
            'creation_timestamp': self.creation_timestamp.isoformat() + 'Z' if self.creation_timestamp else '',
            'revocation_timestamp': self.revocation_timestamp.isoformat() + 'Z' if self.revocation_timestamp else '',
            'key_part': self.key_part,
        }
# The AccountRequest model holds requests for new users and new organizations.
# Account request may require approval before they are turned into accounts.
class AccountRequest(db.Model):
    __tablename__ = 'account_requests'
    id = db.Column(db.Integer, primary_key = True)
    organization_name = db.Column(db.String(100)) # used for new organization
    organization_id = db.Column(db.ForeignKey('resources.id')) # used to join existing organization
    organization = db.relationship('Resource') # used to join existing organization
    inviter_id = db.Column(db.ForeignKey('users.id')) # used to join existing organization
    creation_timestamp = db.Column(db.DateTime, nullable = False)
    redeemed_timestamp = db.Column(db.DateTime) # null until the invite is redeemed
    access_code = db.Column(db.String(40), nullable = False) # code embedded in the invitation link
    email_address = db.Column(db.String, nullable = False)
    email_sent = db.Column(db.Boolean, nullable = False) # True if sent successfully
    email_failed = db.Column(db.Boolean, nullable = False) # True if given up on sending
    attributes = db.Column(db.String, nullable = False) # JSON field containing extra attributes
# The OrganizationUser model represents membership of users in organizations.
# A single user can belong to multiple organizations. A user can be an adminstrator for an organization.
class OrganizationUser(db.Model):
    __tablename__ = 'organization_users'
    id = db.Column(db.Integer, primary_key = True)
    organization_id = db.Column(db.ForeignKey('resources.id'), nullable = False)
    organization = db.relationship('Resource')
    user_id = db.Column(db.ForeignKey('users.id'), nullable = False)
    user = db.relationship('User')
    is_admin = db.Column(db.Boolean, default = False, nullable = False)

    def as_dict(self):
        """Serializable summary of the membership record (omits the row id)."""
        return {
            'organization_id': self.organization_id,
            'user_id': self.user_id,
            'is_admin': self.is_admin,
        }
| StarcoderdataPython |
5194543 | <gh_stars>1-10
from meli_challenge.core import CharactersGraph, SparkClient
from meli_challenge.core.input import CsvInput
from tests.utils import assert_dataframe_equality, create_df_from_collection
# Module-level Spark client/session shared by every test in this file,
# created once at import time rather than per-test.
client = SparkClient()
client.create_session()
def test_core_top_interactions(spark_context, spark_session):
    """The 3 most-interacting characters over books 1-3 match known totals.

    Uses the CSV knowledge base at data/dataset.csv; expected values are the
    precomputed interaction sums for Tyrion, Jon and Joffrey.
    """
    # arrange: build the graph from the bundled dataset and the expected frame
    graph = CharactersGraph(knowledge_base_input=CsvInput(path="data/dataset.csv"))
    target_df = create_df_from_collection(
        data=[
            {
                "name": "Tyrion-Lannister",
                "sum_interactions": 2261,
                "sum_interactions_book_1": 650,
                "sum_interactions_book_2": 829,
                "sum_interactions_book_3": 782,
            },
            {
                "name": "Jon-Snow",
                "sum_interactions": 1900,
                "sum_interactions_book_1": 784,
                "sum_interactions_book_2": 360,
                "sum_interactions_book_3": 756,
            },
            {
                "name": "Joffrey-Baratheon",
                "sum_interactions": 1649,
                "sum_interactions_book_1": 422,
                "sum_interactions_book_2": 629,
                "sum_interactions_book_3": 598,
            },
        ],
        spark_session=spark_session,
        spark_context=spark_context,
    )
    # act: top 3 by total interactions; coalesce(1) forces a single partition
    # so the global ordering (and thus limit) is deterministic
    output_df = (
        graph.summarize_characters_interactions(books=[1, 2, 3])
        .coalesce(1)
        .orderBy("sum_interactions", ascending=False)
        .limit(3)
    )
    # assert
    assert_dataframe_equality(output_df, target_df)
| StarcoderdataPython |
218418 | import functools
from abc import ABC, abstractmethod
from collections import Callable
from event_bus import EventBus as SimpleEventBus
class EventBus(ABC):
    """Abstract publish/subscribe bus.

    Concrete buses implement registration, removal and emission; this base
    contributes only the ``subscribe`` decorator sugar on top of them.

    NOTE(review): the module imports ``Callable`` from ``collections``, an
    alias removed in Python 3.10 — it should come from ``collections.abc``.
    """

    @abstractmethod
    def subscribe_func_to_event(self, func: Callable, event_name: str) -> None:
        """Register *func* as a handler for *event_name*."""
        raise NotImplementedError

    @abstractmethod
    def unsubscribe_func_from_event(self, func: Callable, event_name: str) -> None:
        """Remove *func* from the handlers of *event_name*."""
        raise NotImplementedError

    def subscribe(self, event_name: str) -> Callable:
        """Decorator form of :meth:`subscribe_func_to_event`.

        The undecorated callable is what gets registered; the decorated name
        is rebound to a transparent pass-through wrapper.
        """
        def decorator(handler: Callable) -> Callable:
            @functools.wraps(handler)
            def passthrough(*args, **kwargs):
                return handler(*args, **kwargs)
            self.subscribe_func_to_event(handler, event_name)
            return passthrough
        return decorator

    @abstractmethod
    def emit(self, event_name: str, *args, **kwargs) -> None:
        """Invoke every handler registered for *event_name*."""
        raise NotImplementedError
class EventBusAdapter(EventBus):
    """Concrete EventBus backed by the third-party ``event_bus`` package."""
    def __init__(self):
        self._bus = SimpleEventBus()
    def subscribe_func_to_event(self, func: Callable, event_name: str) -> None:
        # Delegates registration straight to the wrapped bus.
        self._bus.add_event(func, event_name)
    def unsubscribe_func_from_event(self, func: Callable, event_name: str) -> None:
        # NOTE(review): removal is by function *name* (func.__name__), so the
        # registered object must expose the same __name__; bare partials
        # without metadata cannot be unsubscribed — confirm against the
        # event_bus package's remove_event API.
        self._bus.remove_event(func.__name__, event_name)
    def emit(self, event_name: str, *args, **kwargs) -> None:
        # Fan the event out to every subscribed handler.
        self._bus.emit(event_name, *args, **kwargs)
class EventSubscriber(object):
    """Convenience binder attaching callables to one event on one bus."""

    def __init__(self, event_bus: EventBus, event_name: str):
        self.event_bus = event_bus
        self.event_name = event_name

    def do(self, func: Callable, *args, **kwargs):
        """Subscribe ``func`` with ``*args``/``**kwargs`` pre-applied.

        Returns the registered subscriber object so callers can later hand it
        to ``unsubscribe_func_from_event`` (the previous version discarded it,
        which made unsubscription impossible).
        """
        subscriber = functools.partial(func, *args, **kwargs)
        # partial objects carry no __name__/__doc__; copy the metadata from
        # func so name-based buses (e.g. EventBusAdapter's remove_event path)
        # can identify the handler.
        functools.update_wrapper(subscriber, func)
        self.event_bus.subscribe_func_to_event(subscriber, self.event_name)
        return subscriber
| StarcoderdataPython |
3528954 |
# Emit <option> elements for the values 0-59 (e.g. a minutes/seconds picker).
# Iterates the range directly instead of materializing an intermediate list,
# and uses str.format instead of chained concatenation; output is unchanged.
for num in range(60):
    print('<option value="{0}">{0}</option>'.format(num))
8048703 | <gh_stars>0
import unittest
from ..argumentpickers import ArgumentByNamePicker, ArgumentByNameError
class TestArgumentByNamePicker(unittest.TestCase):
    """Unit tests for ArgumentByNamePicker.argument (positional and keyword lookup)."""
    # The 'assertEquals' alias was deprecated in Python 3.2 and removed in
    # 3.12, so the canonical assertEqual is used throughout.
    def test_it_can_pick_argument_by_name(self):
        picker = ArgumentByNamePicker("b")
        args = (1,2,3,4)
        kwargs = {}
        def func(a,b,c,d): pass
        self.assertEqual(2, picker.argument(func, *args, **kwargs))
    def test_it_can_pick_argument_by_name_2(self):
        picker = ArgumentByNamePicker("d")
        args = (1,2,3,4)
        kwargs = {}
        def func(a,b,c,d): pass
        self.assertEqual(4, picker.argument(func, *args, **kwargs))
    def test_it_can_pick_argument_from_kwargs(self):
        picker = ArgumentByNamePicker("d")
        args = (1,2,)
        kwargs = {'c':3, 'd':4}
        def func(a,b,c,d): pass
        self.assertEqual(4, picker.argument(func, *args, **kwargs))
    def test_it_raises_exception_when_argument_not_found(self):
        picker = ArgumentByNamePicker("e")
        args = (1,2,)
        kwargs = {'c':3, 'd':4}
        def func(a,b,c,d): pass
        with self.assertRaises(ArgumentByNameError) as cm:
            picker.argument(func, *args, **kwargs)
        # NOTE(review): relies on a custom 'message' attribute on
        # ArgumentByNameError (plain Py3 exceptions have none) — confirm.
        self.assertEqual(cm.exception.message, "Tried to pick argument with name 'e' from 'func', but it was not found")
| StarcoderdataPython |
5128079 | <filename>pyrestorm/exceptions/http.py<gh_stars>1-10
# HTTP Class Exceptions: one subclass per HTTP status code the REST client
# maps error responses onto; catch HttpException to handle any of them.
class HttpException(Exception):
    """Base class for all HTTP-status-derived errors."""
    pass
class ServerErrorException(HttpException):
    # HTTP 500
    pass
class MethodNotAllowedException(HttpException):
    # HTTP 405
    pass
class NotFoundException(HttpException):
    # HTTP 404
    pass
class PermissionDeniedException(HttpException):
    # HTTP 403
    pass
class AuthorizationException(HttpException):
    # HTTP 401
    pass
class BadRequestException(HttpException):
    # HTTP 400
    pass
| StarcoderdataPython |
85487 | ##
# File: NEFImportTests.py
# Date: 06-Oct-2018 <NAME>
#
# Updates:
##
"""Test cases for NEFTranslator - simply import everything to ensure imports work"""
import unittest
import sys
if __package__ is None or __package__ == "":
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from commonsetup import TESTOUTPUT # noqa: F401 pylint: disable=import-error,unused-import
else:
from .commonsetup import TESTOUTPUT # noqa: F401 pylint: disable=relative-beyond-top-level
from wwpdb.utils.nmr.NEFTranslator.NEFTranslator import NEFTranslator
from wwpdb.utils.nmr.NmrDpUtility import NmrDpUtility
from wwpdb.utils.nmr.NmrDpReport import NmrDpReport
from wwpdb.utils.nmr.NmrStarToCif import NmrStarToCif
from wwpdb.utils.nmr.rci.RCI import RCI
from wwpdb.utils.nmr.BMRBChemShiftStat import BMRBChemShiftStat
class ImportTests(unittest.TestCase):
    """Smoke test: constructing each class proves its module imports cleanly."""
    def testInstantiate(self):
        # Instances are intentionally unused (hence the F841 suppressions);
        # only import-time and constructor side effects are exercised.
        _c = NEFTranslator()  # noqa: F841
        _npu = NmrDpUtility()  # noqa: F841
        _ndp = NmrDpReport()  # noqa: F841
        _nstc = NmrStarToCif()  # noqa: F841
        _rci = RCI()  # noqa: F841
        _bmrb = BMRBChemShiftStat()  # noqa: F841
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
3397487 | <filename>modules/components/variables_panel.py
from .. typecheck import *
from .. import core
from .. import ui
from .. import dap
from .. debugger.breakpoints import Breakpoints
from .. debugger.watch import Watch, WatchView
from . variable_component import VariableStateful, VariableStatefulComponent
from . layout import variables_panel_width
import sublime
class VariablesPanel (ui.div):
	"""Debugger side panel showing watch expressions and per-scope variables."""
	def __init__(self, breakpoints: Breakpoints, watch: Watch) -> None:
		super().__init__()
		self.scopes = [] #type: List[dap.Scope]
		self.breakpoints = breakpoints
		self.watch = watch
		self.watch_view = WatchView(self.watch)
		# re-render whenever the watch list changes
		watch.on_updated.add(self.dirty)
	def clear(self) -> None:
		self.scopes = []
		self.dirty()
	def set_scopes(self, scopes: List[dap.Scope]) -> None:
		self.scopes = scopes
		self.dirty()
	def on_edit_variable(self, variable: VariableStateful) -> None:
		core.run(self.on_edit_variable_async(variable))
	@core.coroutine
	def on_edit_variable_async(self, variable: VariableStateful) -> core.awaitable[None]:
		"""Show an input list with edit/copy/watch/data-breakpoint actions for one variable."""
		info = None #type: Optional[dap.DataBreakpointInfoResponse]
		try:
			info = yield from variable.variable.client.DataBreakpointInfoRequest(variable.variable)
		except dap.Error:
			# data breakpoints are optional; adapters without support just raise
			pass
		expression = variable.variable.evaluateName or variable.variable.name
		value = variable.variable.value or ""
		def on_edit_variable(value: str):
			variable.set_value(value)
		def copy_value():
			sublime.set_clipboard(value)
		def copy_expr():
			sublime.set_clipboard(expression)
		def add_watch():
			self.watch.add(expression)
		items = [
			ui.InputListItem(
				ui.InputText(
					on_edit_variable,
					"editing a variable",
				),
				"Edit Variable",
			),
			ui.InputListItem(
				copy_expr,
				"Copy Expression",
			),
			ui.InputListItem(
				copy_value,
				"Copy Value",
			),
			ui.InputListItem(
				add_watch,
				"Add Variable To Watch",
			),
		]
		if info and info.id:
			types = info.accessTypes or [""]
			labels = {
				dap.DataBreakpoint.write: "Break On Value Write",
				dap.DataBreakpoint.readWrite: "Break On Value Read or Write",
				dap.DataBreakpoint.read: "Break On Value Read",
			}
			def on_add_data_breakpoint(accessType: str):
				assert info
				self.breakpoints.data.add(info, accessType or None)
			for acessType in types:
				# BUGFIX: bind acessType as a default argument. A bare
				# `lambda: on_add_data_breakpoint(acessType)` captures the
				# loop variable late, so every menu item would add a
				# breakpoint for the *last* access type in the list.
				items.append(ui.InputListItem(
					lambda acessType=acessType: on_add_data_breakpoint(acessType),
					labels.get(acessType) or "Break On Value Change"
				))
		ui.InputList(items).run()
	def render(self) -> ui.div.Children:
		scopes_items = [] #type: List[ui.div]
		# expand the first scope only
		first = True
		for v in self.scopes:
			variable = dap.Variable(v.client, v.name, "", v.variablesReference)
			variable_stateful = VariableStateful(variable, None, on_edit=self.on_edit_variable)
			component = VariableStatefulComponent(variable_stateful)
			variable_stateful.on_dirty = component.dirty
			if first:
				first = False
				variable_stateful.expand()
			scopes_items.append(component)
		return [
			self.watch_view,
			ui.div()[scopes_items]
		]
| StarcoderdataPython |
3547525 | <filename>Componenttesting/GUItest.py
import tkinter as tk
# Toggle state: True while the bot is "running" (button shows STOP).
a = False

def Run():
    """Toggle the run state and restyle the button to match.

    Returns the new state (True = running).
    """
    global a
    a = not a  # idiomatic toggle instead of comparing against True
    if a:
        slogan.configure(bg="red", text="STOP")
    else:
        slogan.configure(bg="white", text="RUN")
    print(a)
    return a
# Build a 500x200 window with a RUN/STOP toggle button and a QUIT button.
root = tk.Tk(className = 'My bot')
frame = tk.Frame(root)
frame.pack()
root.geometry("500x200")
# Toggle button; Run() flips the global state and recolors/relabels it.
slogan = tk.Button(frame,
                   text="RUN" ,
                   bg = "white" ,
                   height = 5,
                   width = 20,
                   command=Run)
slogan.pack(side=tk.LEFT)
# NOTE(review): command=quit uses the site-module builtin 'quit';
# root.destroy would be the conventional way to close a Tk app — confirm.
button = tk.Button(frame,
                   text="QUIT",
                   height = 5,
                   width = 20,
                   fg="red",
                   command=quit)
button.pack(side=tk.RIGHT)
# Blocks until the window is closed.
root.mainloop()
6464557 |
import codecs
import logging
import sys
import requests
from massweb.targets.target import Target
from massweb.proxy_rotator.proxy_rotate import get_random_proxy
# Module-wide logging: timestamped format, DEBUG level for the 'pnknet' logger.
logging.basicConfig(format='%(asctime)s %(name)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger('pnknet')
logger.setLevel(logging.DEBUG)
# Wrap the std streams so unicode I/O works under Python 2.
sys.stdin = codecs.getreader('utf-8')(sys.stdin)
sys.stderr = codecs.getwriter('utf-8')(sys.stderr)
#FIXME: define in central const module
IDENTIFY_POSTS = 'identify_post'
GET = 'get'
POST = 'post'
def pnk_request_raw(target, request_type=GET, data=None, req_timeout=5,
                    proxy_list=None, hadoop_reporting=False, **kwargs):
    """Fetch a single target over HTTP GET/POST and return (target, response).

    target: a Target instance or a URL string.
    On any failure the sentinel string "__PNK_REQ_FAILED" is returned in place
    of the response, so threaded callers can detect errors without exception
    plumbing.
    """
    if proxy_list is not None:
        proxy = get_random_proxy(proxy_list)
    else:
        proxy = {}
    logger.debug("pnk_request_raw input target type: %s", target.__class__.__name__)
    try:
        if isinstance(target, basestring):
            url = target.strip()
        elif isinstance(target, Target):
            url = target.url
            if target.data and data:
                logger.error("%s.data and data are both specified using Target.data.", target.__class__.__name__)
                logger.debug("%s.data %s; data: %s", target.__class__.__name__, target.data, data)
            data = target.data or data
        else:
            # %-format eagerly: exception constructors do not lazy-format the
            # way logging calls do (the old code raised with an unformatted
            # message tuple).
            raise TypeError("target must be an instance of Target or basestring not: %s" % target.__class__.__name__)
        if request_type == GET:
            if hadoop_reporting:
                logger.info("GET requesting %s", target)
            response = requests.get(url, proxies=proxy,
                                    timeout=req_timeout,
                                    allow_redirects=False, **kwargs)
        elif request_type == POST:
            logger.debug(" POST Data: %s", data)
            if hadoop_reporting:
                logger.info("POST requesting %s", target)
            response = requests.post(url, data=data, proxies=proxy,
                                     timeout=req_timeout,
                                     allow_redirects=False, **kwargs)
        else:
            raise ValueError("request_type must be either %s or %s" % (GET, POST))
        logger.debug("pnk_request_raw output target type: %s", target.__class__.__name__)
        return (target, response)
    except Exception:
        # Deliberate best-effort: threads can't surface exceptions well, so a
        # sentinel marks failure. Narrowed from a bare 'except:' so
        # KeyboardInterrupt/SystemExit still propagate, and the cause is
        # logged instead of vanishing.
        logger.exception("request failed for target %s", target)
        logger.debug("pnk_request_raw output target type: %s", target.__class__.__name__)
        return (target, "__PNK_REQ_FAILED")
| StarcoderdataPython |
3395431 | <gh_stars>10-100
import fastscapelib_fortran as fs
import numpy as np
import xsimlab as xs
from .boundary import BorderBoundary
from .erosion import TotalErosion
from .grid import UniformRectilinearGrid2D
from .main import SurfaceTopography
from .tectonics import TectonicForcing
@xs.process
class BaseIsostasy:
    """Base class for isostasy.
    Do not use this base class directly in a model! Use one of its
    subclasses instead.
    However, if you need one or several of the variables declared here
    in another process, it is preferable to pass this base class in
    :func:`xsimlab.foreign`.
    """
    # 2-D (y, x) field exported to both uplift groups so downstream
    # processes see the rebound as part of total bedrock/surface motion.
    rebound = xs.variable(
        dims=('y', 'x'),
        intent='out',
        groups=['bedrock_upward', 'surface_upward'],
        description='isostasic rebound due to material loading/unloading'
    )
@xs.process
class BaseLocalIsostasy(BaseIsostasy):
    """Base class for local isostasy.
    Do not use this base class directly in a model! Use one of its
    subclasses instead.
    However, if you need one or several of the variables declared here
    in another process, it is preferable to pass this base class in
    :func:`xsimlab.foreign`.
    """
    # Scalar factor converting a load/unload thickness into rebound height.
    i_coef = xs.variable(description='local isostatic coefficient')
@xs.process
class LocalIsostasyErosion(BaseLocalIsostasy):
    """Local isostasic effect of erosion."""
    erosion = xs.foreign(TotalErosion, 'height')
    def run_step(self):
        # Unloading by erosion yields upward rebound proportional to the
        # eroded thickness.
        self.rebound = self.i_coef * self.erosion
@xs.process
class LocalIsostasyTectonics(BaseLocalIsostasy):
    """Local isostasic effect of tectonic forcing."""
    bedrock_upward = xs.foreign(TectonicForcing, 'bedrock_upward')
    def run_step(self):
        # Sign flip: tectonic loading (upward bedrock motion) depresses the
        # column, so rebound opposes bedrock_upward.
        self.rebound = -1. * self.i_coef * self.bedrock_upward
@xs.process
class LocalIsostasyErosionTectonics(BaseLocalIsostasy):
    """Local isostatic effect of both erosion and tectonic forcing.
    This process makes no distinction between the density of rock and
    the density of eroded material (one single coefficient is used).
    """
    erosion = xs.foreign(TotalErosion, 'height')
    surface_upward = xs.foreign(TectonicForcing, 'surface_upward')
    def run_step(self):
        # Net unloading = eroded thickness minus imposed surface uplift.
        self.rebound = self.i_coef * (self.erosion - self.surface_upward)
@xs.process
class Flexure(BaseIsostasy):
    """Flexural isostatic effect of both erosion and tectonic
    forcing.
    """
    # Accepts either a scalar or a (y, x) field (see dims below).
    lithos_density = xs.variable(
        dims=[(), ('y', 'x')],
        description='lithospheric rock density'
    )
    asthen_density = xs.variable(
        description='asthenospheric rock density'
    )
    e_thickness = xs.variable(
        description='effective elastic plate thickness'
    )
    shape = xs.foreign(UniformRectilinearGrid2D, 'shape')
    length = xs.foreign(UniformRectilinearGrid2D, 'length')
    ibc = xs.foreign(BorderBoundary, 'ibc')
    elevation = xs.foreign(SurfaceTopography, 'elevation')
    erosion = xs.foreign(TotalErosion, 'height')
    surface_upward = xs.foreign(TectonicForcing, 'surface_upward')
    def run_step(self):
        ny, nx = self.shape
        yl, xl = self.length
        # broadcast_to handles both scalar and (y, x) density inputs;
        # the Fortran routine expects flat 1-D arrays.
        lithos_density = np.broadcast_to(
            self.lithos_density, self.shape).flatten()
        elevation_eq = self.elevation.flatten()
        diff = (self.surface_upward - self.erosion).ravel()
        # set elevation pre and post rebound
        elevation_pre = elevation_eq + diff
        elevation_post = elevation_pre.copy()
        # NOTE(review): fs.flexure appears to update elevation_post in place
        # (Fortran in/out argument) — the rebound below only makes sense if
        # it does; confirm against fastscapelib_fortran's interface.
        fs.flexure(elevation_post, elevation_eq, nx, ny, xl, yl,
                   lithos_density, self.asthen_density, self.e_thickness,
                   self.ibc)
        self.rebound = (elevation_post - elevation_pre).reshape(self.shape)
| StarcoderdataPython |
6502896 | from typing import Optional
from fastapi import Depends, HTTPException, Path
from starlette.status import HTTP_404_NOT_FOUND
from app.api.dependencies.authentication import get_current_user_authorizer
from app.api.dependencies.database import get_repository
from app.db.errors import EntityDoesNotExist
from app.db.repositories.profiles import ProfilesRepository
from app.models.domain.profiles import Profile
from app.models.domain.users import User
from app.resources import strings
async def get_profile_by_username_from_path(
    username: str = Path(..., min_length=1),
    user: Optional[User] = Depends(get_current_user_authorizer(required=False)),
    profiles_repo: ProfilesRepository = Depends(get_repository(ProfilesRepository)),
) -> Profile:
    """FastAPI dependency resolving the ``username`` path parameter to a Profile.
    Authentication is optional (required=False): the current user, if any, is
    passed through to the repository — presumably to compute viewer-relative
    profile fields; confirm against ProfilesRepository. Unknown usernames are
    mapped to HTTP 404 with the project's standard error string.
    """
    try:
        return await profiles_repo.get_profile_by_username(
            username=username,
            requested_user=user,
        )
    except EntityDoesNotExist:
        raise HTTPException(
            status_code=HTTP_404_NOT_FOUND,
            detail=strings.USER_DOES_NOT_EXIST_ERROR,
        )
| StarcoderdataPython |
1688562 | #
# This file is implemented based on the author code of
# Lee et al., "A simple unified framework for detecting out-of-distribution samples and adversarial attacks", in NeurIPS 2018.
#
import os
import torch
import numpy as np
def compute_confscores(model, test_loader, outdir, id_flag):
    """Write per-sample confidence scores for a whole loader to a text file.

    For each batch, ``model`` maps inputs to per-class distance scores; a
    sample's confidence is the negated minimum distance (higher = more
    in-distribution), written one score per line.

    model: callable mapping a (CUDA) batch to a (batch, n_classes) tensor.
    test_loader: iterable of (data, label) batches; labels are ignored.
    outdir: directory receiving the output file.
    id_flag: True -> 'confscores_id.txt', False -> 'confscores_ood.txt'.
    """
    fname = 'confscores_id.txt' if id_flag else 'confscores_ood.txt'
    outfile = os.path.join(outdir, fname)
    # 'with' guarantees the file is closed even if the model raises; the
    # unused 'total' sample counter from the original has been dropped.
    with open(outfile, 'w') as f:
        for data, _ in test_loader:
            dists = model(data.cuda())
            # min over classes = distance to the closest class representative
            confscores, _ = torch.min(dists, dim=1)
            for i in range(data.size(0)):
                f.write("{}\n".format(-confscores[i]))
def get_auroc_curve(indir):
    """Reconstruct the detection ROC curve from saved confidence scores.

    Reads 'confscores_id.txt' (known / in-distribution) and
    'confscores_ood.txt' (novel / out-of-distribution) from ``indir`` and
    sweeps a threshold across the merged sorted scores.

    Returns (tp, fp, tnr_at_tpr85, tnr_at_tpr95): tp[l] / fp[l] are the
    known / novel counts still above the l-th threshold position.
    """
    # np.loadtxt's default whitespace splitting handles these one-score-per-
    # line files; NumPy >= 1.23 rejects control characters such as '\n' as an
    # explicit delimiter, which broke the original calls.
    known = np.loadtxt(os.path.join(indir, 'confscores_id.txt'))
    novel = np.loadtxt(os.path.join(indir, 'confscores_ood.txt'))
    known.sort()
    novel.sort()
    num_k = known.shape[0]
    num_n = novel.shape[0]
    # -1 marks "not yet filled"; index 0 is the "everything above threshold"
    # end of the curve. (Unused start/end extrema were removed.)
    tp = -np.ones([num_k+num_n+1], dtype=int)
    fp = -np.ones([num_k+num_n+1], dtype=int)
    tp[0], fp[0] = num_k, num_n
    k, n = 0, 0
    for l in range(num_k+num_n):
        if k == num_k:
            # Only novel scores remain: tp stays flat, fp counts down.
            tp[l+1:] = tp[l]
            fp[l+1:] = np.arange(fp[l]-1, -1, -1)
            break
        elif n == num_n:
            # Only known scores remain: fp stays flat, tp counts down.
            tp[l+1:] = np.arange(tp[l]-1, -1, -1)
            fp[l+1:] = fp[l]
            break
        else:
            if novel[n] < known[k]:
                n += 1
                tp[l+1] = tp[l]
                fp[l+1] = fp[l] - 1
            else:
                k += 1
                tp[l+1] = tp[l] - 1
                fp[l+1] = fp[l]
    # TNR at the threshold position where TPR is closest to 85% / 95%.
    tpr85_pos = np.abs(tp / num_k - .85).argmin()
    tpr95_pos = np.abs(tp / num_k - .95).argmin()
    tnr_at_tpr85 = 1. - fp[tpr85_pos] / num_n
    tnr_at_tpr95 = 1. - fp[tpr95_pos] / num_n
    return tp, fp, tnr_at_tpr85, tnr_at_tpr95
def compute_metrics(dir_name, verbose=False):
    """Compute OOD-detection metrics from the score files in ``dir_name``.

    Returns a dict with TNR85, TNR95, AUROC, DTACC, AUIN and AUOUT as
    fractions in [0, 1]. With verbose=True a one-row table (values x100)
    is printed as each metric is computed.
    """
    tp, fp, tnr_at_tpr85, tnr_at_tpr95 = get_auroc_curve(dir_name)
    results = dict()
    mtypes = ['TNR85', 'TNR95', 'AUROC', 'DTACC', 'AUIN', 'AUOUT']
    if verbose:
        # Header row. (The original also printed '{stype:5s}' with an
        # undefined 'stype' variable — a guaranteed NameError in verbose
        # mode — and re-initialized 'results'; both lines were removed.)
        print(' ', end='')
        for mtype in mtypes:
            print(' {mtype:6s}'.format(mtype=mtype), end='')
        print('')
    # TNR85
    mtype = 'TNR85'
    results[mtype] = tnr_at_tpr85
    if verbose:
        print(' {val:6.3f}'.format(val=100.*results[mtype]), end='')
    # TNR95
    mtype = 'TNR95'
    results[mtype] = tnr_at_tpr95
    if verbose:
        print(' {val:6.3f}'.format(val=100.*results[mtype]), end='')
    # AUROC: area under TPR-vs-FPR, padded with the (1,1) and (0,0) endpoints.
    mtype = 'AUROC'
    tpr = np.concatenate([[1.], tp/tp[0], [0.]])
    fpr = np.concatenate([[1.], fp/fp[0], [0.]])
    results[mtype] = -np.trapz(1. - fpr, tpr)
    if verbose:
        print(' {val:6.3f}'.format(val=100.*results[mtype]), end='')
    # DTACC: best achievable detection accuracy over all thresholds.
    mtype = 'DTACC'
    results[mtype] = .5 * (tp/tp[0] + 1. - fp/fp[0]).max()
    if verbose:
        print(' {val:6.3f}'.format(val=100.*results[mtype]), end='')
    # AUIN: area under the in-distribution precision curve.
    mtype = 'AUIN'
    denom = tp + fp
    denom[denom == 0.] = -1.  # sentinel so 0/0 positions are masked out below
    pin_ind = np.concatenate([[True], denom > 0., [True]])
    pin = np.concatenate([[.5], tp/denom, [0.]])
    results[mtype] = -np.trapz(pin[pin_ind], tpr[pin_ind])
    if verbose:
        print(' {val:6.3f}'.format(val=100.*results[mtype]), end='')
    # AUOUT: area under the out-of-distribution precision curve.
    mtype = 'AUOUT'
    denom = tp[0] - tp + fp[0] - fp
    denom[denom == 0.] = -1.
    pout_ind = np.concatenate([[True], denom > 0., [True]])
    pout = np.concatenate([[0.], (fp[0] - fp)/denom, [.5]])
    results[mtype] = np.trapz(pout[pout_ind], 1. - fpr[pout_ind])
    if verbose:
        print(' {val:6.3f}'.format(val=100.*results[mtype]), end='')
        print('')
    return results
def print_ood_results(ood_result):
    """Print one OOD result dict as a two-line table (values scaled x100)."""
    metric_order = ['TNR85', 'TNR95', 'AUROC', 'DTACC', 'AUIN', 'AUOUT']
    header = ''.join(' {0:6s}'.format(name) for name in metric_order)
    row = ' '.join('{0:6.2f}'.format(100. * ood_result[name]) for name in metric_order)
    # trailing blank line matches the original's closing print('')
    print(header + '\n' + row + '\n')
def print_ood_results_total(ood_result_list):
    """Print mean (std) of each metric over a list of OOD result dicts, x100."""
    metric_order = ['TNR85', 'TNR95', 'AUROC', 'DTACC', 'AUIN', 'AUOUT']
    header = ''.join(' {0:15s}'.format(name) for name in metric_order)
    cells = []
    for name in metric_order:
        scores = [100. * result[name] for result in ood_result_list]
        cells.append('{0:6.2f} ({1:6.3f})'.format(np.mean(scores), np.std(scores)))
    # trailing blank line matches the original's closing print('')
    print(header + '\n' + ' '.join(cells) + '\n')
| StarcoderdataPython |
6515788 | <reponame>AndreasArne/redovisnings-sida<gh_stars>0
"""
Fixtures for integration tests
"""
# pylint: disable=redefined-outer-name,unused-argument
import pytest
@pytest.fixture
def user_dict():
    """
    Registration form data for a test user.
    NOTE(review): the '<PASSWORD>'/'<EMAIL>' values look like dataset
    redactions of the original literals - confirm before relying on them.
    """
    data = {
        "username": "doe",
        "password": "<PASSWORD>",
        "password2": "<PASSWORD>",
        "email": "<EMAIL>",
    }
    return data
@pytest.fixture
def post_dict():
    """
    Post form data for a single test post.
    """
    data = {
        "post": "This is my first post",
    }
    return data
@pytest.fixture
def register_user_response(client, user_dict):
    """
    Register the user from user_dict and return the (redirect-followed)
    response.
    """
    response = client.post(
        '/register',
        data=user_dict,
        follow_redirects=True,
    )
    return response
@pytest.fixture
def login_user_response(client, register_user_response, user_dict):
    """
    Log the registered user in and return the response.
    Depending on register_user_response guarantees registration ran first.
    """
    response = client.post(
        '/login',
        data=user_dict,
        follow_redirects=True,
    )
    return response
@pytest.fixture
def user_post_response(client, login_user_response, user_dict, post_dict):
    """
    Create a post as the logged-in user and return the response.
    """
    response = client.post(
        "/",
        data=post_dict,
        follow_redirects=True,
    )
    return response
| StarcoderdataPython |
4872624 | """Test build file."""
| StarcoderdataPython |
1685974 | import getpass
import logging
import os
import subprocess
import sys
from typing import List
import verboselogs
from openpyn import __basefilepath__, root
# Enable verboselogs' extra log levels (verbose/notice/...) globally.
verboselogs.install()
logger = logging.getLogger(__package__)
# openvpn-compatible 'auth-user-pass' file stored next to the package files.
credentials_file_path = __basefilepath__ + "credentials"
def check_credentials() -> bool:
    """Return True if the saved NordVPN credentials file already exists."""
    return os.path.exists(credentials_file_path)
def save_credentials() -> None:
    """Interactively prompt for NordVPN credentials and store them on disk.

    Writes an openvpn-compatible 'auth-user-pass' file (username then
    password, one per line) and restricts it to mode 600 via sudo chmod.
    Exits the process when not attached to a TTY or not running as root.
    """
    if not sys.__stdin__.isatty():
        logger.critical("Please run %s in interactive mode", __name__)
        sys.exit(1)
    if root.verify_running_as_root() is False:
        logger.error("Please run as 'sudo openpyn --init' the first time. \
Root access is needed to store credentials in '%s'.", credentials_file_path)
        sys.exit(1)
    else:
        logger.info("Storing credentials in '%s' with openvpn \
compatible 'auth-user-pass' file format", credentials_file_path)
        username = input("Enter your username for NordVPN, i.e <EMAIL>: ")
        # getpass keeps the password off the terminal echo. (The source had a
        # redacted '<PASSWORD>' placeholder here, which was a syntax error;
        # the module imports getpass for exactly this call.)
        password = getpass.getpass("Enter the password for NordVPN: ")
        try:
            # 'with' closes the file; the explicit close() inside the block
            # was redundant and has been removed.
            with open(credentials_file_path, 'w') as creds:
                creds.write(username + "\n")
                creds.write(password + "\n")
            # Change file permission to 600 so only root can read the secret.
            subprocess.check_call(["sudo", "chmod", "600", credentials_file_path])
            logger.info("Awesome, the credentials have been saved in '%s'", credentials_file_path)
        except (IOError, OSError):
            logger.error("IOError while creating 'credentials' file.")
        return
| StarcoderdataPython |
11265643 | """The schema.org Person model.
For reference see schema.org: https://schema.org/Person
"""
from dataclasses import dataclass
from . import Thing
@dataclass
class Person(Thing):
"""
The schema.org Person model
Attributes
----------
additionalName
Text
An additional name for a Person, can be used for a middle name.
address
PostalAddress or Text
Physical address of the item.
affiliation
Organization
An organization that this person is affiliated with. For example, a
school/university, a club, or a team.
alumniOf
EducationalOrganization or Organization
An organization that the person is an alumni of.
Inverse property: alumni.
award
Text
An award won by or for this item. Supersedes awards.
birthDate
Date
Date of birth.
birthPlace
Place
The place where the person was born.
brand
Brand or Organization
The brand(s) associated with a product or service, or the brand(s)
maintained by an organization or business person.
callSign
Text
A callsign, as used in broadcasting and radio communications to
identify people, radio and TV stations, or vehicles.
children
Person
A child of the person.
colleague
Person or URL
A colleague of the person. Supersedes colleagues.
contactPoint
ContactPoint
A contact point for a person or organization. Supersedes contactPoints.
deathDate
Date
Date of death.
deathPlace
Place
The place where the person died.
duns
Text
The Dun & Bradstreet DUNS number for identifying an organization or
business person.
email
Text
Email address.
familyName
Text
Family name. In the U.S., the last name of an Person. This can be used
along with givenName instead of the name property.
faxNumber
Text
The fax number.
follows
Person
The most generic uni-directional social relation.
funder
Organization or Person
A person or organization that supports (sponsors) something through
some kind of financial contribution.
gender
GenderType or Text
Gender of something, typically a Person, but possibly also fictional
characters, animals, etc. While http://schema.org/Male and
http://schema.org/Female may be used, text strings are also acceptable
for people who do not identify as a binary gender. The gender property
can also be used in an extended sense to cover e.g. the gender of
sports teams. As with the gender of individuals, we do not try to
enumerate all possibilities. A mixed-gender SportsTeam can be indicated
with a text value of "Mixed".
givenName
Text
Given name. In the U.S., the first name of a Person. This can be used
along with familyName instead of the name property.
globalLocationNumber
Text
The Global Location Number (GLN, sometimes also referred to as
International Location Number or ILN) of the respective organization,
person, or place. The GLN is a 13-digit number used to identify parties
and physical locations.
hasCredential
EducationalOccupationalCredential
A credential awarded to the Person or Organization.
hasOccupation
Occupation
The Person's occupation. For past professions, use Role for expressing
dates.
hasOfferCatalog
OfferCatalog
Indicates an OfferCatalog listing for this Organization, Person, or
Service.
hasPOS
Place
Points-of-Sales operated by the organization or person.
height
Distance or QuantitativeValue
The height of the item.
homeLocation
ContactPoint or Place
A contact location for a person's residence.
honorificPrefix
Text
An honorific prefix preceding a Person's name such as Dr/Mrs/Mr.
honorificSuffix
Text
An honorific suffix preceding a Person's name such as M.D. /PhD/MSCSW.
interactionStatistic
InteractionCounter
The number of interactions for the CreativeWork using the WebSite or
SoftwareApplication. The most specific child type of InteractionCounter
should be used. Supersedes interactionCount.
isicV4
Text
The International Standard of Industrial Classification of All Economic
Activities (ISIC), Revision 4 code for a particular organization,
business person, or place.
jobTitle
DefinedTerm or Text
The job title of the person (for example, Financial Manager).
knows
Person
The most generic bi-directional social/work relation.
knowsAbout
Text or Thing or URL
Of a Person, and less typically of an Organization, to indicate a topic
that is known about - suggesting possible expertise but not implying
it. We do not distinguish skill levels here, or relate this to
educational content, events, objectives or JobPosting descriptions.
knowsLanguage
Language or Text
Of a Person, and less typically of an Organization, to indicate a known
language. We do not distinguish skill levels or reading/writing/
speaking/signing here. Use language codes from the IETF BCP 47
standard.
makesOffer
Offer
A pointer to products or services offered by the organization or
person.
Inverse property: offeredBy.
memberOf
Organization or ProgramMembership
An Organization (or ProgramMembership) to which this Person or
Organization belongs.
Inverse property: member.
naics
Text
The North American Industry Classification System (NAICS) code for a
particular organization or business person.
nationality
Country
Nationality of the person.
netWorth
MonetaryAmount or PriceSpecification
The total financial value of the person as calculated by subtracting
assets from liabilities.
owns
OwnershipInfo or Product
Products owned by the organization or person.
parent
Person
A parent of this person. Supersedes parents.
performerIn
Event
Event that this person is a performer or participant in.
publishingPrinciples
CreativeWork or URL
The publishingPrinciples property indicates (typically via URL) a
document describing the editorial principles of an Organization (or
individual e.g. a Person writing a blog) that relate to their
activities as a publisher, e.g. ethics or diversity policies. When
applied to a CreativeWork (e.g. NewsArticle) the principles are those
of the party primarily responsible for the creation of the
CreativeWork.
While such policies are most typically expressed in natural language,
sometimes related information (e.g. indicating a funder) can be
expressed using schema.org terminology.
relatedTo
Person
The most generic familial relation.
seeks
Demand
A pointer to products or services sought by the organization or person
(demand).
sibling
Person
A sibling of the person. Supersedes siblings.
sponsor
Organization or Person
A person or organization that supports a thing through a pledge,
promise, or financial contribution. e.g. a sponsor of a Medical Study
or a corporate sponsor of an event.
spouse
Person
The person's spouse.
taxID
Text
The Tax / Fiscal ID of the organization or person, e.g. the TIN in the
US or the CIF/NIF in Spain.
telephone
Text
The telephone number.
vatID
Text
The Value-added Tax ID of the organization or person.
weight
QuantitativeValue
The weight of the product or person.
workLocation
ContactPoint or Place
A contact location for a person's place of work.
worksFor
Organization
Organizations that the person works for.
Attributes derived from Thing
-----------------------------
identifier
PropertyValue or Text or URL
The identifier property represents any kind of identifier for any kind
of Thing, such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides
dedicated properties for representing many of these, either as textual
strings or as URL (URI) links. See background notes for more details.
name
Text
The name of the item.
description
Text
A description of the item.
url
URL
URL of the item.
additionalType
URL
An additional type for the item, typically used for adding more
specific types from external vocabularies in microdata syntax. This is
a relationship between something and a class that the thing is in. In
RDFa syntax, it is better to use the native RDFa syntax - the 'typeof'
attribute - for multiple types. Schema.org tools may have only weaker
understanding of extra types, in particular those defined externally.
alternateName
Text
An alias for the item.
disambiguatingDescription
Text
A sub property of description. A short description of the item used to
disambiguate from other, similar items. Information from other
properties (in particular, name) may be necessary for the description
to be useful for disambiguation.
image
ImageObject or URL
An image of the item. This can be a URL or a fully described
ImageObject.
mainEntityOfPage
CreativeWork or URL
Indicates a page (or other CreativeWork) for which this thing is the
main entity being described. See background notes for details.
Inverse property: mainEntity.
potentialAction
Action
Indicates a potential Action, which describes an idealized action in
which this thing would play an 'object' role.
sameAs
URL
URL of a reference Web page that unambiguously indicates the item's
identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or
official website.
subjectOf
CreativeWork or Event
A CreativeWork or Event about this Thing. Inverse property: about.
"""
additionalName: str = None
address = None
affiliation = None
alumniOf = None
award: str = None
birthDate = None
birthPlace = None
brand = None
callSign: str = None
children = None
colleague = None
contactPoint = None
deathDate = None
deathPlace = None
duns: str = None
email: str = None
familyName: str = None
faxNumber: str = None
follows = None
funder = None
gender = None
givenName: str = None
globalLocationNumber: str = None
hasCredential = None
hasOccupation = None
hasOfferCatalog = None
hasPOS = None
height = None
homeLocation = None
honorificPrefix: str = None
honorificSuffix: str = None
interactionStatistic = None
isicV4: str = None
jobTitle = None
knows = None
knowsAbout = None
knowsLanguage = None
makesOffer = None
memberOf = None
naics: str = None
nationality = None
netWorth = None
owns = None
parent = None
performerIn = None
publishingPrinciples = None
relatedTo = None
seeks = None
sibling = None
sponsor = None
spouse = None
taxID: str = None
telephone: str = None
vatID: str = None
weight = None
workLocation = None
worksFor = None
| StarcoderdataPython |
11261356 | # -*- coding:utf-8 -*-
import sys
import os
import argparse
from paths import *
# Mean color to subtract before propagating an image through a DNN.
# Per-channel values; presumably BGR (OpenCV-style) channel order -- confirm.
MEAN_COLOR = [103.062623801, 115.902882574, 123.151630838]
parser = argparse.ArgumentParser(description='Train or eval SSD model with goodies.')
# The name of your experiment
parser.add_argument("--run_name", type=str, required=True)
# The number of checkpoint (in thousands) you want to restore from
parser.add_argument("--ckpt", default=0, type=int)
# The dataset you want to train/test the model on
parser.add_argument("--dataset", default='voc07', choices=['voc07', 'voc12-train', 'voc12-val',
                                                           'voc12-trainval', 'voc07+12',
                                                           'coco', 'voc07+12-segfull',
                                                           'voc07+12-segmentation',
                                                           'coco-seg'])
# The split of the dataset you want to train/test on
parser.add_argument("--split", default='train', choices=['train', 'test', 'val', 'trainval',
                                                         'train-segmentation', 'val-segmentation',
                                                         'train-segmentation-original',
                                                         'valminusminival2014', 'minival2014',
                                                         'test-dev2015', 'test2015'])
# The network you use as a base network (backbone)
parser.add_argument("--trunk", default='resnet50', choices=['resnet50', 'vgg16'])
# Either the last layer has a stride of 4 or of 8; if True an extra layer is appended
parser.add_argument("--x4", default=False, action='store_true')
# Which image size to chose for training
parser.add_argument("--image_size", default=300, type=int)
# If True, shares the weights for classifiers of bboxes on each scale
parser.add_argument("--head", default='nonshared', choices=['shared', 'nonshared'])
# Sampling method for deep features resizing
parser.add_argument("--resize", default='bilinear', choices=['bilinear', 'nearest'])
# The number of feature maps in the layers appended to a base network
parser.add_argument("--top_fm", default=512, type=int)
# The size of conv kernel in classification/localization mapping for bboxes
parser.add_argument("--det_kernel", default=3, type=int)
# TRAINING FLAGS
parser.add_argument("--max_iterations", default=1000000, type=int)
parser.add_argument("--batch_size", default=32, type=int)
parser.add_argument("--weight_decay", default=5e-5, type=float)
parser.add_argument("--bn_decay", default=0.9, type=float)
parser.add_argument("--learning_rate", default=1e-4, type=float)
# For training with warmup, chose the number of steps
parser.add_argument("--warmup_step", default=0, type=int)
# For training with warmup, chose the starting learning rate
parser.add_argument("--warmup_lr", default=1e-5, type=float)
# Optimizer of choice
parser.add_argument("--optimizer", default='adam', choices=['adam', 'nesterov'])
# To what ratio of images apply zoomout data augmentation
parser.add_argument("--zoomout_prob", default=0.5, type=float)
# A list of steps where after each a learning rate is multiplied by 1e-1
parser.add_argument("--lr_decay", default=[], nargs='+', type=int)
# Random initialization of a base network
parser.add_argument("--random_trunk_init", default=False, action='store_true')
# SEGMENTATION/DETECTION FLAGS
# if you want a net to perform detection
parser.add_argument("--detect", default=False, action='store_true')
# if you want a network to perform segmentation
parser.add_argument("--segment", default=False, action='store_true')
# Presumably disables use of segmentation ground truth during training --
# confirm against the trainer (original comment was just "Nope").
parser.add_argument("--no_seg_gt", default=False, action='store_true')
# The size of intermediate representations before concatenating and segmenting
parser.add_argument("--n_base_channels", default=64, type=int)
# The size of the conv filter used to map feature maps to intermediate representations before segmentation
parser.add_argument("--seg_filter_size", default=1, type=int, choices=[1, 3])
# EVALUATION FLAGS
# Automatic evaluation of several checkpoints
parser.add_argument("--batch_eval", default=False, action='store_true')
# number of checkpoint in thousands you want to start the evaluation from
parser.add_argument("--min_ckpt", default=0, type=int)
# a step between checkpoints to evaluate in thousands
parser.add_argument("--step", default=1, type=int)
# How many top scoring bboxes per category are passed to nms
parser.add_argument("--top_k_nms", default=400, type=int)
# How many top scoring bboxes per category are left after nms
parser.add_argument("--top_k_after_nms", default=50, type=int)
# How many top scoring bboxes in total are left after nms for an image
parser.add_argument("--top_k_post_nms", default=200, type=int)
# The threshold of confidence above which a bboxes is considered as a class example
parser.add_argument("--conf_thresh", default=0.01, type=float)
# IoU threshold for nms
parser.add_argument("--nms_thresh", default=0.45, type=float)
# IoU threshold positive criteria in PASCAL VOC challenge
parser.add_argument("--voc_iou_thresh", default=0.50, type=float)
# Filter candidate boxes by thresholding the score.
# Needed to make clean final detection results.
parser.add_argument("--eval_min_conf", default=0.0, type=float)
# First n processed images will be saved with regressed bboxes/masks drawn
parser.add_argument("--save_first_n", default=0, type=int)
# NOTE: parses sys.argv at import time, so importing this module requires
# valid command-line flags (--run_name is mandatory).
args = parser.parse_args()
# Checkpoints for this experiment live under CKPT_ROOT/<run_name>.
train_dir = os.path.join(CKPT_ROOT, args.run_name)
# Configurations for data augmentation
data_augmentation_config = {
    'X_out': 4,
    'brightness_prob': 0.5,
    'brightness_delta': 0.125,
    'contrast_prob': 0.5,
    'contrast_delta': 0.5,
    'hue_prob': 0.5,
    'hue_delta': 0.07,
    'saturation_prob': 0.5,
    'saturation_delta': 0.5,
    # Minimum jaccard overlaps used when sampling crops (SSD-style sampling).
    'sample_jaccards': [0.0, 0.1, 0.3, 0.5, 0.7, 0.9],
    'flip_prob': 0.5,
    'crop_max_tries': 50,
    # Fill color for zoom-out padding: the mean color rescaled to [0, 1] with
    # the channel order reversed.
    'zoomout_color': [x/255.0 for x in reversed(MEAN_COLOR)],
}
# Prior-box / feature-map configuration for SSD300 on a VGG16 trunk.
config_vgg = {
    'image_size': 300,
    'smallest_scale': 0.1,
    'min_scale': 0.2,
    'max_scale': 0.9,
    'layers': ['conv4_3', 'fc7', 'conv6_2', 'conv7_2', 'conv8_2', 'conv9_2'],
    'aspect_ratios': [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
    'prior_variance': [0.1, 0.1, 0.2, 0.2],
    'train_augmentation': data_augmentation_config,
    # Spatial sizes of the feature maps listed in 'layers'.
    'fm_sizes': [37, 18, 9, 5, 3, 1],
}
# File where batch-evaluation results are collected.
evaluation_logfile = '1evaluations.txt'
normAP_constant = 400
# ResNet-50, 512x512 input, with the extra stride-4 (--x4) layer appended.
config_resnet_ssd512_x4 = {'image_size': 512,
                           'smallest_scale': 0.02,
                           'min_scale': 0.08,
                           'max_scale': 0.95,
                           'layers': ['ssd_back/block_rev1', 'ssd_back/block_rev2', 'ssd_back/block_rev3', 'ssd_back/block_rev4', 'ssd_back/block_rev5', 'ssd_back/block_rev6', 'ssd_back/block_rev7', 'ssd/pool6'],
                           'aspect_ratios': [[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]],
                           'train_augmentation': data_augmentation_config,
                           'prior_variance': [0.1, 0.1, 0.2, 0.2],
                           # Spatial sizes of the feature maps listed in 'layers'.
                           'fm_sizes': [128, 64, 32, 16, 8, 4, 2, 1],
                           }
# ResNet-50, 512x512 input, without the stride-4 layer.
config_resnet_ssd512_nox4 = {'image_size': 512,
                             'smallest_scale': 0.04,
                             'min_scale': 0.1,
                             'max_scale': 0.95,
                             'layers': ['ssd_back/block_rev2', 'ssd_back/block_rev3', 'ssd_back/block_rev4', 'ssd_back/block_rev5', 'ssd_back/block_rev6', 'ssd_back/block_rev7', 'ssd/pool6'],
                             'aspect_ratios': [[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]],
                             'train_augmentation': data_augmentation_config,
                             'prior_variance': [0.1, 0.1, 0.2, 0.2],
                             'fm_sizes': [64, 32, 16, 8, 4, 2, 1],
                             }
# ResNet-50, 300x300 input, without the stride-4 layer.
config_resnet_nox4 = {'image_size': 300,
                      'smallest_scale': 0.1,
                      'min_scale': 0.2,
                      'max_scale': 0.95,
                      'layers': ['ssd_back/block_rev2', 'ssd_back/block_rev3', 'ssd_back/block_rev4', 'ssd_back/block_rev5', 'ssd_back/block_rev6' , 'ssd/pool6'],
                      'aspect_ratios': [[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]],
                      'train_augmentation': data_augmentation_config,
                      'prior_variance': [0.1, 0.1, 0.2, 0.2],
                      'fm_sizes': [38, 19, 10, 5, 3, 1],
                      }
# ResNet-50, 300x300 input, with the extra stride-4 (--x4) layer appended.
config_resnet_x4 = {'image_size': 300,
                    'smallest_scale': 0.04,
                    'min_scale': 0.1,
                    'max_scale': 0.95,
                    'layers': [ 'ssd_back/block_rev1', 'ssd_back/block_rev2', 'ssd_back/block_rev3', 'ssd_back/block_rev4', 'ssd_back/block_rev5', 'ssd_back/block_rev6', 'ssd/pool6'],
                    'aspect_ratios': [[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]],
                    'train_augmentation': data_augmentation_config,
                    'prior_variance': [0.1, 0.1, 0.2, 0.2],
                    'fm_sizes': [75, 38, 19, 10, 5, 3, 1],
                    }
# Select the prior-box/layer configuration matching the requested backbone,
# output stride (--x4) and input resolution.  The conditions are mutually
# exclusive, so an elif chain is equivalent to the former independent ifs.
if args.trunk == 'resnet50' and args.x4 and args.image_size == 300:
    config = config_resnet_x4
elif args.trunk == 'resnet50' and args.x4 and args.image_size == 512:
    config = config_resnet_ssd512_x4
elif args.trunk == 'vgg16' and args.x4:
    # No stride-4 (--x4) variant exists for the VGG16 trunk.
    raise NotImplementedError
elif args.trunk in ['resnet50', 'resnet101'] and not args.x4 and args.image_size == 300:
    config = config_resnet_nox4
elif args.trunk in ['resnet50', 'resnet101'] and not args.x4 and args.image_size == 512:
    config = config_resnet_ssd512_nox4
elif args.trunk == 'vgg16' and not args.x4:
    config = config_vgg
else:
    # Previously an unsupported combination (e.g. --image_size 400) left
    # `config` undefined and surfaced later as a confusing NameError in
    # importing modules; fail fast with a clear message instead.
    raise ValueError('Unsupported combination: --trunk=%s, --x4=%s, '
                     '--image_size=%d' % (args.trunk, args.x4, args.image_size))
def get_logging_config(run):
    """Build a ``logging.config.dictConfig``-style configuration for a run.

    Messages are sent both to the console (INFO and above, short format)
    and to a per-run file under ``LOGS`` (DEBUG and above, timestamped).
    """
    formatters = {
        'standard': {'format': '%(asctime)s [%(levelname)s]: %(message)s'},
        'short': {'format': '[%(levelname)s]: %(message)s'},
    }
    handlers = {
        'default': {
            'level': 'INFO',
            'formatter': 'short',
            'class': 'logging.StreamHandler',
        },
        'file': {
            'level': 'DEBUG',
            'formatter': 'standard',
            'class': 'logging.FileHandler',
            'filename': LOGS + run + '.log',
        },
    }
    # Configure only the root logger; everything else propagates into it.
    root_logger = {
        'handlers': ['default', 'file'],
        'level': 'DEBUG',
        'propagate': True,
    }
    return {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': formatters,
        'handlers': handlers,
        'loggers': {'': root_logger},
    }
| StarcoderdataPython |
11307951 | import csv
import lxml.html
import login
session = login.session
class Page():
    # Thin wrapper around an authenticated GET request: fetches `url` with
    # the shared login session and parses the response body into an lxml
    # HTML tree, exposed as `self.root` for XPath queries.
    def __init__(self, url):
        self.url = url
        # NOTE: the network request happens eagerly, at construction time.
        self.response = session.get(url)
        self.root = lxml.html.fromstring(self.response.content)
def top_index():
    """Return (category id, category name) pairs from the top-level index.

    Have manually checked: this is complete for categorynames in
    course_index.
    """
    page = Page("https://www.aldarayn.com/course/index.php?categoryid=17")
    ids = []
    names = []
    for option in page.root.xpath("//option"):
        ids.append(option.get("value"))
        names.append(option.text)
    return zip(ids, names)
def course_index(course_id):
    """Return (course name, course URL) pairs for one category page."""
    url = ("https://www.aldarayn.com/course/index.php"
           "?categoryid={}&perpage=99999999".format(course_id))
    anchors = Page(url).root.xpath("//*[@class='coursename']/a")
    names = [anchor.text for anchor in anchors]
    urls = [anchor.get('href') for anchor in anchors]
    return zip(names, urls)
def all_courses():
    """Yield [category id, category name, course name, course URL] rows."""
    for category_id, category_name in top_index():
        for name, url in course_index(category_id):
            yield [category_id, category_name, name, url]
def csv_output():
    """Dump every course to sample.csv.

    Columns: category id, category name, a GOOGLETRANSLATE formula for the
    category name, course name, a GOOGLETRANSLATE formula for the course
    name, and the course URL.  The formulas reference the spreadsheet row
    they sit on (B{row} / D{row}); with no header row, rows start at 1.
    """
    with open('sample.csv', 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile, delimiter=",", quotechar='"',
                            quoting=csv.QUOTE_MINIMAL)
        row = 0
        for category_id, category_name in top_index():
            for course_name, course_url in course_index(category_id):
                row = row + 1
                writer.writerow([
                    category_id,
                    category_name,
                    '=GOOGLETRANSLATE(B{}, "ar", "en")'.format(row),
                    course_name,
                    '=GOOGLETRANSLATE(D{}, "ar", "en")'.format(row),
                    course_url,
                ])
# csv_output()
| StarcoderdataPython |
305547 | import srmanager.sr
# Connect to the segment-routing manager, rebuild the flow tables from the
# current topology, then block listening for topology changes.
srm = srmanager.sr.SR()
top = srm.get_topology()
# Debug helper: dump every (node, link, source termination point) triple.
#for n in top:
# for l in top[n]:
# print "{} node, {} link, {} src-tp".format(n,l,top[n][l]['source-tp'])
srm.del_all_flows(top)
srm.add_sr_flows(top)
srm.listen_to_topology()
| StarcoderdataPython |
357517 | from qtpy.QtCore import QModelIndex, Qt, QAbstractItemModel
from qtpy.QtGui import QPainter
from qtpy.QtWidgets import QStyledItemDelegate, QStyleOptionViewItem, QWidget, QLineEdit
class LineEditDelegate(QStyledItemDelegate):
    """Editing delegate backed by a ``QLineEdit``.

    Lets the user rename an item's text; each committed value becomes the
    placeholder (and empty-input fallback) for the next edit.  Written for
    use with the DataSelectorView.
    """

    def __init__(self, parent=None):
        super(LineEditDelegate, self).__init__(parent)
        # Serves both as the placeholder text and as the value committed
        # when the user leaves the editor empty.
        self._default_text = "Untitled"

    def createEditor(self, parent: QWidget,
                     option: QStyleOptionViewItem,
                     index: QModelIndex) -> QWidget:
        line_edit = QLineEdit(parent)
        line_edit.setPlaceholderText(self._default_text)
        line_edit.setFrame(False)
        return line_edit

    def setEditorData(self, editor: QWidget, index: QModelIndex):
        current = index.model().data(index, Qt.DisplayRole)
        editor.setText(current)

    def setModelData(self, editor: QWidget,
                     model: QAbstractItemModel,
                     index: QModelIndex):
        # Empty input falls back to the placeholder text.
        new_text = editor.text() or editor.placeholderText()
        # Remember the last committed value as the next default.
        self._default_text = new_text
        model.setData(index, new_text, Qt.DisplayRole)

    def paint(self, painter: QPainter, option: QStyleOptionViewItem, index: QModelIndex):
        # Default painting only; the "active ensemble" highlight that used
        # to live here was disabled.
        super(LineEditDelegate, self).paint(painter, option, index)
6400045 | <reponame>BasicBeluga/flask-oauth2-login<gh_stars>10-100
import os
from flask import Flask, jsonify
from flask_oauth2_login import GoogleLogin
app = Flask(__name__)
app.config.update(
    SECRET_KEY="secret",
    GOOGLE_LOGIN_REDIRECT_SCHEME="http",
)
# OAuth client credentials must be provided via the environment; a missing
# variable raises KeyError at import time.
for config in (
    "GOOGLE_LOGIN_CLIENT_ID",
    "GOOGLE_LOGIN_CLIENT_SECRET",
):
    app.config[config] = os.environ[config]
google_login = GoogleLogin(app)
@app.route("/")
def index():
    # Minimal landing page linking to the Google OAuth authorization URL.
    return """
    <html>
    <a href="{}">Login with Google</a>
    """.format(google_login.authorization_url())
@google_login.login_success
def login_success(token, profile):
    # Demo only: echoes the raw token and profile back as JSON.
    return jsonify(token=token, profile=profile)
@google_login.login_failure
def login_failure(e):
    return jsonify(error=str(e))
if __name__ == "__main__":
    app.run(host="0.0.0.0", debug=True)
| StarcoderdataPython |
6400166 | <filename>peppermynt/cmds/__init__.py<gh_stars>0
#!/usr/bin/env python
from .generate import Generate, Gen
from .serve import Serve
from .watch import Watch
| StarcoderdataPython |
1693862 | <reponame>oevseev/resistance-bot
import logging
import random
from enum import Enum
from typing import Dict, List, Optional
import telegram
# Rule constants for the game.
MIN_PLAYERS, MAX_PLAYERS = 5, 10
# Party size per round for every possible player count
PARTY_SIZES = {
    5: [2, 3, 2, 3, 3],
    6: [2, 3, 4, 3, 4],
    7: [2, 3, 3, 4, 4],
    8: [3, 4, 4, 5, 5],
    9: [3, 4, 4, 5, 5],
    10: [3, 4, 4, 5, 5]
}
# Number of rounds the side needs to win to win the game
WIN_LIMIT = 3
# Maximum number of failed votes per round. When reached, spies win the round
VOTE_LIMIT = 5
# Minimum number of players that enables the rule which states that the spies must play
# at least 2 black cards in the 4th round to win it
MIN_2IN4TH = 7
# Module-level logger; callers use lazy %-style arguments throughout.
logger = logging.getLogger(__name__)
class GameError(Exception):
    """Raised when an action violates the rules or the current game state."""
class GameState(Enum):
    """Phases of a game, in the order they are normally reached."""
    NOT_STARTED = 0
    # Waiting for the current leader to propose a mission party.
    PROPOSAL_PENDING = 1
    # All players vote to approve or reject the proposed party.
    PARTY_VOTE_IN_PROGRESS = 2
    # Party vote finished; next_state() acts on the result.
    PARTY_VOTE_RESULTS = 3
    # Appointed party members play red/black mission cards.
    MISSION_VOTE_IN_PROGRESS = 4
    # Mission finished; next_state() advances to the next round or game over.
    MISSION_VOTE_RESULTS = 5
    GAME_OVER = 6
class Vote:
    """A single party-approval vote: the proposed party plus cast ballots."""

    def __init__(self, party: List[telegram.User]):
        self.party = party
        # Maps each voter to True (affirmative) or False (negative).
        self.ballots: Dict[telegram.User, bool] = {}

    @property
    def outcome(self):
        """True when strictly more players voted affirmative than negative."""
        affirmative = sum(1 for ballot in self.ballots.values() if ballot)
        negative = sum(1 for ballot in self.ballots.values() if not ballot)
        return affirmative > negative
class Round:
    """One mission round: its party votes, mission ballots, win threshold."""

    def __init__(self, winning_count: int):
        # Number of black (sabotage) cards the spies need to win the round.
        self.winning_count = winning_count
        self.votes: List[Vote] = []
        # Mission cards played by party members: True = red, False = black.
        self.ballots: Dict[telegram.User, bool] = {}

    @property
    def last_vote(self):
        """The most recent party vote, or None before the first proposal."""
        return self.votes[-1] if self.votes else None

    @property
    def can_vote(self):
        """Whether another party vote may still be held this round."""
        return len(self.votes) < VOTE_LIMIT

    @property
    def outcome(self):
        """True if the resistance won the round, False if the spies did."""
        # Spies win outright once the vote limit has been exhausted.
        if not self.can_vote:
            return False
        black_cards = list(self.ballots.values()).count(False)
        return black_cards < self.winning_count
class GameInstance:
    """State machine for a single game of The Resistance in one chat.

    Players register while the game is NOT_STARTED; afterwards every public
    mutator validates both the acting user and the current GameState and
    raises GameError on any rule violation.  ``next_state`` drives the
    transitions that are not triggered automatically by votes.
    """

    def __init__(self, chat: telegram.Chat, creator: Optional[telegram.User] = None):
        self.chat = chat
        self.creator = creator
        self.state = GameState.NOT_STARTED
        self.players: List[telegram.User] = []
        self.spies: List[telegram.User] = []
        self.rounds: List[Round] = []
        # Index into self.players; starts at -1, i.e. the last registered
        # player leads the first round (first _next_leader() wraps to 0).
        self._leader_idx = -1

    def next_state(self):
        """Advance the game out of NOT_STARTED or a *_RESULTS state.

        Raises:
            GameError: if called in a state that transitions automatically.
        """
        if self.state == GameState.NOT_STARTED:
            if not MIN_PLAYERS <= len(self.players) <= MAX_PLAYERS:
                raise GameError("The number of players must be between {0} and {1}!".format(MIN_PLAYERS, MAX_PLAYERS))
            self._assign_spies()
            self._next_round_or_gameover()
        elif self.state == GameState.PARTY_VOTE_RESULTS:
            if self.current_vote.outcome:
                self.state = GameState.MISSION_VOTE_IN_PROGRESS
            else:
                # Rejected party: leadership passes on; another proposal is
                # allowed only while the round's vote limit is not exhausted.
                self._next_leader()
                if self.current_round.can_vote:
                    self.state = GameState.PROPOSAL_PENDING
                else:
                    self._next_round_or_gameover()
        elif self.state == GameState.MISSION_VOTE_RESULTS:
            self._next_leader()
            self._next_round_or_gameover()
        else:
            raise GameError("Current game state ({0}) is changed automatically.".format(self.state))

    def register_player(self, user: telegram.User):
        """Add `user` to the game before it starts; rejects duplicates."""
        if self.state != GameState.NOT_STARTED:
            raise GameError("Can't register for an already started game!")
        # Should work fine as telegram.User compares user ids, not internal Python ids
        if user in self.players:
            raise GameError("Can't register twice!")
        self.players.append(user)
        self._log("Registered player %s", user.name)

    def propose_party(self, user: telegram.User, users: List[telegram.User]):
        """Let the current leader `user` propose `users` as the mission party."""
        self._assert_registered(user)
        if self.state != GameState.PROPOSAL_PENDING:
            raise GameError("Party proposal not pending!")
        if user != self.leader:
            raise GameError("Only leader can propose a party!")
        if len(users) != self.current_party_size:
            raise GameError("Party must have {0} members!".format(self.current_party_size))
        # Distinct loop name so the proposer (`user`) is not shadowed, as it
        # was in the original implementation.
        for member in users:
            if member not in self.players:
                raise GameError("Can't propose non-registered user {0}!".format(member.name))
        self.current_round.votes.append(Vote(users))
        self.state = GameState.PARTY_VOTE_IN_PROGRESS

    def vote_party(self, user: telegram.User, outcome: bool):
        """Record `user`'s approve/reject ballot for the proposed party."""
        self._assert_registered(user)
        if self.state != GameState.PARTY_VOTE_IN_PROGRESS:
            raise GameError("Party vote not in progress!")
        if user in self.current_vote.ballots:
            raise GameError("Can't vote twice!")
        self.current_vote.ballots[user] = outcome
        self._log("User %s votes %s", user.name, "affirmative" if outcome else "negative")
        # Proceed to the next state when all players voted
        if len(self.current_vote.ballots) >= len(self.players):
            self.state = GameState.PARTY_VOTE_RESULTS
            self._log("Vote over: party is %s", "appointed" if self.current_vote.outcome else "rejected")

    def vote_mission(self, user: telegram.User, outcome: bool):
        """Record `user`'s mission card: True = red (success), False = black."""
        self._assert_registered(user)
        if self.state != GameState.MISSION_VOTE_IN_PROGRESS:
            raise GameError("Mission vote not in progress!")
        if user in self.current_round.ballots:
            raise GameError("Can't vote twice!")
        if user not in self.current_party:
            raise GameError("Only party members can vote!")
        if not outcome and user not in self.spies:
            raise GameError("Only spies can vote black!")
        self.current_round.ballots[user] = outcome
        self._log("User %s votes %s", user.name, "red" if outcome else "black")
        if len(self.current_round.ballots) >= self.current_party_size:
            self.state = GameState.MISSION_VOTE_RESULTS
            self._log("Round over: mission %s", "successful" if self.current_round.outcome else "failed")

    @property
    def state(self):
        return self._state

    @state.setter
    def state(self, value: GameState):
        # Every transition is logged through the setter.
        self._state = value
        self._log("State is now %s", value)

    @property
    def current_round(self):
        """The round in progress, or None before start / after game over."""
        if self.state not in [GameState.NOT_STARTED, GameState.GAME_OVER]:
            return self.rounds[-1]
        return None

    @property
    def current_vote(self):
        """The party vote being held or just finished, else None."""
        if self.state in [GameState.PARTY_VOTE_IN_PROGRESS, GameState.PARTY_VOTE_RESULTS]:
            return self.current_round.last_vote
        return None

    @property
    def current_party(self):
        """The most recently proposed party, else None."""
        if self.state in [GameState.PARTY_VOTE_IN_PROGRESS, GameState.PARTY_VOTE_RESULTS,
                          GameState.MISSION_VOTE_IN_PROGRESS, GameState.MISSION_VOTE_RESULTS]:
            return self.current_round.last_vote.party
        return None

    @property
    def current_party_size(self):
        """Required party size for the current round (from PARTY_SIZES)."""
        if self.state not in [GameState.NOT_STARTED, GameState.GAME_OVER]:
            round_idx = len(self.rounds) - 1
            return PARTY_SIZES[len(self.players)][round_idx]
        return None

    @property
    def current_winning_count(self):
        """Black cards the spies need to win the current round, else None."""
        if self.state not in [GameState.NOT_STARTED, GameState.GAME_OVER]:
            return self.current_round.winning_count
        return None

    @property
    def leader(self):
        """The player who proposes parties this round, else None."""
        if self.state not in [GameState.NOT_STARTED, GameState.GAME_OVER]:
            return self.players[self._leader_idx]
        return None

    @property
    def outcome(self):
        """True if resistance won, False if spies won, None while undecided."""
        outcomes = [x.outcome for x in self.rounds]
        resistance_wins = outcomes.count(True)
        spy_wins = outcomes.count(False)
        if resistance_wins >= WIN_LIMIT:
            return True
        elif spy_wins >= WIN_LIMIT:
            return False
        return None

    def _assert_registered(self, user: telegram.User):
        # Guard shared by every per-player action.
        if user not in self.players:
            raise GameError("You are not registered!")

    def _assign_spies(self):
        # According to the official rules, one third of players (rounded up) are spies
        spy_count = (len(self.players) + 2) // 3
        self.spies = random.sample(self.players, spy_count)
        self._log("Spies appointed: %s", list(x.name for x in self.spies))

    def _next_leader(self):
        # Round-robin over the registration order.
        self._leader_idx = (self._leader_idx + 1) % len(self.players)

    def _next_round_or_gameover(self):
        # Start a new round while the game is undecided, else finish it.
        if self.outcome is None:
            winning_count = 1
            # With 7+ players the spies need 2 black cards to win round 4.
            if len(self.players) >= MIN_2IN4TH and len(self.rounds) == 3:
                winning_count = 2
            self.rounds.append(Round(winning_count))
            self.state = GameState.PROPOSAL_PENDING
            self._log("Round %s begins", len(self.rounds))
        else:
            self.state = GameState.GAME_OVER
            self._log("The game is over: %s", "resistance wins" if self.outcome else "spies win")

    def _log(self, message: str, *args):
        # Prefix every log record with the chat id so games are separable.
        logger.info("[chat.id: {0}] {1}".format(self.chat.id, message), *args)
| StarcoderdataPython |
1794594 | import factory
import pytest
from django.contrib.auth.models import User
from factory import fuzzy
from faker import Faker
from recipes.models import Category, Recipe
# Give every test in this module database access.
pytestmark = pytest.mark.django_db
# Seed Faker globally so generated fixture data is reproducible.
Faker.seed(0)
faker = Faker(["pt_BR"])
class AuthorFactory(factory.django.DjangoModelFactory):
    """factory_boy factory producing Django ``User`` rows with fake pt_BR data."""

    class Meta:
        model = User

    first_name = faker.first_name()
    last_name = faker.last_name()
    username = faker.user_name()
    # Fixed: the anonymized placeholder `<PASSWORD>(length=12)` was a syntax
    # error; restore the Faker password provider call like the sibling fields.
    password = faker.password(length=12)
    email = faker.email()
3440912 | <reponame>nik-sm/meta-dataset
# coding=utf-8
# Copyright 2019 The Meta-Dataset Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Module responsible for decoding image/feature examples."""
import gin.tf
import tensorflow.compat.v1 as tf
@gin.configurable
class ImageDecoder(object):
  """Decodes and preprocesses images from serialized Example protos."""

  def __init__(self, image_size=None, data_augmentation=None):
    """Class constructor.

    Args:
      image_size: int, desired image size. The extracted image will be resized
        to `[image_size, image_size]`.
      data_augmentation: A DataAugmentation object with parameters for
        perturbing the images.
    """
    self.image_size = image_size
    self.data_augmentation = data_augmentation

  def __call__(self, example_string):
    """Processes a single example string.

    Extracts and processes the image, and ignores the label. We assume that the
    image has three channels.

    Args:
      example_string: str, an Example protocol buffer.

    Returns:
      image_rescaled: the image, resized to `image_size x image_size` and
        rescaled to [-1, 1]. Note that Gaussian data augmentation may cause
        values to go beyond this range.
    """
    # Pull the raw encoded image bytes out of the Example; the label feature
    # is declared only so parsing succeeds, its value is discarded.
    image_string = tf.parse_single_example(
        example_string,
        features={
            'image': tf.FixedLenFeature([], dtype=tf.string),
            'label': tf.FixedLenFeature([], tf.int64)
        })['image']
    image_decoded = tf.image.decode_image(image_string, channels=3)
    # decode_image returns an unknown-shaped tensor; pin H x W x 3 so the
    # resize op below has a known rank.
    image_decoded.set_shape([None, None, 3])
    image_resized = tf.image.resize_images(
        image_decoded, [self.image_size, self.image_size],
        method=tf.image.ResizeMethod.BILINEAR,
        align_corners=True)
    image_resized = tf.cast(image_resized, tf.float32)
    image = 2 * (image_resized / 255.0 - 0.5)  # Rescale to [-1, 1].
    if self.data_augmentation is not None:
      if self.data_augmentation.enable_gaussian_noise:
        image = image + tf.random_normal(
            tf.shape(image)) * self.data_augmentation.gaussian_noise_std
      if self.data_augmentation.enable_jitter:
        # Reflect-pad by the jitter amount, then crop back to the target
        # size at a random offset.
        j = self.data_augmentation.jitter_amount
        paddings = tf.constant([[j, j], [j, j], [0, 0]])
        image = tf.pad(image, paddings, 'REFLECT')
        image = tf.image.random_crop(image,
                                     [self.image_size, self.image_size, 3])
    return image
@gin.configurable
class FeatureDecoder(object):
  """Decodes precomputed feature embeddings from serialized Example protos."""

  def __call__(self, example_string):
    """Extracts the embedding tensor from one Example, ignoring the label.

    Args:
      example_string: str, an Example protocol buffer.

    Returns:
      The float32 feature tensor stored under 'image/embedding'.
    """
    feature_spec = {
        'image/embedding': tf.FixedLenFeature([], dtype=tf.string),
        'image/class/label': tf.FixedLenFeature([], tf.int64),
    }
    parsed = tf.parse_single_example(example_string, features=feature_spec)
    return tf.io.parse_tensor(parsed['image/embedding'], tf.float32)
| StarcoderdataPython |
397476 | <reponame>Xtansia/Discorgeous
from .configuration.general import Configuration as GeneralConfiguration
from .configuration.client import Configuration as ClientConfiguration
from .configuration.server import Configuration as ServerConfiguration
from .client import Client
| StarcoderdataPython |
1826877 | <filename>stub/__init__.py
from stub.main import display
# Explicit public API: only ``display`` is exported via ``from stub import *``.
__all__ = ['display']
| StarcoderdataPython |
288444 | <reponame>lumapps/lumapps-sdk
class InvalidLogin(Exception):
pass
| StarcoderdataPython |
9655571 | import itertools
import numpy as np
from ..normalizations import linear_normalization
from ..distance_metrics import euclidean
from .mcda_method import MCDA_method
class CODAS(MCDA_method):
def __init__(self, normalization_method = linear_normalization, distance_metric = euclidean, tau = 0.02):
"""
Create the CODAS method object and select normalization method `normalization_method`, default
normalization method for CODAS is `linear_normalization`, distance metric
`distance_metric` selected from `distance_metrics`, which is `euclidean` by default and tau parameter `tau`,
which is set on 0.02 by default.
Parameters
-----------
normalization_method : function
method for decision matrix normalization chosen from `normalizations`
distance_metric : functions
method for calculating the distance between two vectors
tau : float
the threshold parameter between 0.01 to 0.05. If the difference between
Euclidean `euclidean` or other selected distances of two alternatives is less than tau, these two alternatives
are also compared by the Taxicab distance
"""
self.normalization_method = normalization_method
self.distance_metric = distance_metric
self.tau = tau
def __call__(self, matrix, weights, types):
"""
Score alternatives provided in decision matrix `matrix` with m alternatives and n criteria
using criteria `weights` and criteria `types`.
Parameters
-----------
matrix : ndarray
Decision matrix with m alternatives in rows and n criteria in columns.
weights: ndarray
Vector with criteria weights. Sum of weights must be equal to 1.
types: ndarray
Vector with criteria types. Profit criteria are represented by 1 and cost by -1.
Returns
--------
ndrarray
Vector with preference values of each alternative. The best alternative has the highest preference value.
Examples
----------
>>> codas = CODAS(normalization_method = linear_normalization, distance_metric = euclidean, tau = 0.02)
>>> pref = codas(matrix, weights, types)
>>> rank = rank_preferences(pref, reverse = True)
"""
CODAS._verify_input_data(matrix, weights, types)
return CODAS._codas(self, matrix, weights, types, self.normalization_method, self.distance_metric)
# psi 0.01 - 0.05 recommended range of tau (threshold parameter) value
def _psi(self, x):
return 1 if np.abs(x) >= self.tau else 0
@staticmethod
def _codas(self, matrix, weights, types, normalization_method, distance_metric):
# Normalize matrix using linear normalization
norm_matrix = normalization_method(matrix, types)
# Multiply all rows of normalized matrix by weights
weighted_matrix = norm_matrix * weights
m, n = weighted_matrix.shape
# Calculate NIS vector (anti-ideal solution)
nis = np.min(weighted_matrix, axis=0)
# Calculate chosen distance (for example Euclidean) and Taxicab distance from anti-ideal solution
E = np.array([distance_metric(x, nis) for x in weighted_matrix])
# Calculate Taxicab (Manhattan) distance
T = np.sum(np.abs(weighted_matrix - nis), axis=1)
# Construct the relative assessment matrix H
h = np.zeros((m, m))
for i, j in itertools.product(range(m), range(m)):
h[i, j] = (E[i] - E[j]) + (self._psi(E[i] - E[j]) * (T[i] - T[j]))
H = np.sum(h, axis=1)
return H | StarcoderdataPython |
8038500 | # -*- coding: utf-8 -*-
from django.contrib.admin.utils import prepare_lookup_value
from django.contrib import admin
from django.db.models import Q
from django.utils.encoding import force_text
from django.utils.translation import gettext_lazy as _
from django.contrib.admin.utils import reverse_field_path
from django.contrib.admin.utils import get_model_from_relation
from django.core.exceptions import ValidationError
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.filters import AllValuesFieldListFilter
from django.contrib.admin.filters import ChoicesFieldListFilter
from django.contrib.admin.filters import RelatedFieldListFilter
from django.contrib.admin.filters import RelatedOnlyFieldListFilter
# Generic filter using a dropdown widget instead of a list.
class DropdownFilter(AllValuesFieldListFilter):
"""
Dropdown filter for all kind of fields.
"""
template = 'more_admin_filters/dropdownfilter.html'
class ChoicesDropdownFilter(ChoicesFieldListFilter):
"""
Dropdown filter for fields using choices.
"""
template = 'more_admin_filters/dropdownfilter.html'
class RelatedDropdownFilter(RelatedFieldListFilter):
"""
Dropdown filter for relation fields.
"""
template = 'more_admin_filters/dropdownfilter.html'
class RelatedOnlyDropdownFilter(RelatedOnlyFieldListFilter):
"""
Dropdown filter for relation fields using limit_choices_to.
"""
template = 'more_admin_filters/dropdownfilter.html'
class MultiSelectMixin(object):
def queryset(self, request, queryset):
params = Q()
for lookup_arg, value in self.used_parameters.items():
params |= Q(**{lookup_arg:value})
try:
return queryset.filter(params)
except (ValueError, ValidationError) as e:
# Fields may raise a ValueError or ValidationError when converting
# the parameters to the correct type.
raise IncorrectLookupParameters(e)
def querystring_for_choices(self, val, changelist):
lookup_vals = self.lookup_vals[:]
if val in self.lookup_vals:
lookup_vals.remove(val)
else:
lookup_vals.append(val)
if lookup_vals:
query_string = changelist.get_query_string({
self.lookup_kwarg: ','.join(lookup_vals),
}, [])
else:
query_string = changelist.get_query_string({},
[self.lookup_kwarg])
return query_string
def querystring_for_isnull(self, changelist):
if self.lookup_val_isnull:
query_string = changelist.get_query_string({},
[self.lookup_kwarg_isnull])
else:
query_string = changelist.get_query_string({
self.lookup_kwarg_isnull: 'True',
}, [])
return query_string
def has_output(self):
return len(self.lookup_choices) > 1
class MultiSelectFilter(MultiSelectMixin, admin.AllValuesFieldListFilter):
    """
    Multi select filter for all kind of fields.
    """
    def __init__(self, field, request, params, model, model_admin, field_path):
        # GET parameters used by this filter: a comma-separated '__in' lookup
        # plus an optional '__isnull' lookup for the empty choice.
        self.lookup_kwarg = '%s__in' % field_path
        self.lookup_kwarg_isnull = '%s__isnull' % field_path
        lookup_vals = request.GET.get(self.lookup_kwarg)
        # List of currently selected values (split on the comma separator).
        self.lookup_vals = lookup_vals.split(',') if lookup_vals else list()
        self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull)
        self.empty_value_display = model_admin.get_empty_value_display()
        parent_model, reverse_path = reverse_field_path(model, field_path)
        # Obey parent ModelAdmin queryset when deciding which options to show
        if model == parent_model:
            queryset = model_admin.get_queryset(request)
        else:
            queryset = parent_model._default_manager.all()
        # Distinct values of the filtered field become the selectable choices.
        self.lookup_choices = (queryset
                               .distinct()
                               .order_by(field.name)
                               .values_list(field.name, flat=True))
        # Call the grandparent __init__, deliberately bypassing
        # AllValuesFieldListFilter.__init__ (presumably to avoid its own
        # lookup/choice setup - confirm against the upstream filter).
        super(admin.AllValuesFieldListFilter, self).__init__(field, request, params, model, model_admin, field_path)
        self.used_parameters = self.prepare_used_parameters(self.used_parameters)
    def prepare_querystring_value(self, value):
        # mask all commas or these values will be used
        # in a comma-seperated-list as get-parameter
        return str(value).replace(',', '%~')
    def prepare_used_parameters(self, used_parameters):
        # remove comma-mask from list-values for __in-lookups
        for key, value in used_parameters.items():
            if not key.endswith('__in'): continue
            used_parameters[key] = [v.replace('%~', ',') for v in value]
        return used_parameters
    def choices(self, changelist):
        # First choice: 'All' - selected when nothing else is active.
        yield {
            'selected': not self.lookup_vals and self.lookup_val_isnull is None,
            'query_string': changelist.get_query_string({}, [self.lookup_kwarg, self.lookup_kwarg_isnull]),
            'display': _('All'),
        }
        include_none = False
        for val in self.lookup_choices:
            if val is None:
                # Remember that NULL occurred; it gets its own choice below.
                include_none = True
                continue
            val = str(val)
            qval = self.prepare_querystring_value(val)
            yield {
                'selected': qval in self.lookup_vals,
                'query_string': self.querystring_for_choices(qval, changelist),
                'display': val,
            }
        if include_none:
            # Empty-value choice driven by the '__isnull' lookup.
            yield {
                'selected': bool(self.lookup_val_isnull),
                'query_string': self.querystring_for_isnull(changelist),
                'display': self.empty_value_display,
            }
class MultiSelectRelatedFilter(MultiSelectMixin, admin.RelatedFieldListFilter):
    """
    Multi select filter for relation fields.
    """
    def __init__(self, field, request, params, model, model_admin, field_path):
        other_model = get_model_from_relation(field)
        # Lookups: '<path>__<target>__in' for the selected pks and
        # '<path>__isnull' for the empty choice.
        self.lookup_kwarg = '%s__%s__in' % (field_path, field.target_field.name)
        self.lookup_kwarg_isnull = '%s__isnull' % field_path
        lookup_vals = request.GET.get(self.lookup_kwarg)
        self.lookup_vals = lookup_vals.split(',') if lookup_vals else list()
        self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull)
        # Call the grandparent __init__, deliberately bypassing
        # RelatedFieldListFilter.__init__ (its lookup setup differs from ours).
        super(admin.RelatedFieldListFilter, self).__init__(field, request, params, model, model_admin, field_path)
        self.lookup_choices = self.field_choices(field, request, model_admin)
        # Prefer the field's verbose_name; fall back to the related model's.
        if hasattr(field, 'verbose_name'):
            self.lookup_title = field.verbose_name
        else:
            self.lookup_title = other_model._meta.verbose_name
        self.title = self.lookup_title
        self.empty_value_display = model_admin.get_empty_value_display()
    def choices(self, changelist):
        # 'All' choice: selected when no value and no isnull lookup is active.
        yield {
            'selected': not self.lookup_vals and not self.lookup_val_isnull,
            'query_string': changelist.get_query_string(
                {},
                [self.lookup_kwarg, self.lookup_kwarg_isnull]
            ),
            'display': _('All'),
        }
        # One choice per related object (pk, display title).
        for pk_val, val in self.lookup_choices:
            pk_val = str(pk_val)
            yield {
                'selected': pk_val in self.lookup_vals,
                'query_string': self.querystring_for_choices(pk_val, changelist),
                'display': val,
            }
        if self.include_empty_choice:
            yield {
                'selected': bool(self.lookup_val_isnull),
                'query_string': self.querystring_for_isnull(changelist),
                'display': self.empty_value_display,
            }
class MultiSelectRelatedOnlyFilter(MultiSelectRelatedFilter):
    """Like MultiSelectRelatedFilter, but offers only those related objects
    that are actually referenced by the admin's current queryset."""

    def field_choices(self, field, request, model_admin):
        # Collect the primary keys of the related objects that are in use.
        pk_lookup = "%s__pk" % self.field_path
        used_pks = model_admin.get_queryset(request).distinct().values_list(pk_lookup, flat=True)
        # Restrict the regular choice list to exactly those primary keys.
        return field.get_choices(
            include_blank=False,
            limit_choices_to={"pk__in": used_pks},
            ordering=self.field_admin_ordering(field, request, model_admin),
        )
class MultiSelectDropdownFilter(MultiSelectFilter):
    """
    Multi select dropdown filter for all kind of fields.
    """
    template = 'more_admin_filters/multiselectdropdownfilter.html'

    def choices(self, changelist):
        # Single base querystring with both lookup parameters stripped; the
        # dropdown template re-adds selections itself via 'key' and 'value'.
        query_string = changelist.get_query_string({}, [self.lookup_kwarg, self.lookup_kwarg_isnull])
        yield {
            'selected': not self.lookup_vals and self.lookup_val_isnull is None,
            'query_string': query_string,
            'display': _('All'),
        }
        seen_none = False
        for choice in self.lookup_choices:
            if choice is None:
                seen_none = True
                continue
            display = str(choice)
            prepared = self.prepare_querystring_value(display)
            yield {
                'selected': prepared in self.lookup_vals,
                'query_string': query_string,
                'display': display,
                'value': display,
                'key': self.lookup_kwarg,
            }
        if seen_none:
            # NULL occurred among the choices - expose it via the isnull lookup.
            yield {
                'selected': bool(self.lookup_val_isnull),
                'query_string': query_string,
                'display': self.empty_value_display,
                'value': 'True',
                'key': self.lookup_kwarg_isnull,
            }
class MultiSelectRelatedDropdownFilter(MultiSelectRelatedFilter):
    """
    Multi select dropdown filter for relation fields.
    """
    template = 'more_admin_filters/multiselectdropdownfilter.html'

    def choices(self, changelist):
        # Base querystring without either lookup parameter; the dropdown
        # template re-applies selections through 'key' and 'value'.
        query_string = changelist.get_query_string({}, [self.lookup_kwarg, self.lookup_kwarg_isnull])
        yield {
            'selected': not self.lookup_vals and not self.lookup_val_isnull,
            'query_string': query_string,
            'display': _('All'),
        }
        for pk, title in self.lookup_choices:
            pk = str(pk)
            yield {
                'selected': pk in self.lookup_vals,
                'query_string': query_string,
                'display': title,
                'value': pk,
                'key': self.lookup_kwarg,
            }
        if self.include_empty_choice:
            yield {
                'selected': bool(self.lookup_val_isnull),
                'query_string': query_string,
                'display': self.empty_value_display,
                'value': 'True',
                'key': self.lookup_kwarg_isnull,
            }
# Filter for annotated attributes.
# NOTE: The code is more or less the same than admin.FieldListFilter but
# we must not subclass it. Otherwise django's filter setup routine wants a real
# model field.
class BaseAnnotationFilter(admin.ListFilter):
    """
    Baseclass for annotation-list-filters.
    """
    # Name of the annotated attribute this filter operates on.
    attribute_name = None
    # Whether the annotated attribute may be NULL (enables the isnull choice
    # in subclasses such as BooleanAnnotationFilter).
    nullable_attribute = None

    @classmethod
    def init(cls, attribute_name, nullable=True):
        """
        Since filters are listed as classes in ModelAdmin.list_filter we are
        not able to initialize the filter within the ModelAdmin.
        We use this classmethod to setup a filter-class for a specific annotated
        attribute::
            MyModelAdmin(admin.ModelAdmin):
                list_filter = [
                    MyAnnotationListFilter.init('my_annotated_attribute'),
                ]
        """
        # FIX: the dynamic class name was built from the literal string
        # 'cls.__name__' instead of the class's actual name, and the nullable
        # flag was stored under 'nullable' while subclasses read
        # 'nullable_attribute' (so it was never applied).
        attrs = dict(attribute_name=attribute_name, nullable_attribute=nullable)
        return type(cls.__name__ + attribute_name, (cls,), attrs)

    def __init__(self, request, params, model, model_admin):
        # Show the attribute name as the filter title in the sidebar.
        self.title = self.attribute_name
        super().__init__(request, params, model, model_admin)
        # Consume our GET parameters, converting them into lookup values.
        for p in self.expected_parameters():
            if p in params:
                value = params.pop(p)
                self.used_parameters[p] = prepare_lookup_value(p, value)

    def has_output(self):
        return True

    def queryset(self, request, queryset):
        try:
            return queryset.filter(**self.used_parameters)
        except (ValueError, ValidationError) as e:
            # Fields may raise a ValueError or ValidationError when converting
            # the parameters to the correct type.
            raise IncorrectLookupParameters(e)
# NOTE: The code is more or less the same than admin.BooleanFieldListFilter but
# we must not subclass it. Otherwise django's filter setup routine wants a real
# model field.
class BooleanAnnotationFilter(BaseAnnotationFilter):
    """
    Filter for annotated boolean-attributes.
    """
    def __init__(self, request, params, model, model_admin):
        # Lookups: '<attr>__exact' for True/False, '<attr>__isnull' for NULL.
        self.lookup_kwarg = '%s__exact' % self.attribute_name
        self.lookup_kwarg2 = '%s__isnull' % self.attribute_name
        self.lookup_val = params.get(self.lookup_kwarg)
        self.lookup_val2 = params.get(self.lookup_kwarg2)
        super().__init__(request, params, model, model_admin)
        # Convert the '1'/'0' GET values into real booleans for the
        # queryset filter.
        if (self.used_parameters and self.lookup_kwarg in self.used_parameters and
                self.used_parameters[self.lookup_kwarg] in ('1', '0')):
            self.used_parameters[self.lookup_kwarg] = bool(int(self.used_parameters[self.lookup_kwarg]))
    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg2]
    def choices(self, changelist):
        # All / Yes / No choices; selecting one clears the isnull lookup.
        for lookup, title in (
                (None, _('All')),
                ('1', _('Yes')),
                ('0', _('No'))):
            yield {
                'selected': self.lookup_val == lookup and not self.lookup_val2,
                'query_string': changelist.get_query_string({self.lookup_kwarg: lookup}, [self.lookup_kwarg2]),
                'display': title,
            }
        # 'Unknown' (NULL) choice only for nullable annotated attributes.
        if self.nullable_attribute:
            yield {
                'selected': self.lookup_val2 == 'True',
                'query_string': changelist.get_query_string({self.lookup_kwarg2: 'True'}, [self.lookup_kwarg]),
                'display': _('Unknown'),
            }
| StarcoderdataPython |
76539 | <reponame>animeshpatcha/automation<gh_stars>0
import logging
import sys
import logging.handlers
from scapy.all import *
import pprint
import json
def print_packets(pcap):
    """Parse scapy packets into a dict of per-packet header fields.

    For every packet, the Ethernet, 802.1Q VLAN, IP and L4 (ICMP/TCP/UDP)
    header fields that are present are extracted into a flat dict.

    Args:
        pcap: iterable of scapy packets (e.g. the result of rdpcap()).

    Returns:
        dict mapping the packet's position in the capture to a dict of
        extracted header fields.
    """
    pkt_structure = {}
    # FIX: enumerate replaces the manual pkt_counter bookkeeping; the unused
    # 'pcap_dict' local of the original implementation was dropped.
    for pkt_counter, pkt in enumerate(pcap):
        fields = pkt_structure[pkt_counter] = {}
        if pkt.haslayer(Ether):
            fields['src_mac'] = str(pkt.getlayer(Ether).src)
            fields['dst_mac'] = str(pkt.getlayer(Ether).dst)
            fields['ethtype'] = str(hex(pkt.getlayer(Ether).type))
        if pkt.haslayer(Dot1Q):
            fields['outer_vlan_id'] = int(pkt[Dot1Q:1].vlan)
            # scapy raises IndexError when the requested layer occurrence
            # does not exist - used here to probe for stacked VLAN tags.
            try:
                if pkt[Dot1Q:3].vlan:
                    fields['inner_most_vlan_id'] = int(pkt[Dot1Q:3].vlan)
            except IndexError:
                log.info('Third VLAN does not exist')
            try:
                if pkt[Dot1Q:2].vlan:
                    fields['inner_vlan_id'] = int(pkt[Dot1Q:2].vlan)
            except IndexError:
                log.info('Second VLAN does not exist')
        if pkt.haslayer(IP):
            log.info('This is an IP Packet')
            # FIX: fetch the IP layer explicitly; 'pkt.payload' is the Dot1Q
            # layer (not IP) for VLAN-tagged frames.
            ip = pkt.getlayer(IP)
            fields['src_ip'] = str(ip.src)
            fields['dst_ip'] = str(ip.dst)
            fields['pkt_ttl'] = int(ip.ttl)
            fields['tos'] = str(ip.tos)
            # The proto values below are mutually exclusive, hence elif.
            if ip.proto == 1:
                log.info('This is an ICMP Packet')
                icmp = ip.payload
                fields['ip_proto'] = 'ICMP'
                fields['type'] = int(icmp.type)
                fields['code'] = int(icmp.code)
                fields['checksum'] = hex(icmp.chksum)
            elif ip.proto == 6:
                log.info('This is an TCP Packet')
                tcp = ip.payload
                fields['ip_proto'] = 'TCP'
                fields['src_port'] = int(tcp.sport)
                fields['dst_port'] = int(tcp.dport)
                fields['checksum'] = hex(tcp.chksum)
                fields['tcp_window'] = int(tcp.window)
            elif ip.proto == 17:
                log.info('This is an UDP Packet')
                udp = ip.payload
                fields['ip_proto'] = 'UDP'
                fields['src_port'] = int(udp.sport)
                fields['dst_port'] = int(udp.dport)
                fields['checksum'] = hex(udp.chksum)
    return pkt_structure
def test(pcap_file):
    """Read a pcap file and print its parsed contents as JSON."""
    packets = rdpcap(pcap_file)
    json_data = json.dumps(print_packets(packets))
    # FIX: print as a function call - valid under both Python 2 and 3,
    # unlike the original Python-2-only print statement.
    print(json_data)
def usage():
    """Print the command line usage message."""
    # FIX: print as a function call - valid under both Python 2 and 3.
    print("\n Usage: python parse_packet.py <FILENAME>")
if __name__ == '__main__':
    # Module-level logger writing to the local syslog socket (Linux only).
    log = logging.getLogger(__name__)
    log.setLevel(logging.INFO)
    handler = logging.handlers.SysLogHandler(address = '/dev/log')
    formatter = logging.Formatter('%(module)s.%(funcName)s: %(message)s')
    handler.setFormatter(formatter)
    log.addHandler(handler)
    # Expect exactly one argument: the pcap file to parse.
    if (len(sys.argv) == 2):
        test(sys.argv[1])
    else:
        usage()
        sys.exit()
| StarcoderdataPython |
9682735 | <filename>DJITelloPy/examples/manual_control_pygame.py
from djitellopy import Tello
import cv2
import pygame
import numpy as np
import time
from robomaster import robot
from tkinter import *
from tkinter import messagebox
# Speed of the drone (magnitude sent with every rc command, range -100..100).
S = 60
# Frames per second of the pygame window display
# A low number also results in input lag, as input information is processed once per frame.
FPS = 120
class FrontEnd(object):
    """Maintains the Tello display and moves it through the keyboard keys.

    Press escape key to quit.
    The controls are:
    - T: Takeoff
    - L: Land
    - Arrow keys: Forward, backward, left and right.
    - A and D: Counter clockwise and clockwise rotations (yaw)
    - W and S: Up and down.
    - F: Backflip
    - M: Locate a mission pad and land on it.
    """

    def __init__(self):
        # Init pygame and create the video window.
        pygame.init()
        pygame.display.set_caption("Tello video stream")
        self.screen = pygame.display.set_mode([960, 720])

        # Init Tello object that interacts with the Tello drone.
        self.tello = Tello()

        # Drone velocities between -100~100.
        self.for_back_velocity = 0
        self.left_right_velocity = 0
        self.up_down_velocity = 0
        self.yaw_velocity = 0
        self.speed = 10

        # Drone navigation recording: one (left/right, for/back, up/down, yaw)
        # tuple per rc command that was actually sent.
        self.nav_record = []

        self.send_rc_control = False
        # Create update timer firing FPS times per second; each tick sends the
        # current velocities to the drone via self.update().
        pygame.time.set_timer(pygame.USEREVENT + 1, 1000 // FPS)

    def get_tof_distance(self, tl_drone):
        """Return the external TOF sensor reading in mm (8192 if out of range)."""
        return tl_drone.sensor.get_ext_tof()

    def object_detected_distance(self, tl_drone, distance_tolerance=609):
        """Return True if an object is closer than *distance_tolerance* mm."""
        return self.get_tof_distance(tl_drone) < distance_tolerance

    def land_on_mission_pad(self):
        """Try up to 5 times to detect a mission pad and land on it.

        Requires mission pad detection to be enabled beforehand (see keydown).
        """
        # FIX: bounded for-loop - the original while-loop skipped its counter
        # increment via 'continue' after a failed landing and could spin
        # forever.
        for _ in range(5):
            try:
                current_pad = self.tello.get_mission_pad_id()
            except Exception as ex:  # FIX: 'EXCEPTION' was an undefined name
                current_pad = -1
                print(ex)
            # -1 means no pad is currently detected - try again.
            if current_pad == -1:
                continue
            pygame.display.set_caption("LANDING on MissionPad: " + str(current_pad))
            try:
                self.tello.rotate_clockwise(90)
                # Center above the pad at 40cm altitude, speed 25.
                self.tello.go_xyz_speed_mid(0, 0, 40, 25, current_pad)
                self.tello.land()
                self.send_rc_control = False
                break
            except Exception:
                # Landing attempt failed - retry the detection loop.
                continue

    def run(self):
        """Main loop: connect, stream video and dispatch pygame events."""
        self.tello.connect()
        self.tello.set_speed(self.speed)

        # In case streaming is on. This happens when we quit this program
        # without the escape key.
        self.tello.streamoff()
        self.tello.streamon()

        frame_read = self.tello.get_frame_read()

        should_stop = False
        while not should_stop:
            for event in pygame.event.get():
                if event.type == pygame.USEREVENT + 1:
                    # Timer tick: push the current velocities to the drone.
                    self.update()
                elif event.type == pygame.QUIT:
                    should_stop = True
                elif event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        should_stop = True
                    else:
                        self.keydown(event.key)
                elif event.type == pygame.KEYUP:
                    self.keyup(event.key)

            if frame_read.stopped:
                break

            self.screen.fill([0, 0, 0])

            frame = frame_read.frame
            # Overlay the battery level in the bottom-left corner.
            text = "Battery: {}%".format(self.tello.get_battery())
            cv2.putText(frame, text, (5, 720 - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            # OpenCV delivers BGR; convert and reorient for pygame's surface.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = np.rot90(frame)
            frame = np.flipud(frame)

            frame = pygame.surfarray.make_surface(frame)
            self.screen.blit(frame, (0, 0))
            pygame.display.update()

            time.sleep(1 / FPS)

        # Always called before finishing, to deallocate resources.
        self.tello.end()

    def keydown(self, key):
        """Update velocities (or trigger actions) based on a pressed key.

        Arguments:
            key: pygame key constant from the event loop.
        """
        if key == pygame.K_UP:  # set forward velocity
            self.for_back_velocity = S
        elif key == pygame.K_DOWN:  # set backward velocity
            self.for_back_velocity = -S
        elif key == pygame.K_LEFT:  # set left velocity
            self.left_right_velocity = -S
        elif key == pygame.K_RIGHT:  # set right velocity
            self.left_right_velocity = S
        elif key == pygame.K_w:  # set up velocity
            self.up_down_velocity = S
        elif key == pygame.K_s:  # set down velocity
            self.up_down_velocity = -S
        elif key == pygame.K_a:  # set yaw counter clockwise velocity
            self.yaw_velocity = -S
        elif key == pygame.K_d:  # set yaw clockwise velocity
            self.yaw_velocity = S
        elif key == pygame.K_f:  # backflip
            self.tello.flip_back()
        elif key == pygame.K_m:  # enable mission pad detection and land
            self.tello.enable_mission_pads()
            self.land_on_mission_pad()
            self.tello.disable_mission_pads()

    def keyup(self, key):
        """Reset velocities (or takeoff/land) based on a released key.

        Arguments:
            key: pygame key constant from the event loop.
        """
        if key == pygame.K_UP or key == pygame.K_DOWN:  # zero forward/backward
            self.for_back_velocity = 0
        elif key == pygame.K_LEFT or key == pygame.K_RIGHT:  # zero left/right
            self.left_right_velocity = 0
        elif key == pygame.K_w or key == pygame.K_s:  # zero up/down
            self.up_down_velocity = 0
        elif key == pygame.K_a or key == pygame.K_d:  # zero yaw
            self.yaw_velocity = 0
        elif key == pygame.K_t:  # takeoff
            self.tello.takeoff()
            self.send_rc_control = True
        elif key == pygame.K_l:  # land
            self.tello.land()
            self.send_rc_control = False

    def update(self):
        """Send the current velocities to the Tello and record them."""
        if self.send_rc_control:
            command = (self.left_right_velocity, self.for_back_velocity,
                       self.up_down_velocity, self.yaw_velocity)
            self.tello.send_rc_control(*command)
            # FIX: append() was called without an argument (TypeError at
            # runtime); record the rc command that was just sent.
            self.nav_record.append(command)
def main():
    """Entry point: build the frontend and hand over control to it."""
    FrontEnd().run()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
4930030 | <reponame>ATrain951/01.python-com_Qproject<gh_stars>1-10
#!/bin/python3
#
# Complete the kMarsh function below.
#
def kMarsh(grid):
    """Print the maximum perimeter of an axis-aligned rectangle whose
    entire border avoids marsh cells ('x'), or 'impossible' if none exists.

    grid is a list of equal-length strings of '.' and 'x'.
    """
    n_rows = len(grid)
    n_cols = len(grid[0])
    # run_up[r][c]   = consecutive marsh-free cells directly above (r, c)
    # run_left[r][c] = consecutive marsh-free cells directly left of (r, c)
    run_up = [[0] * n_cols for _ in range(n_rows)]
    run_left = [[0] * n_cols for _ in range(n_rows)]
    for r in range(n_rows):
        for c in range(n_cols):
            if c > 0:
                run_left[r][c] = run_left[r][c - 1] + 1 if grid[r][c - 1] != 'x' else 0
            if r > 0:
                run_up[r][c] = run_up[r - 1][c] + 1 if grid[r - 1][c] != 'x' else 0
    best = 0
    # Treat every marsh-free cell (r, c) as a bottom-right corner and scan
    # candidate top rows (top) and left columns (lft), pruning whenever the
    # remaining perimeter cannot beat the current best.
    for r in range(1, n_rows):
        for c in range(1, n_cols):
            if grid[r][c] == 'x':
                continue
            top = r - run_up[r][c]
            lft = 0
            while top < r and 2 * (r - top) + 2 * (c - lft) > best:
                lft = max(c - run_left[top][c], c - run_left[r][c])
                # Push the left column right until the left border is clear.
                while run_up[r][lft] < r - top and lft < c and 2 * (r - top) + 2 * (c - lft) > best:
                    lft += 1
                if lft < c and run_left[r][c] >= c - lft and grid[top][lft] != 'x':
                    best = max(best, 2 * (r - top) + 2 * (c - lft))
                top += 1
                lft = 0
    print(best if best > 0 else 'impossible')
if __name__ == '__main__':
    # First input line: grid dimensions (m rows, n columns).
    mn = input().split()
    m = int(mn[0])
    n = int(mn[1])
    # Read the m grid rows, one per line.
    grid = []
    for _ in range(m):
        grid_item = input()
        grid.append(grid_item)
    kMarsh(grid)
| StarcoderdataPython |
3519144 | <filename>ExPy/ExPy/module53.py
"""Todo List"""
import sqlite3
def list_notes():
    """Print every note in the database as '<id> : <note>'."""
    with sqlite3.connect("module53.db") as connection:
        cursor = connection.execute("select id, note from notes")
        # FIX: 'note_id' avoids shadowing the builtin id().
        for note_id, note in cursor.fetchall():
            print(note_id, ":", note)
def add_notes():
    """Prompt for notes until an empty line is entered and insert them.

    FIX: uses a parameterized executemany() instead of hand-built SQL.
    The original joined the value tuples with spaces (invalid SQL for more
    than one note) and escaped quotes by hand; placeholders handle any
    input safely and also prevent SQL injection.
    """
    notes = []
    while True:
        note = input("Add a note: ")
        if note == "":
            break
        notes.append(note)
    if not notes:
        return
    with sqlite3.connect("module53.db") as connection:
        connection.executemany(
            "insert into notes (note) values (?)",
            [(note,) for note in notes],
        )
        connection.commit()
def remove_notes():
    """Prompt for note ids until an empty line is entered and delete them.

    FIX: uses '?' placeholders instead of interpolating user input into the
    SQL text, and returns early when no ids were entered (the original
    issued 'delete ... where id in ()', which is a syntax error).
    """
    ids = []
    while True:
        note_id = input("Remove note by id: ")
        if note_id == "":
            break
        ids.append(note_id)
    if not ids:
        return
    with sqlite3.connect("module53.db") as connection:
        placeholders = ", ".join("?" for _ in ids)
        connection.execute("delete from notes where id in (%s)" % placeholders, ids)
        connection.commit()
if __name__ == "__main__":
add_notes()
list_notes()
remove_notes()
| StarcoderdataPython |
8168535 | <reponame>OpendataCH/froide
from django.conf import settings
from django.urls import include, path, re_path
from django.utils.translation import pgettext_lazy
from ..views import AttachmentFileDetailView, auth, project_shortlink, search, shortlink
from . import (
account_urls,
list_requests_urls,
make_request_urls,
project_urls,
request_urls,
)
# Root URL patterns of the foirequest app. Each literal URL fragment is
# wrapped in pgettext_lazy() so the URLs themselves are translatable.
urlpatterns = [
    # Translators: request URL
    path(pgettext_lazy("url part", "make-request/"), include(make_request_urls)),
    # Translators: URL part
    path(pgettext_lazy("url part", "requests/"), include(list_requests_urls)),
    # Translators: request URL
    path(pgettext_lazy("url part", "request/"), include(request_urls)),
    # Translators: project URL
    path(pgettext_lazy("url part", "project/"), include(project_urls)),
    # Translators: project URL
    path(pgettext_lazy("url part", "account/"), include(account_urls)),
    # Translators: request URL
    path(pgettext_lazy("url part", "search/"), search, name="foirequest-search"),
    # Translators: Short request URL
    # Translators: Short project URL
    re_path(
        pgettext_lazy("url part", r"^p/(?P<obj_id>\d+)/?$"),
        project_shortlink,
        name="foirequest-project_shortlink",
    ),
    # Translators: Short-request auth URL
    re_path(
        pgettext_lazy("url part", r"^r/(?P<obj_id>\d+)/auth/(?P<code>[0-9a-f]+)/$"),
        auth,
        name="foirequest-auth",
    ),
    re_path(
        pgettext_lazy("url part", r"^r/(?P<obj_id>\d+)(?P<url_path>/[\w/\-/]+)$"),
        shortlink,
        name="foirequest-shortlink_url",
    ),
    # NOTE: bare shortlink must come after the more specific r/<id>/...
    # patterns above so those can match first.
    re_path(
        pgettext_lazy("url part", r"^r/(?P<obj_id>\d+)"),
        shortlink,
        name="foirequest-shortlink",
    ),
]
MEDIA_PATH = settings.MEDIA_URL
# Split off domain and leading slash
if MEDIA_PATH.startswith("http"):
    MEDIA_PATH = MEDIA_PATH.split("/", 3)[-1]
else:
    MEDIA_PATH = MEDIA_PATH[1:]
# Authenticated access to message attachments served under the media path.
urlpatterns += [
    path(
        "%s%s/<int:message_id>/<str:attachment_name>"
        % (MEDIA_PATH, settings.FOI_MEDIA_PATH),
        AttachmentFileDetailView.as_view(),
        name="foirequest-auth_message_attachment",
    )
]
35232 | #this file contains models that I have tried out for different tasks, which are reusable
#plus it has the training framework for those models given data - each model has its own data requirements
import numpy as np
import common_libs.utilities as ut
import random
import torch.nn as nn
import torch.autograd as autograd
import torch.optim as optim
import torch
import math
class ModelAbs(nn.Module):
    """
    Abstract model without the forward method.

    lstm for processing tokens in sequence and linear layer for output generation
    lstm is a uni-directional single layer lstm

    num_classes = 1 - for regression
    num_classes = n - for classifying into n classes
    """

    def __init__(self, hidden_size, embedding_size, num_classes):
        super(ModelAbs, self).__init__()
        self.hidden_size = hidden_size
        self.name = 'should be overridden'

        # numpy array with batchsize, embedding_size
        self.embedding_size = embedding_size
        self.num_classes = num_classes

        # lstm - input size, hidden size, num layers
        self.lstm_token = nn.LSTM(self.embedding_size, self.hidden_size)

        # hidden state for the rnn
        self.hidden_token = self.init_hidden()

        # linear layer for regression - in_features, out_features
        self.linear = nn.Linear(self.hidden_size, self.num_classes)

    def init_hidden(self):
        """Return a fresh zeroed (h_0, c_0) pair for the token lstm."""
        return (autograd.Variable(torch.zeros(1, 1, self.hidden_size)),
                autograd.Variable(torch.zeros(1, 1, self.hidden_size)))

    def set_learnable_embedding(self, mode, dictsize, seed=None):
        """Configure the token embeddings.

        mode 'none'   - train an nn.Embedding from scratch
        mode 'seed'   - train an nn.Embedding seeded from word2vec vectors
        mode 'learnt' - use the given (fixed) word2vec vectors directly
        """
        self.mode = mode
        if mode != 'learnt':
            embedding = nn.Embedding(dictsize, self.embedding_size)
            if mode == 'none':
                # FIX: Python-2 print statements converted to calls so the
                # module parses under Python 3; message typo corrected.
                print('learn embeddings from scratch...')
                initrange = 0.5 / self.embedding_size
                embedding.weight.data.uniform_(-initrange, initrange)
                self.final_embeddings = embedding
            elif mode == 'seed':
                print('seed by word2vec vectors....')
                embedding.weight.data = torch.FloatTensor(seed)
                self.final_embeddings = embedding
        else:
            print('using learnt word2vec embeddings...')
            self.final_embeddings = seed

    def remove_refs(self, item):
        """Remove any references that inhibit garbage collection (no-op here)."""
        return
class ModelSequentialRNN(ModelAbs):
    """
    Prediction at every hidden state of the unrolled rnn.
    Input - sequence of tokens processed in sequence by the lstm
    Output - predictions at the every hidden state
    uses lstm and linear setup of ModelAbs
    each hidden state is given as a seperate batch to the linear layer
    """
    def __init__(self, hidden_size, embedding_size, num_classes, intermediate):
        super(ModelSequentialRNN, self).__init__(hidden_size, embedding_size, num_classes)
        # intermediate=True -> predict at every timestep; False -> final state only.
        if intermediate:
            self.name = 'sequential RNN intermediate'
        else:
            self.name = 'sequential RNN'
        self.intermediate = intermediate
    def forward(self, item):
        # item.x: token ids for one example (set up via set_learnable_embedding).
        self.hidden_token = self.init_hidden()
        #convert to tensor
        if self.mode == 'learnt':
            # Fixed word2vec vectors: look up each token by index.
            acc_embeds = []
            for token in item.x:
                acc_embeds.append(self.final_embeddings[token])
            embeds = torch.FloatTensor(acc_embeds)
        else:
            embeds = self.final_embeddings(torch.LongTensor(item.x))
        #prepare for lstm - seq len, batch size, embedding size
        seq_len = embeds.shape[0]
        embeds_for_lstm = embeds.unsqueeze(1)
        #lstm outputs
        #output, (h_n,c_n)
        #output - (seq_len, batch = 1, hidden_size * directions) - h_t for each t final layer only
        #h_n - (layers * directions, batch = 1, hidden_size) - h_t for t = seq_len
        #c_n - (layers * directions, batch = 1, hidden_size) - c_t for t = seq_len
        #lstm inputs
        #input, (h_0, c_0)
        #input - (seq_len, batch, input_size)
        lstm_out, self.hidden_token = self.lstm_token(embeds_for_lstm, self.hidden_token)
        if self.intermediate:
            #input to linear - seq_len, hidden_size (seq_len is the batch size for the linear layer)
            #output - seq_len, num_classes
            values = self.linear(lstm_out[:,0,:].squeeze()).squeeze()
        else:
            #input to linear - hidden_size
            #output - num_classes
            values = self.linear(self.hidden_token[0].squeeze()).squeeze()
        return values
class ModelHierarchicalRNN(ModelAbs):
    """
    Prediction at every hidden state of the unrolled rnn for instructions.
    Input - sequence of tokens processed in sequence by the lstm but seperated into instructions
    Output - predictions at the every hidden state
    lstm predicting instruction embedding for sequence of tokens
    lstm_ins processes sequence of instruction embeddings
    linear layer process hidden states to produce output
    """
    def __init__(self, hidden_size, embedding_size, num_classes, intermediate):
        super(ModelHierarchicalRNN, self).__init__(hidden_size, embedding_size, num_classes)
        self.hidden_ins = self.init_hidden()
        # Second-level lstm over instruction embeddings.
        self.lstm_ins = nn.LSTM(self.hidden_size, self.hidden_size)
        if intermediate:
            self.name = 'hierarchical RNN intermediate'
        else:
            self.name = 'hierarchical RNN'
        self.intermediate = intermediate
    def copy(self, model):
        # Share the learned layers of another (compatible) model instance.
        self.linear = model.linear
        self.lstm_token = model.lstm_token
        self.lstm_ins = model.lstm_ins
    def forward(self, item):
        # item.x: list of instructions, each a list of token ids.
        self.hidden_token = self.init_hidden()
        self.hidden_ins = self.init_hidden()
        # NOTE(review): this buffer is sized with embedding_size but stores
        # hidden states of size hidden_size - works only when the two are
        # equal; confirm against the training configuration.
        ins_embeds = autograd.Variable(torch.zeros(len(item.x),self.embedding_size))
        for i, ins in enumerate(item.x):
            if self.mode == 'learnt':
                # Fixed word2vec vectors: look up each token by index.
                acc_embeds = []
                for token in ins:
                    acc_embeds.append(self.final_embeddings[token])
                token_embeds = torch.FloatTensor(acc_embeds)
            else:
                token_embeds = self.final_embeddings(torch.LongTensor(ins))
            #token_embeds = torch.FloatTensor(ins)
            token_embeds_lstm = token_embeds.unsqueeze(1)
            # Final hidden state of the token lstm becomes the instruction embedding.
            out_token, hidden_token = self.lstm_token(token_embeds_lstm,self.hidden_token)
            ins_embeds[i] = hidden_token[0].squeeze()
        ins_embeds_lstm = ins_embeds.unsqueeze(1)
        out_ins, hidden_ins = self.lstm_ins(ins_embeds_lstm, self.hidden_ins)
        if self.intermediate:
            # Predict at every instruction position.
            values = self.linear(out_ins[:,0,:]).squeeze()
        else:
            # Predict from the final instruction-lstm hidden state only.
            values = self.linear(hidden_ins[0].squeeze()).squeeze()
        return values
class ModelHierarchicalRNNRelational(ModelAbs):
    """Hierarchical RNN with a relation-network head.

    Token lstm -> instruction embeddings -> instruction lstm; every pair of
    instruction outputs is pushed through a two-layer MLP (g-network) and the
    summed result is mapped to the output by the linear layer.
    """
    def __init__(self, embedding_size, num_classes, hidden_size=256):
        # FIX: ModelAbs.__init__ expects (hidden_size, embedding_size,
        # num_classes); the original call omitted hidden_size and always
        # raised a TypeError. hidden_size is a new, defaulted parameter so
        # existing call sites keep working.
        super(ModelHierarchicalRNNRelational, self).__init__(hidden_size, embedding_size, num_classes)
        self.hidden_ins = self.init_hidden()
        self.lstm_ins = nn.LSTM(self.hidden_size, self.hidden_size)
        # g-network of the relation module: pairwise concat -> 2-layer MLP.
        self.linearg1 = nn.Linear(2 * self.hidden_size, self.hidden_size)
        self.linearg2 = nn.Linear(self.hidden_size, self.hidden_size)
    def forward(self, item):
        # item.x: list of instructions, each a list of token ids.
        self.hidden_token = self.init_hidden()
        self.hidden_ins = self.init_hidden()
        ins_embeds = autograd.Variable(torch.zeros(len(item.x),self.hidden_size))
        for i, ins in enumerate(item.x):
            if self.mode == 'learnt':
                # Fixed word2vec vectors: look up each token by index.
                acc_embeds = []
                for token in ins:
                    acc_embeds.append(self.final_embeddings[token])
                token_embeds = torch.FloatTensor(acc_embeds)
            else:
                token_embeds = self.final_embeddings(torch.LongTensor(ins))
            token_embeds_lstm = token_embeds.unsqueeze(1)
            # Final hidden state of the token lstm = instruction embedding.
            out_token, hidden_token = self.lstm_token(token_embeds_lstm,self.hidden_token)
            ins_embeds[i] = hidden_token[0].squeeze()
        ins_embeds_lstm = ins_embeds.unsqueeze(1)
        out_ins, hidden_ins = self.lstm_ins(ins_embeds_lstm, self.hidden_ins)
        seq_len = len(item.x)
        # Relation network: sum g(out_i, out_j) over all ordered pairs i <= j.
        g_variable = autograd.Variable(torch.zeros(self.hidden_size))
        for i in range(seq_len):
            for j in range(i,seq_len):
                concat = torch.cat((out_ins[i].squeeze(),out_ins[j].squeeze()),0)
                g1 = nn.functional.relu(self.linearg1(concat))
                g2 = nn.functional.relu(self.linearg2(g1))
                g_variable += g2
        output = self.linear(g_variable)
        return output
class ModelSequentialRNNComplex(nn.Module):
    """
    Prediction using the final hidden state of the unrolled rnn.
    Input - sequence of tokens processed in sequence by the lstm
    Output - the final value to be predicted

    Not derived from ModelAbs: uses a multi-layer lstm and a deep MLP with a
    non-linear activation to predict the final output.
    """
    def __init__(self, embedding_size):
        # FIX: super() referenced the class's former name 'ModelFinalHidden',
        # which no longer exists and raised a NameError on construction.
        super(ModelSequentialRNNComplex, self).__init__()
        self.name = 'sequential RNN'
        self.hidden_size = 256
        self.embedding_size = embedding_size
        self.layers = 2
        self.directions = 1
        self.is_bidirectional = (self.directions == 2)
        self.lstm_token = torch.nn.LSTM(input_size = self.embedding_size,
                                        hidden_size = self.hidden_size,
                                        num_layers = self.layers,
                                        bidirectional = self.is_bidirectional)
        # Two-layer MLP head over the concatenated final hidden states.
        self.linear1 = nn.Linear(self.layers * self. directions * self.hidden_size, self.hidden_size)
        self.linear2 = nn.Linear(self.hidden_size,1)
        self.hidden_token = self.init_hidden()
    def init_hidden(self):
        """Return a zeroed (h_0, c_0) pair sized for layers * directions."""
        return (autograd.Variable(torch.zeros(self.layers * self.directions, 1, self.hidden_size)),
                autograd.Variable(torch.zeros(self.layers * self.directions, 1, self.hidden_size)))
    def forward(self, item):
        # NOTE(review): self.mode / self.final_embeddings are expected to be
        # set externally (cf. ModelAbs.set_learnable_embedding) - confirm.
        self.hidden_token = self.init_hidden()
        #convert to tensor
        if self.mode == 'learnt':
            acc_embeds = []
            for token in item.x:
                acc_embeds.append(self.final_embeddings[token])
            embeds = torch.FloatTensor(acc_embeds)
        else:
            embeds = self.final_embeddings(torch.LongTensor(item.x))
        #prepare for lstm - seq len, batch size, embedding size
        seq_len = embeds.shape[0]
        embeds_for_lstm = embeds.unsqueeze(1)
        lstm_out, self.hidden_token = self.lstm_token(embeds_for_lstm, self.hidden_token)
        # Flatten all final hidden states and run them through the MLP head.
        f1 = nn.functional.relu(self.linear1(self.hidden_token[0].squeeze().view(-1)))
        f2 = self.linear2(f1)
        return f2
| StarcoderdataPython |
6514588 | import setuptools
import versioneer
requirements = [
    # package requirements go here
]
# Use the README as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name='openff-spellbook',
    # Version and build commands are derived from git tags via versioneer.
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    description="Handy functionality for working with OpenFF data",
    license="MIT",
    author="<NAME>",
    author_email='<EMAIL>',
    url='https://github.com/mobleylab/conan-recipes',
    long_description=long_description,
    long_description_content_type="text/markdown",
    # Automatically discover all packages in the repository.
    packages=setuptools.find_packages(),
    install_requires=requirements,
    keywords='openff-spellbook',
    classifiers=[
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
# packages=['offsb',
# 'offsb/op',
# 'offsb/tools',
# 'offsb/search',
# 'offsb/qcarchive',
# 'offsb/rdutil',
# 'treedi'],
| StarcoderdataPython |
283064 | import logging
import pytest
from python_ci_cd_sample.log import (
create_logger,
log_return,
log_exception,
inject_logger,
LogError,
)
_LOGGER1 = "test_logger1"
_LOGGER2 = "test_logger2"
_NOT_EXISTS_LOGGER = "test_logger_not_exists"
_EXCEPTION_LOGGER = "test_exception_logger"
create_logger(_LOGGER1)
create_logger(_LOGGER2, file_path=f"tests/{_LOGGER2}.log")
create_logger(
_EXCEPTION_LOGGER,
file_path=f"tests/{_EXCEPTION_LOGGER}.log",
)
def test_log_return():
    """log_return must log the wrapped callable's return value and raise
    LogError for unknown logger names."""
    cases = [
        (_LOGGER1, "Return value logging! (1)"),
        (_LOGGER2, "Return value logging! (2)"),
    ]
    for logger_name, message in cases:
        log_return(name=logger_name)(lambda msg=message: msg)()
    with pytest.raises(LogError):
        log_return(name=_NOT_EXISTS_LOGGER)(
            lambda: "Trying to log with a non-existent logger."
        )()
def test_inject_logger():
    """inject_logger must attach the requested logger(s) as a function
    attribute and raise LogError for unknown logger names."""
    @inject_logger(name=_LOGGER1)
    def one_logger():
        # Single name: .logger is the logger object itself.
        logger: logging.Logger = one_logger.logger
        logger.info("Inject logger log! (1)")
    @inject_logger(name=[_LOGGER1, _LOGGER2])
    def multi_logger():
        # List of names: .logger is a sequence of loggers.
        logger1 = multi_logger.logger[0]
        logger2 = multi_logger.logger[1]
        logger1.info("Inject logger log! (1)")
        logger2.info("Inject logger log! (2)")
    @inject_logger(name=[_NOT_EXISTS_LOGGER])
    def inject_logger_not_exists():
        logger: logging.Logger = inject_logger_not_exists.logger
        logger.info("Inject logger log! (2)")
    one_logger()
    multi_logger()
    # The unknown logger name must raise when the function is called.
    with pytest.raises(LogError):
        inject_logger_not_exists()
def test_log_exception():
    """log_exception re-raises exceptions from the wrapped callable."""
    failing = log_exception(name=_EXCEPTION_LOGGER)(lambda: 1 / 0)
    with pytest.raises(ZeroDivisionError):
        failing()
| StarcoderdataPython |
4839780 | <reponame>vsverchinsky/conan-recipes<filename>breakpad/0.1/conanfile.py
from conans import ConanFile, CMake, AutoToolsBuildEnvironment, tools
class BreakpadConan(ConanFile):
    """Conan recipe building Google Breakpad's crash-reporting components."""
    name = "breakpad"
    license = "https://chromium.googlesource.com/breakpad/breakpad/+/refs/heads/master/LICENSE"
    url = "https://github.com/audacity/conan-recipes"
    description = "Breakpad is a set of client and server components which implement a crash-reporting system."
    topics = ("breakpad", "crash-reporting")
    settings = "os", "compiler", "build_type", "arch"
    options = {"fPIC": [True, False]}
    default_options = {"fPIC": True}
    generators = "cmake"
    exports_sources=["CMakeLists.txt", "patches/*"]
    # Upstream branch to check out in source().
    branch = "chrome_90"

    def config_options(self):
        # fPIC has no meaning on Windows/MSVC; drop the option there.
        if self.settings.os == "Windows":
            del self.options.fPIC

    def source(self):
        # Clone breakpad; on Linux it additionally needs linux-syscall-support.
        self.run('git clone https://chromium.googlesource.com/breakpad/breakpad --branch %s' %self.branch)
        if self.settings.os == 'Linux':
            self.run("git clone https://chromium.googlesource.com/linux-syscall-support breakpad/src/third_party/lss")
        if self.settings.os == "Windows":
            tools.patch(base_path="breakpad", patch_file="patches/win-hide-attachment-full-names.diff")
        #TODO: rewrite to cmake configure?
        # NOTE(review): running the autotools configure step in source() (not
        # build()) caches its output alongside the sources — confirm intent.
        if self.settings.os == 'Linux' or self.settings.os == 'Macos':
            autotools = AutoToolsBuildEnvironment(self)
            autotools.configure(configure_dir='breakpad')

    def requirements(self):
        # The `sender` component links libcurl on POSIX (see package_info).
        if self.settings.os == 'Linux' or self.settings.os == 'Macos':
            self.requires('libcurl/7.77.0')

    def build(self):
        # Build via the CMakeLists.txt shipped with this recipe (exports_sources).
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def package(self):
        # Copy public headers per sub-tree, then any built artifacts by extension.
        self.copy("*.h", dst="include/google_breakpad", src="breakpad/src/google_breakpad")
        self.copy("*.h", dst="include/processor", src="breakpad/src/processor")
        self.copy("*.h", dst="include/client", src="breakpad/src/client")
        self.copy("*.h", dst="include/common", src="breakpad/src/common")
        self.copy("*.h", dst="include/third_party", src="breakpad/src/third_party")
        self.copy("*.lib", dst="lib", keep_path=False)
        self.copy("*.dll", dst="bin", keep_path=False)
        self.copy("*.so", dst="lib", keep_path=False)
        self.copy("*.dylib", dst="lib", keep_path=False)
        self.copy("*.a", dst="lib", keep_path=False)

    def package_info(self):
        # Expose three linkable components: client, processor and sender.
        self.cpp_info.components["client"].libs.append("breakpad_client")
        self.cpp_info.components["processor"].libs.append("breakpad_processor")
        self.cpp_info.components["sender"].libs.append("breakpad_sender")
        if self.settings.os == 'Linux' or self.settings.os == 'Macos':
            self.cpp_info.components['sender'].requires.append('libcurl::libcurl')
| StarcoderdataPython |
9631826 | <reponame>tuxtof/calm-dsl
"""
Calm DSL Runbook Sample for input task
"""
from calm.dsl.runbooks import runbook, runbook_json
from calm.dsl.runbooks import RunbookTask as Task, RunbookVariable as Variable
# Inline escript executed by the Exec_Task below; the @@{...}@@ macros are
# substituted at runtime from the Input_Task variables of the same names.
code = """print "Hello @@{user_name}@@"
print "Your Password is @@{password}@@"
print "Date you selected is @@{date}@@"
print "Time selected is @@{time}@@"
print "User selected is @@{user}@@"
"""
@runbook
def DslInputRunbook():
    "Runbook Service example"
    # Prompt the runner for five inputs of different widget types; the
    # collected values become runtime macros used by the escript task below.
    Task.Input(
        name="Input_Task",
        inputs=[
            Variable.TaskInput("user_name"),
            Variable.TaskInput("password", input_type="password"),
            Variable.TaskInput("date", input_type="date"),
            Variable.TaskInput("time", input_type="time"),
            Variable.TaskInput(
                "user", input_type="select", options=["user1", "user2", "user3"]
            ),
        ],
    )
    # Execute the module-level escript that echoes the inputs above.
    Task.Exec.escript(name="Exec_Task", script=code)
def main():
    # Print the compiled JSON representation of the runbook to stdout.
    print(runbook_json(DslInputRunbook))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1973198 | from dynoup import ma
from scaler.models import Check, App
class CheckSchema(ma.Schema):
    """Marshmallow schema serializing a Check."""
    # Dump-only fields: set server-side, never loaded from client input.
    id = ma.UUID(dump_only=True)
    app_id = ma.UUID(dump_only=True)
    dynotype = ma.Str(dump_only=True)

    class Meta:
        # NOTE(review): a plain ma.Schema ignores `model` (only ModelSchema
        # uses it, cf. AppSchema) — confirm whether this is intentional.
        model = Check
        fields = ('id', 'app_id', 'url', 'params', 'dynotype')
class AppSchema(ma.ModelSchema):
    """Marshmallow model schema serializing an App and its related checks."""

    class Meta:
        model = App
        fields = ('id', 'name', 'checks')
| StarcoderdataPython |
3437816 | <gh_stars>0
"""
Inference
---------
Module description
"""
import warnings
from abc import ABC, abstractmethod
from collections.abc import Iterable
import chainer
import chainer.functions as F
import numpy as np
from tqdm import tqdm
from brancher.optimizers import ProbabilisticOptimizer
from brancher.variables import DeterministicVariable, Variable, ProbabilisticModel
from brancher.transformations import truncate_model
from brancher.utilities import reassign_samples
from brancher.utilities import zip_dict
from brancher.utilities import sum_from_dim
# def maximal_likelihood(random_variable, number_iterations, optimizer=chainer.optimizers.SGD(0.001)):
# """
# Summary
#
# Parameters
# ---------
# random_variable : brancher.Variable
# number_iterations : int
# optimizer : chainer.optimizers
# Summary
# """
# prob_optimizer = ProbabilisticOptimizer(optimizer) #TODO: This function is not up to date
# prob_optimizer.setup(random_variable)
# loss_list = []
# for iteration in tqdm(range(number_iterations)):
# loss = -F.sum(random_variable.calculate_log_probability({}))
# prob_optimizer.chain.cleargrads()
# loss.backward()
# prob_optimizer.optimizer.update()
# loss_list.append(loss.data)
# return loss_list
def stochastic_variational_inference(joint_model, number_iterations, number_samples,
                                     optimizer=None,
                                     input_values={}, inference_method=None,
                                     posterior_model=None, sampler_model=None,
                                     pretraining_iterations=0):  # TODO: input values
    """
    Run stochastic variational inference on ``joint_model``.

    Parameters
    ----------
    joint_model : brancher.ProbabilisticModel
        Model whose posterior is approximated; its observed submodel is
        refreshed before optimization and its ``diagnostics`` dict receives
        the loss curve afterwards.
    number_iterations : int
        Number of gradient iterations.
    number_samples : int
        Number of Monte Carlo samples per loss evaluation.
    optimizer : chainer optimizer, optional
        Defaults to a fresh ``chainer.optimizers.Adam(0.001)`` per call.
        (Fix: the previous default was a single Adam instance created at
        definition time and shared by every call of this function.)
    input_values : dict, optional
        Values forwarded to the inference method's loss.
        NOTE(review): mutable default kept for interface compatibility; it is
        not mutated here, but downstream mutation is unverified.
    inference_method : InferenceMethod, optional
        Defaults to :class:`ReverseKL`.
    posterior_model, sampler_model : optional
        Default to ``joint_model.posterior_model`` and the method's / joint
        model's sampler respectively.
    pretraining_iterations : int, optional
        Iterations during which only the posterior optimizer updates.
    """
    if optimizer is None:
        optimizer = chainer.optimizers.Adam(0.001)
    if not inference_method:
        warnings.warn("The inference method was not specified, using the default reverse KL variational inference")
        inference_method = ReverseKL()
    if not posterior_model:
        posterior_model = joint_model.posterior_model
    if not sampler_model:
        # Fall back to the inference method's sampler, then the joint model's
        # posterior sampler, else None. (Fix: removed a duplicated nested
        # `if not sampler_model:` check.)
        try:
            sampler_model = inference_method.sampler_model
        except AttributeError:
            try:
                sampler_model = joint_model.posterior_sampler
            except AttributeError:
                sampler_model = None
    joint_model.update_observed_submodel()
    optimizers_list = [ProbabilisticOptimizer(posterior_model, optimizer)]
    if inference_method.learnable_model:
        optimizers_list.append(ProbabilisticOptimizer(joint_model, optimizer))
    if inference_method.learnable_sampler:
        optimizers_list.append(ProbabilisticOptimizer(sampler_model, optimizer))
    loss_list = []
    inference_method.check_model_compatibility(joint_model, posterior_model, sampler_model)
    for iteration in tqdm(range(number_iterations)):
        loss = inference_method.compute_loss(joint_model, posterior_model, sampler_model, number_samples)
        if np.isfinite(loss.data).all():
            for opt in optimizers_list:
                opt.chain.cleargrads()
            loss.backward()
            # The posterior optimizer always updates; model/sampler optimizers
            # only once the pretraining phase has passed.
            optimizers_list[0].update()
            if iteration > pretraining_iterations:
                for opt in optimizers_list[1:]:
                    opt.update()
        else:
            warnings.warn("Numerical error, skipping sample")
        loss_list.append(loss.data)
    joint_model.diagnostics.update({"loss curve": np.array(loss_list)})
    inference_method.post_process(joint_model)  # TODO: this could be implemented with a with block
class InferenceMethod(ABC):
    """Abstract interface for variational inference methods.

    Concrete subclasses are additionally expected to set the attributes
    ``learnable_model``, ``needs_sampler`` and ``learnable_sampler``
    (see the commented-out TODO below).
    """
    #def __init__(self): #TODO: abstract attributes
    #    self.learnable_model = False
    #    self.needs_sampler = False
    #    self.learnable_sampler = False

    @abstractmethod
    def check_model_compatibility(self, joint_model, posterior_model, sampler_model):
        """Validate that the given models are usable with this method."""
        pass

    @abstractmethod
    def compute_loss(self, joint_model, posterior_model, sampler_model, number_samples, input_values):
        """Return the differentiable loss to be minimized."""
        pass

    @abstractmethod
    def post_process(self, joint_model):
        """Hook executed once after the optimization loop finishes."""
        pass
class ReverseKL(InferenceMethod):
    """Reverse-KL variational inference (ELBO maximization)."""

    def __init__(self):
        self.learnable_model = True
        self.needs_sampler = False
        self.learnable_sampler = False

    def check_model_compatibility(self, joint_model, posterior_model, sampler_model):
        # Accepts any models; differentiability is not verified here (TODO).
        pass

    def compute_loss(self, joint_model, posterior_model, sampler_model, number_samples, input_values={}):
        # Minimizing the negative ELBO maximizes the evidence lower bound.
        return -joint_model.estimate_log_model_evidence(number_samples=number_samples,
                                                        method="ELBO",
                                                        input_values=input_values,
                                                        for_gradient=True)

    def post_process(self, joint_model):
        pass
class WassersteinVariationalGradientDescent(InferenceMethod): #TODO: Work in progress
    """Wasserstein variational gradient descent over a set of particles.

    Each variational sampler is truncated to the region where its associated
    particle is the closest one; the loss combines per-sampler ELBO terms with
    a transport-cost term that pulls particles toward the samplers' mass.
    """
    def __init__(self, variational_samplers, particles,
                 cost_function=None,
                 deviation_statistics=None,
                 biased=False,
                 number_post_samples=8000): #TODO: Work in progress
        self.learnable_model = False #TODO: to implement later
        self.needs_sampler = True
        self.learnable_sampler = True
        self.biased = biased
        self.number_post_samples = number_post_samples
        if cost_function:
            self.cost_function = cost_function
        else:
            # Default transport cost: squared difference summed over all
            # non-batch dimensions.
            self.cost_function = lambda x, y: sum_from_dim((x - y) **2, dim_index=1)
        if deviation_statistics:
            self.deviation_statistics = deviation_statistics
        else:
            self.deviation_statistics = lambda lst: sum(lst)

        def model_statistics(dic):
            # For each particle: total deviation (per sample) between the
            # sampled values in `dic` and that particle's values.
            num_samples = list(dic.values())[0].shape[0]
            reassigned_particles = [reassign_samples(p._get_sample(num_samples), source_model=p, target_model=dic)
                                    for p in particles]
            statistics = [self.deviation_statistics([self.cost_function(value_pair[0], value_pair[1]).data
                                                     for var, value_pair in zip_dict(dic, p).items()])
                          for p in reassigned_particles]
            return np.array(statistics).transpose()

        # Sampler k keeps only samples for which particle k is the argmin of
        # the deviation statistics above (Voronoi-style truncation).
        truncation_rules = [lambda a, idx=index: True if (idx == np.argmin(a)) else False
                            for index in range(len(particles))]
        self.sampler_model = [truncate_model(model=sampler,
                                             truncation_rule=rule,
                                             model_statistics=model_statistics)
                              for sampler, rule in zip(variational_samplers, truncation_rules)]

    def check_model_compatibility(self, joint_model, posterior_model, sampler_model):
        # This method requires a *list* of samplers, one per particle.
        assert isinstance(sampler_model, Iterable) and all([isinstance(subsampler, (Variable, ProbabilisticModel))
                                                            for subsampler in sampler_model]), "The Wasserstein Variational GD method require a list of variables or probabilistic models as sampler"
        # TODO: Check differentiability of the model

    def compute_loss(self, joint_model, posterior_model, sampler_model, number_samples, input_values={}):
        # Sum of negative ELBO terms (one per truncated sampler) plus the
        # particle transport-cost term.
        sampler_loss = sum([-joint_model.estimate_log_model_evidence(number_samples=number_samples, posterior_model=subsampler,
                                                                     method="ELBO", input_values=input_values, for_gradient=True)
                            for subsampler in sampler_model])
        particle_loss = self.get_particle_loss(joint_model, posterior_model, sampler_model, number_samples,
                                               input_values)
        return sampler_loss + particle_loss

    def get_particle_loss(self, joint_model, particle_list, sampler_model, number_samples, input_values):
        """Importance-weighted transport cost between particles and their samplers."""
        samples_list = [sampler._get_sample(number_samples, input_values=input_values)
                        for sampler in sampler_model]
        if self.biased:
            # Biased variant: uniform weights instead of importance weights.
            importance_weights = [1./number_samples for _ in sampler_model]
        else:
            importance_weights = [joint_model.get_importance_weights(q_samples=samples,
                                                                     q_model=sampler,
                                                                     for_gradient=False).flatten()
                                  for samples, sampler in zip(samples_list, sampler_model)]
        # Re-key each sampler's samples onto its particle's variables so the
        # per-variable cost can be computed pairwise.
        reassigned_samples_list = [reassign_samples(samples, source_model=sampler, target_model=particle)
                                   for samples, sampler, particle in zip(samples_list, sampler_model, particle_list)]
        pair_list = [zip_dict(particle._get_sample(1), samples)
                     for particle, samples in zip(particle_list, reassigned_samples_list)]
        # Gradients flow only through the particle values (sampler samples are
        # detached via `.data`).
        particle_loss = sum([F.sum(w*self.deviation_statistics([self.cost_function(value_pair[0], value_pair[1].data)
                                                                for var, value_pair in particle.items()]))
                             for particle, w in zip(pair_list, importance_weights)])
        return particle_loss

    def post_process(self, joint_model): #TODO: Work in progress
        # Compute normalized mixture weights for the truncated samplers from
        # acceptance probabilities and importance-weight normalizers.
        sample_list = [sampler._get_sample(self.number_post_samples)
                       for sampler in self.sampler_model]
        self.weights = []
        for sampler, s in zip(self.sampler_model, sample_list):
            a = sampler.get_acceptance_probability(number_samples=self.number_post_samples)
            _, Z = joint_model.get_importance_weights(q_samples=s,
                                                      q_model=sampler,
                                                      for_gradient=False,
                                                      give_normalization=True)
            self.weights.append(a*Z)
        self.weights /= np.sum(self.weights)
| StarcoderdataPython |
11300783 | <gh_stars>0
import processing
import os
import sys
def process_geotag_properties(import_path,
                              geotag_source="exif",
                              geotag_source_path=None,
                              offset_time=0.0,
                              offset_angle=0.0,
                              local_time=False,
                              sub_second_interval=0.0,
                              use_gps_start_time=False,
                              verbose=False,
                              rerun=False,
                              skip_subfolders=False,
                              video_file=None):
    """Extract GPS/time properties for images under ``import_path``.

    Dispatches to the appropriate ``processing.geotag_from_*`` helper based on
    ``geotag_source`` ("exif", "gpx", "nmea", "csv", "gopro_video",
    "blackvue_videos" or "json"). Sources other than "exif" require
    ``geotag_source_path``. When ``video_file`` is given, the import path is
    redirected to the sampled-video-frames directory.

    Exits the process (``sys.exit(1)``) on invalid paths or missing log files.
    """
    # sanity check if video file is passed
    if video_file and not (os.path.isdir(video_file) or os.path.isfile(video_file)):
        print("Error, video path " + video_file +
              " does not exist, exiting...")
        sys.exit(1)

    # in case of video processing, adjust the import path
    if video_file:
        # set sampling path
        video_sampling_path = processing.sampled_video_frames_rootpath(
            video_file)
        import_path = os.path.join(os.path.abspath(import_path), video_sampling_path) if import_path else os.path.join(
            os.path.dirname(video_file), video_sampling_path)

    # basic check for all
    if not import_path or not os.path.isdir(import_path):
        print("Error, import directory " + import_path +
              " does not exist, exiting...")
        sys.exit(1)

    # get list of files to process
    process_file_list = processing.get_process_file_list(import_path,
                                                         "geotag_process",
                                                         rerun,
                                                         verbose,
                                                         skip_subfolders)
    if not len(process_file_list):
        print("No images to run geotag process")
        print("If the images have already been processed and not yet uploaded, they can be processed again, by passing the argument --rerun")

    # sanity checks
    if geotag_source_path is None and geotag_source != "exif":
        # if geotagging from external log file, path to the external log file
        # needs to be provided, if not, exit
        print("Error, if geotagging from external log, rather than image EXIF, you need to provide full path to the log file.")
        # BUG FIX: the original passed "geotag_process" "failed" (implicit
        # string concatenation -> "geotag_processfailed"), which shifted
        # `verbose` into the status argument.
        processing.create_and_log_process_in_list(process_file_list,
                                                  "geotag_process",
                                                  "failed",
                                                  verbose)
        sys.exit(1)
    elif geotag_source != "exif" and not os.path.isfile(geotag_source_path) and not os.path.isdir(geotag_source_path):
        print("Error, " + geotag_source_path +
              " file source of gps/time properties does not exist. If geotagging from external log, rather than image EXIF, you need to provide full path to the log file.")
        # BUG FIX: same missing comma as above.
        processing.create_and_log_process_in_list(process_file_list,
                                                  "geotag_process",
                                                  "failed",
                                                  verbose)
        sys.exit(1)

    # function calls
    # NOTE(review): the return values are not used or returned here —
    # presumably the helpers log results to disk; confirm against callers.
    if geotag_source == "exif":
        geotag_properties = processing.geotag_from_exif(process_file_list,
                                                        import_path,
                                                        offset_angle,
                                                        verbose)
    elif geotag_source == "gpx" or geotag_source == "nmea":
        geotag_properties = processing.geotag_from_gps_trace(process_file_list,
                                                             import_path,
                                                             geotag_source,
                                                             geotag_source_path,
                                                             offset_time,
                                                             offset_angle,
                                                             local_time,
                                                             sub_second_interval,
                                                             use_gps_start_time,
                                                             verbose)
    elif geotag_source == "csv":
        geotag_properties = processing.geotag_from_csv(process_file_list,
                                                       import_path,
                                                       geotag_source_path,
                                                       offset_time,
                                                       offset_angle,
                                                       verbose)
    elif geotag_source == "gopro_video":
        geotag_properties = processing.geotag_from_gopro_video(process_file_list,
                                                               import_path,
                                                               geotag_source_path,
                                                               offset_time,
                                                               offset_angle,
                                                               local_time,
                                                               sub_second_interval,
                                                               use_gps_start_time,
                                                               verbose)
    elif geotag_source == "blackvue_videos":
        geotag_properties = processing.geotag_from_blackvue_video(process_file_list,
                                                                  import_path,
                                                                  geotag_source_path,
                                                                  offset_time,
                                                                  offset_angle,
                                                                  local_time,
                                                                  sub_second_interval,
                                                                  use_gps_start_time,
                                                                  verbose)
    elif geotag_source == "json":
        geotag_properties = processing.geotag_from_json(process_file_list,
                                                        import_path,
                                                        geotag_source_path,
                                                        offset_time,
                                                        offset_angle,
                                                        verbose)
    print("Sub process finished")
| StarcoderdataPython |
4846470 | '''
Represents an order in the draw request from DataTables.js.
'''
class DrawRequestOrder:
    """One ordering instruction from a DataTables.js draw request.

    Attributes:
        column: The column being ordered.
        direction: The requested sort direction.
    """

    def __init__(self, column, direction):
        self.column, self.direction = column, direction

    def __repr__(self):
        return "Ord(column=%s, direction=%s)" % (self.column, self.direction)

    # str() and repr() render identically.
    __str__ = __repr__
| StarcoderdataPython |
45794 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Capture Joulescope data to a JLS v2 file. See https://github.com/jetperch/jls"""
from joulescope import scan_require_one, JlsWriter
from joulescope.units import duration_to_seconds
import argparse
import signal
import time
# Joulescope signal name -> (signal id, unit).
# NOTE(review): not referenced elsewhere in this script as shown — presumably
# related to the --signals option / JlsWriter configuration; confirm before
# removing.
SIGNALS = {
    'current': (1, 'A'),
    'voltage': (2, 'V'),
    'power': (3, 'W'),
}
def get_parser():
    """Build the command-line argument parser for the JLS v2 capture tool."""
    parser = argparse.ArgumentParser(
        description='Capture Joulescope samples to a JLS v2. See https://github.com/jetperch/jls')
    parser.add_argument('--duration',
                        type=duration_to_seconds,
                        help='The capture duration in float seconds. '
                        + 'Add a suffix for other units: s=seconds, m=minutes, h=hours, d=days')
    parser.add_argument('--signals',
                        default='current,voltage',
                        help='The comma-separated list of signals to capture which include current, voltage, power. '
                        + 'Defaults to current,voltage')
    parser.add_argument('filename',
                        help='The JLS filename to record.')
    return parser
def run():
    """Stream Joulescope samples into the JLS file named on the command line.

    Captures until CTRL-C (SIGINT) or until the optional --duration elapses.
    Returns 0 on completion.
    """
    quit_ = False
    args = get_parser().parse_args()
    duration = args.duration

    def do_quit(*args, **kwargs):
        # SIGINT handler: request a clean stop of the capture loop below.
        nonlocal quit_
        quit_ = 'quit from SIGINT'

    signal.signal(signal.SIGINT, do_quit)
    device = scan_require_one(config='auto')
    with device:
        with JlsWriter(device, args.filename, signals=args.signals) as p:
            # Route the device's streamed samples into the JLS writer.
            device.stream_process_register(p)
            t_stop = None if duration is None else time.time() + duration
            device.start()
            print('Capturing data: type CTRL-C to stop')
            # Poll until SIGINT sets quit_ or the deadline passes.
            while not quit_:
                time.sleep(0.01)
                if t_stop and time.time() > t_stop:
                    break
            device.stop()
    return 0
# Script entry point.
if __name__ == '__main__':
    run()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.