index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
990,300 | 414ff8f6129b70ad98a46db570fd287457701ce1 | import os
import numpy as np
from matplotlib import pyplot as plt
import sys
sys.path.append("../utilities")
import constants
import utils
import data
def plot_histograms(run_dir, metadata_sig, metadata_bg):
    """Plot overlaid signal/background histograms for the first four
    metadata columns and save one PNG per column into ``run_dir``.

    Both samples are first trimmed to the same number of rows so the
    (un-normalised) histograms are directly comparable.
    """
    n_rows = min(metadata_sig.shape[0], metadata_bg.shape[0])
    metadata_sig = metadata_sig[:n_rows, :]
    metadata_bg = metadata_bg[:n_rows, :]
    column_names = ('pull1', 'pull2', 'jet_mass', 'jet_delta_R')
    for j, name in enumerate(column_names):
        # Draw signal (qq, blue) and background (gg, red) on the same axes.
        for sample, color, label in ((metadata_sig, 'blue', 'qq'),
                                     (metadata_bg, 'red', 'gg')):
            hist, bins = np.histogram(sample[:, j], bins=100)
            plt.plot(bins[:-1], hist, drawstyle='steps-post',
                     color=color, label=label)
        plt.title(name)
        plt.legend(loc='upper right')
        plt.savefig(run_dir + name + '.png')
        plt.clf()
def main():
    """CLI entry point: load signal/background metadata and plot histograms."""
    import argparse
    parser = argparse.ArgumentParser(description='Plot histograms on given data.')
    parser.add_argument('--run_dir', default='../histograms/', help='The directory in which histogram plots should be saved.')
    args = parser.parse_args()
    # An explicitly empty --run_dir means "create a fresh run directory".
    if not args.run_dir:
        args.run_dir = utils.make_run_dir()
        print('[clustering] New run directory created at {}'.format(args.run_dir))
    # octet=False -> signal sample (labelled 'qq' in the plots),
    # octet=True -> background ('gg').  NOTE(review): inferred from the
    # labels used in plot_histograms -- confirm against the data module.
    _, metadata_sig = data.get_pixels_metadata(octet=False)
    _, metadata_bg = data.get_pixels_metadata(octet=True)
    plot_histograms(args.run_dir, np.array(metadata_sig), np.array(metadata_bg))

if __name__ == '__main__':
    main()
|
990,301 | 7c28662af031537fc9c940dfe88c2a7f352d1eaa | """Basic loading & dumping functions."""
from pathlib import Path
from typing import Sequence
import tablib
from peewee import Model
from songbook import models
def _dump_table(table: "Model", directory: Path, format_: str):
    """Dump a single table.

    Writes ``<table name>.<format_>`` into *directory*, or prints the
    table as CSV to stdout when *directory* is None.

    Args:
        table: peewee model class to dump (annotation kept as a string so
            the function can be defined even when peewee is not imported).
        directory: target directory, or None to print to stdout.
        format_: tablib export format name, e.g. ``"csv"``.
    """
    try:
        dataset = tablib.Dataset(*table.select().tuples(), headers=table.fields())
    except Exception:
        # Show the real schema to aid debugging, then re-raise: the old
        # bare ``except`` swallowed the error and the function crashed
        # later with a NameError on the unbound ``dataset``.
        print(table._meta.database.get_columns(table.table_name()))
        raise
    if directory is not None:
        print(f"  Dumping {table.table_name()}...")
        out_file = Path(directory) / f"{table.table_name()}.{format_}"
        out_file.write_text(dataset.export(format_))
        print("  Done.")
        print("=====================")
    else:
        print(dataset.export("csv"))
def _load_table(table: Model, directory: Path, format_: str):
"""Dump a single table."""
if directory is not None:
print(f" Loading {table.table_name()}...")
in_file = Path(directory) / f"{table.table_name()}.{format_}"
dataset = tablib.Dataset(headers=table.fields()).load(in_file.read_text())
print(f" Importing {table.table_name()} into the database...")
table.insert_many(dataset.dict).execute()
print(" Done.")
print("=====================")
else:
pass
# print(dataset.export("csv"))
def dump(tables: Sequence[Model] = None, directory: str = None):
    """Dump the existing tables into csv files.

    Falls back to ``models.TABLES`` and ``data/csv/`` when the arguments
    are omitted; raises ValueError if the target directory is missing.
    """
    chosen_tables = tables or models.TABLES
    target = Path(directory or "data/csv/").absolute()
    if not target.exists():
        raise ValueError(f"{target} is not a valid path.")
    print(f"Target directory: {target}")
    for position, table in enumerate(chosen_tables, start=1):
        print(f"{position}. Processing {table.table_name()}...")
        print(f"  Fields: {table.fields()}")
        _dump_table(table=table, directory=target, format_="csv")
def load(tables: Sequence[Model] = None, directory: str = None):
    """Load the existing csv files to the database.

    Mirrors :func:`dump`: defaults to ``models.TABLES`` and ``data/csv/``,
    and raises ValueError when the source directory does not exist.
    """
    chosen_tables = tables or models.TABLES
    source = Path(directory or "data/csv/").absolute()
    if not source.exists():
        raise ValueError(f"{source} is not a valid path.")
    print(f"Target directory: {source}")
    for position, table in enumerate(chosen_tables, start=1):
        print(f"{position}. Processing {table.table_name()}...")
        print(f"  Fields: {table.fields()}")
        _load_table(table=table, directory=source, format_="csv")
if __name__ == "__main__":
    # Ad-hoc entry point: initialise the database, then import the CSV dumps.
    # NOTE(review): hard-coded absolute paths only work on the original
    # author's machine -- consider taking them from CLI args or config.
    models.init("/Users/kip/projects/lego-songbook/data/songbook.db")
    load(directory="/Users/kip/projects/lego-songbook/data/csv/")
|
990,302 | 0d6a09d7f4145dad5f64ad85d4696b4b8f03fa86 | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
class DannyGRnR():
    """Gauge R&R helper: loads a tab-separated measurement file and
    visualises the per-treatment distributions.

    Parameters:
        ifilepath: path (or file-like object) of the tab-separated input
            file; read eagerly by the constructor.
        idata: pre-loaded data (stored but currently unused).
        iUSL / iLSL: upper / lower specification limits (stored for later
            capability calculations).
        ivaluelist: columns to analyse; defaults to all file columns.
        ifigname: title to use for the figure.
    """

    def __init__(self, ifilepath=None, idata=None, iUSL=0, iLSL=0, ivaluelist=None, ifigname='My CPK'):
        self.iUSL = iUSL
        self.iLSL = iLSL
        self.data = idata
        self.ifigname = ifigname
        self.ivaluelist = ivaluelist
        # NOTE(review): read_csv runs unconditionally, so a missing
        # ifilepath raises immediately -- confirm that is intended.
        self.df = pd.read_csv(ifilepath, sep="\t")

    def AnalysisGRnR(self):
        """Melt the data to long form and show box + swarm plots per treatment."""
        df = self.df
        ivaluelist = self.ivaluelist
        # Bug fix: identity check instead of ``== None`` -- the equality
        # form breaks when the caller passes an array-like (elementwise
        # comparison yields an ambiguous truth value).
        if ivaluelist is None:
            ivaluelist = list(df.columns)
        print(ivaluelist)
        # Reshape to long form: one (index, treatment, value) row per
        # measurement, as expected by seaborn and statsmodels.
        df_melt = pd.melt(df.reset_index(), id_vars=['index'], value_vars=ivaluelist)
        df_melt.columns = ['index', 'treatments', 'value']
        # Box plot shows the per-treatment distribution; the overlaid
        # swarm plot shows the individual measurements.
        ax = sns.boxplot(x='treatments', y='value', data=df_melt, color='#99c2a2')
        ax = sns.swarmplot(x="treatments", y="value", data=df_melt, color='#7d0013')
        plt.show()
#DannyGRnR('mdata1.txt').AnalysisGRnR()
'''# importing packages
import seaborn as sns
import matplotlib.pyplot as plt
# current colot palette
palette = sns.color_palette('Greens', 11)
# sequential color palette
sns.palplot(palette)
plt.show()'''
|
990,303 | e4fdc8b43e224a182d309628837f33ad1c7241f3 | import serial
import struct
ser = serial.Serial(port='/dev/ttyS0',baudrate=115200)
def serialLoop():
    """Read one framed 3-byte packet from the module-level serial port.

    Protocol (as implemented): the first byte of a frame has its high bit
    set (value > 127); the two payload bytes that follow have the high
    bit clear (value < 128).  NOTE(review): despite the ``while True``,
    the function returns after a single pass, and bytes failing the range
    checks leave their slot at the previous value (0 on entry) -- confirm
    this is the intended framing/resync behaviour.
    """
    seri = [0]*3
    while True:
        # Each read() returns one raw byte; 'B' unpacks it as an
        # unsigned 8-bit integer (a 1-tuple).
        line = ser.read()
        line = struct.unpack('B',line)
        if line[0] > 127:
            # High bit set: header byte of the frame.
            seri[0] = line[0]
        line = ser.read()
        line = struct.unpack('B',line)
        if line[0] < 128:
            seri[1] = line[0]
        line = ser.read()
        line = struct.unpack('B',line)
        if line[0] < 128:
            seri[2]= line[0]
        return(seri)
|
990,304 | e4e42a7944c168a915ac4ee0a91ccdc12ef874c3 | a = 3
b = 2
resultado = (a == b)
print(resultado)
resultado = (a != b)
print(resultado)
resultado = (a > b)
print(resultado)
resultado = (a >= b)
print(resultado)
resultado = (a <= b)
print(resultado)
resultado = (a < b)
print(resultado) |
990,305 | 6f4bf8d0a1eca8264b3aae3dcd89e0ab0574f385 | def evaluate_policy(env, policy, trials = 1000):
total_reward = 0
for _ in range(trials):
env.reset()
done = False
observation, reward, done, info = env.step(policy[0])
total_reward += reward
while not done:
observation, reward, done, info = env.step(policy[observation])
total_reward += reward
return total_reward / trials
def evaluate_policy_discounted(env, policy, discount_factor, trials = 1000):
    """Estimate the mean discounted return of a deterministic policy.

    The reward collected at time step t is weighted by
    ``discount_factor**t``, so the return is r0 + g*r1 + g^2*r2 + ...

    Bug fix: previously ``cnt`` restarted at 0 inside the loop, so the
    second reward was also weighted ``discount_factor**0`` and every later
    reward was off by one power of the discount factor.
    """
    total_reward = 0
    for _ in range(trials):
        env.reset()
        done = False
        observation, reward, done, info = env.step(policy[0])
        total_reward += reward  # step 0: discount_factor**0 == 1
        cnt = 1  # the next reward arrives at time step 1
        while not done:
            observation, reward, done, info = env.step(policy[observation])
            total_reward += discount_factor**cnt * reward
            cnt += 1
    return total_reward / trials
990,306 | f569e69ca5d8c8a18e175bc40b40a8ee7cbd81c2 | class DatosMensaje:
def __init__(self):
self.Nombre = ""
self.Mensaje = "" |
990,307 | 2bc074b388b899edc666098262184ba22e40ce5d | from PIL import Image, ImageDraw
from BreastCancerDiagnosis.Model import Net as BreastCancerModel
import numpy as np
from torch import load
from torchvision import transforms
from SkinCancer.Model import Net as SkinCancerModel
# Load the sample image, force RGB, and resize to the 300x300 input the
# network expects; show it for visual reference.
sample_image = Image.open('samples/skin_cancer_benign.jpg').convert('RGB').resize([300,300])
sample_image.show()
pixels = np.array(sample_image)

# Restore the trained skin-cancer classifier weights.
classifier = SkinCancerModel()
classifier.load_state_dict(load('SkinCancer/model_skin_cancer.pt'))

# ToPILImage -> ToTensor converts the HxWxC uint8 array into a CHW float
# tensor in [0, 1]; Normalize(mean=0, std=1) is effectively a no-op.
preprocess = transforms.Compose([transforms.ToPILImage(),
                                 transforms.ToTensor(),
                                 transforms.Normalize(mean=[0], std=[1])])
input_batch = preprocess(pixels).view(1, 3, 300, 300)

# Round the sigmoid-style output to a hard 0/1 class decision.
prediction = np.round(classifier.forward(input_batch).detach().numpy())
print('Predicted_class:', end=' ')
if prediction == 1:
    print('Malignant tumor')
else:
    print('Benign tumor')
|
990,308 | f75710d49ff478b675faf720ff3d31f950d951f8 | # setup.py
import sys, pathlib, re, json
from setuptools import setup, find_packages
# --- Get the text of README.md
# --- Get the text of README.md (used verbatim as the long description)
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()

# --- Get the version number
# Single-source the version: scan parserUtils.py for the first line of the
# form __version__ = "x.y.z" and reuse the quoted value here.
reVersion = re.compile(r'^__version__\s*=\s*\"(.*)\"')
version = ''
with open('./parserUtils.py') as fh:
    for line in fh:
        result = reVersion.match(line)
        if result:
            version = result.group(1)
            break

setup(
    name = "PLLParser",
    version = version,
    author = "John Deighan",
    author_email = "john.deighan@gmail.com",
    description = "Parse a Python-like language",
    long_description = README,
    long_description_content_type = "text/markdown",
    license="MIT",
    url = "https://github.com/johndeighan/PLLParser",
    packages = find_packages(),
    py_modules = ['parserUtils','TreeNode','RETokenizer','PLLParser'],
    classifiers = [
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires = '>=3.6',
)
|
990,309 | d697aa17dc89830fa6f8e973991b1ff00277c837 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (
print_function,
division,
absolute_import)
from six.moves import xrange
# =============================================================================
# Imports
# =============================================================================
from numpy.testing import (
TestCase,
run_module_suite,
assert_,
assert_allclose,
assert_array_almost_equal_nulp,
assert_array_max_ulp,
assert_array_equal,
assert_array_less,
assert_equal,
assert_raises,
assert_raises_regex,
assert_warns,
assert_string_equal)
import numpy as np
import os
from kamrecsys.data import EventData
from kamrecsys.datasets import SAMPLE_PATH, load_movielens_mini
# =============================================================================
# Module variables
# =============================================================================
# =============================================================================
# Functions
# =============================================================================
def load_test_data():
    """Load the sample ``pci.event`` file and wrap it in an EventData.

    Returns the ``(EventData, raw structured array)`` pair the tests use:
    the raw array keeps the external ids and scores for comparison.
    """
    infile = os.path.join(SAMPLE_PATH, 'pci.event')
    record_type = np.dtype([('event', 'U18', 2), ('score', float)])
    raw = np.genfromtxt(fname=infile, delimiter='\t', dtype=record_type)
    event_data = EventData(n_otypes=2, event_otypes=np.array([0, 1]))
    event_data.set_event(raw['event'])
    return event_data, raw
# =============================================================================
# Test Classes
# =============================================================================
class TestEventUtilMixin(TestCase):
    """Tests for the eid<->iid event-conversion helpers of EventData."""

    def test_to_eid_event(self):
        data, x = load_test_data()

        # whole-array conversion back to external ids round-trips
        check = data.to_eid_event(data.event)
        assert_array_equal(x['event'], check)

        # per-row conversion must agree with the whole-array form
        check = np.empty_like(data.event, dtype=x['event'].dtype)
        for i, j in enumerate(data.event):
            check[i, :] = data.to_eid_event(j)
        assert_array_equal(x['event'], check)

    def test_to_iid_event(self):
        # (removed an unused local import of EventWithScoreData)
        data, x = load_test_data()

        # whole-array conversion to internal ids
        assert_array_equal(data.event, data.to_iid_event(x['event']))

        # per-row conversion must agree with the whole-array form
        check = np.empty_like(x['event'], dtype=int)
        for i, j in enumerate(x['event']):
            check[i, :] = data.to_iid_event(j)
        assert_array_equal(data.event, check)
class TestEventData(TestCase):
    """Tests for EventData.filter_event on real and synthetic data."""

    def test_filter_event(self):
        from kamrecsys.data import EventWithScoreData

        # load movie_lens
        data = load_movielens_mini()

        # keep every third event
        filter_cond = np.arange(data.n_events) % 3 == 0
        # call the EventData implementation explicitly via super(),
        # bypassing the EventWithScoreData override
        filtered_data = super(
            EventWithScoreData, data).filter_event(filter_cond)

        # internal ids of the surviving events
        assert_array_equal(
            filtered_data.event[:, 0], [1, 5, 3, 4, 0, 0, 0, 2, 2, 0])
        assert_array_equal(
            filtered_data.event[:, 1], [1, 3, 6, 5, 7, 6, 4, 0, 7, 2])
        # external ids must match the unfiltered data at the kept rows
        assert_array_equal(
            filtered_data.to_eid(0, filtered_data.event[:, 0]),
            data.to_eid(0, data.event[filter_cond, 0]))
        assert_array_equal(
            filtered_data.to_eid(1, filtered_data.event[:, 1]),
            data.to_eid(1, data.event[filter_cond, 1]))
        # event features are filtered consistently with the events
        assert_array_equal(
            filtered_data.event_feature['timestamp'],
            [875636053, 877889130, 891351328, 879362287, 878543541,
             875072484, 889751712, 883599478, 883599205, 878542960])
        # id tables are rebuilt to cover only surviving entities
        assert_array_equal(filtered_data.eid[0], [1, 5, 6, 7, 8, 10])
        assert_array_equal(filtered_data.eid[1], [1, 2, 3, 4, 5, 7, 8, 9])
        assert_equal(
            filtered_data.iid[0],
            {1: 0, 5: 1, 6: 2, 7: 3, 8: 4, 10: 5})
        assert_equal(
            filtered_data.iid[1],
            {1: 0, 2: 1, 3: 2, 4: 3, 5: 4, 7: 5, 8: 6, 9: 7})
        # entity features follow the filtering, too
        assert_equal(
            filtered_data.feature[0]['zip'],
            [u'85711', u'15213', u'98101', u'91344', u'05201', u'90703'])
        assert_equal(
            filtered_data.feature[1]['name'],
            [u'Toy Story (1995)', u'GoldenEye (1995)', u'Four Rooms (1995)',
             u'Get Shorty (1995)', u'Copycat (1995)', u'Twelve Monkeys (1995)',
             u'Babe (1995)', u'Dead Man Walking (1995)'])

        # dummy event data: duplicated events must survive filtering intact
        data = EventData()
        data.set_event(np.tile(np.arange(5), (2, 2)).T)
        filtered_data = data.filter_event(
            [True, False, True, True, False, False, True, True, False, False])
        assert_equal(filtered_data.n_events, 5)
        assert_array_equal(
            filtered_data.event, [[0, 0], [2, 2], [3, 3], [1, 1], [2, 2]])
# =============================================================================
# Main Routines
# =============================================================================
if __name__ == '__main__':
    # Run this module's tests via numpy's legacy test runner.
    run_module_suite()
|
990,310 | 02920141e923403a5a605c6faa572f6561066e59 | import asyncpg
from settings import settings
class Pool:
    """Thin wrapper owning a module-wide asyncpg connection pool."""

    async def open_pool(self):
        # Create the pool from the DSN in settings.  Must be awaited once
        # at application startup before get_pool() is used.
        self.pool = await asyncpg.create_pool(dsn=settings.PGSTRING)

    async def close_pool(self):
        # Gracefully close all pooled connections at shutdown.
        await self.pool.close()

    def get_pool(self):
        # NOTE(review): raises AttributeError if called before open_pool().
        return self.pool

# Shared singleton used across the application.
db = Pool()
|
990,311 | e68e7b2479d2625d6256614d519e6c852bc34eb6 | from .models import Produto
from django.forms import ModelForm
from django import forms
class ProdutoFrom(ModelForm):
    """ModelForm for Produto exposing only description and price.

    NOTE(review): the class name looks like a typo for ``ProdutoForm``;
    renaming would break existing imports, so it is left as-is.
    """
    class Meta:
        model = Produto
        fields = ['descricao', 'preco']

    def send_email(self):
        # Hook expected by form-processing views; intentionally a no-op.
        pass
|
990,312 | c796cb5535169e91ac860ff06d62ee1f40b94427 | # USAGE
# python build_dataset.py
# ========================
# ~/mxnet/bin/im2rec /raid/datasets/adience/lists/age_train.lst "" \
# /raid/datasets/adience/rec/age_train.rec resize=256 encoding='.jpg' quality=100
# ~/mxnet/bin/im2rec /raid/datasets/adience/lists/age_val.lst "" \
# /raid/datasets/adience/rec/age_val.rec resize=256 encoding='.jpg' quality=100
# ~/mxnet/bin/im2rec /raid/datasets/adience/lists/age_test.lst "" \
# /raid/datasets/adience/rec/age_test.rec resize=256 encoding='.jpg' quality=100
# ========================
# ~/mxnet/bin/im2rec /raid/datasets/adience/lists/gender_train.lst "" \
# /raid/datasets/adience/rec/gender_train.rec resize=256 encoding='.jpg' quality=100
# ~/mxnet/bin/im2rec /raid/datasets/adience/lists/gender_val.lst "" \
# /raid/datasets/adience/rec/gender_val.rec resize=256 encoding='.jpg' quality=100
# ~/mxnet/bin/im2rec /raid/datasets/adience/lists/gender_test.lst "" \
# /raid/datasets/adience/rec/gender_test.rec resize=256 encoding='.jpg' quality=100
# import the necessary packages
from config import age_gender_config as config
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from pyimagesearch.utils import AgeGenderHelper
import numpy as np
import progressbar
import pickle
import json
import cv2
# initialize our helper class, then build the set of image paths
# and class labels
print("[INFO] building paths and labels...")
agh = AgeGenderHelper(config)
(trainPaths, trainLabels) = agh.buildPathsAndLabels()

# now that we have the total number of images in the dataset that
# can be used for training, compute the number of images that
# should be used for validation and testing
numVal = int(len(trainPaths) * config.NUM_VAL_IMAGES)
numTest = int(len(trainPaths) * config.NUM_TEST_IMAGES)

# our class labels are represented as strings so we need to encode
# them as integers
print("[INFO] encoding labels...")
le = LabelEncoder().fit(trainLabels)
trainLabels = le.transform(trainLabels)

# perform stratified sampling from the training set to construct a
# validation set
print("[INFO] constructing validation data...")
split = train_test_split(trainPaths, trainLabels, test_size=numVal,
    stratify=trainLabels)
(trainPaths, valPaths, trainLabels, valLabels) = split

# perform stratified sampling from the training set to construct a
# testing set
print("[INFO] constructing testing data...")
split = train_test_split(trainPaths, trainLabels, test_size=numTest,
    stratify=trainLabels)
(trainPaths, testPaths, trainLabels, testLabels) = split

# construct a list pairing the training, validation, and testing
# image paths along with their corresponding labels and output list
# files (im2rec .lst format)
datasets = [
    ("train", trainPaths, trainLabels, config.TRAIN_MX_LIST),
    ("val", valPaths, valLabels, config.VAL_MX_LIST),
    ("test", testPaths, testLabels, config.TEST_MX_LIST)]
# initialize the lists of RGB channel averages
(R, G, B) = ([], [], [])
# loop over the dataset tuples
for (dType, paths, labels, outputPath) in datasets:
# open the output file for writing
print("[INFO] building {}...".format(outputPath))
f = open(outputPath, "w")
# initialize the progress bar
widgets = ["Building List: ", progressbar.Percentage(), " ",
progressbar.Bar(), " ", progressbar.ETA()]
pbar = progressbar.ProgressBar(maxval=len(paths),
widgets=widgets).start()
# loop over each of the individual images + labels
for (i, (path, label)) in enumerate(zip(paths, labels)):
# if we are building the training dataset, then compute the
# mean of each channel in the image, then update the
# respective lists
if dType == "train":
image = cv2.imread(path)
(b, g, r) = cv2.mean(image)[:3]
R.append(r)
G.append(g)
B.append(b)
# write the image index, label, and output path to file
row = "\t".join([str(i), str(label), path])
f.write("{}\n".format(row))
pbar.update(i)
# close the output file
pbar.finish()
f.close()
# construct a dictionary of averages, then serialize the means to a
# JSON file
print("[INFO] serializing means...")
D = {"R": np.mean(R), "G": np.mean(G), "B": np.mean(B)}
f = open(config.DATASET_MEAN, "w")
f.write(json.dumps(D))
f.close()
# serialize the label encoder
print("[INFO] serializing label encoder...")
f = open(config.LABEL_ENCODER_PATH, "wb")
f.write(pickle.dumps(le))
f.close() |
990,313 | 87970e05ffc15fc93d41716f813d5d88ed37bb1e | import os
import platform
import subprocess
import sys
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
__version__ = '0.8.0'
__milecsa_api_version__ = '1.1.3'
darwin_flags = ['-mmacosx-version-min=10.12']
class ExtensionWithLibrariesFromSources(Extension):
    """setuptools Extension that first builds bundled C/C++ libraries
    from source with CMake, then links the resulting artifacts.

    Windows is unsupported.
    """

    def __init__(self, name, sources, *args, **kw):
        # Each entry is a (lib_name, lib_path, lib_version) triple.
        self.libraries_from_sources = kw.pop('libraries_from_sources', [])
        if platform.system() == 'Darwin':
            # macOS needs a deployment target and the libc++ headers
            # shipped inside Xcode.
            kw['extra_link_args'] = kw.get('extra_link_args', []) + darwin_flags
            kw['include_dirs'] = kw.get('include_dirs', []) + [
                '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1'
            ]
        super().__init__(name, sources, *args, **kw)

    def build_libraries(self, ext_builder: build_ext):
        """Build every bundled library; return (libraries, library_dirs)."""
        self.check_cmake_version()
        libraries = []
        libraries_dirs = ['/usr/lib']
        for lib_name, lib_path, lib_version in self.libraries_from_sources:
            libraries += [lib_name]
            libraries_dirs += self.build_library(
                ext_builder, lib_name, os.path.abspath(lib_path), lib_version
            )
        return libraries, libraries_dirs

    @staticmethod
    def build_library(ext_builder: build_ext, lib_name, lib_path, lib_version):
        """Configure and build one library with CMake in a temp dir;
        return the candidate output directories for the linker."""
        build_temp = os.path.join(ext_builder.build_temp, lib_name)
        cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + build_temp,
                      '-DPYTHON_EXECUTABLE=' + sys.executable]
        cfg = 'Debug' if ext_builder.debug else 'Release'
        build_args = ['--config', cfg]
        cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
        build_args += ['--', '-j2']
        env = os.environ.copy()
        # Embed the library version into the build via CXXFLAGS.
        env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(
            env.get('CXXFLAGS', ''), lib_version
        )
        if not os.path.exists(build_temp):
            os.makedirs(build_temp)
        subprocess.check_call(['cmake', lib_path] + cmake_args,
                              cwd=build_temp, env=env)
        subprocess.check_call(['cmake', '--build', '.'] + build_args,
                              cwd=build_temp)
        return [build_temp, build_temp + '/lib']

    def check_cmake_version(self):
        # NOTE(review): despite the name, this only verifies that the
        # cmake binary is runnable; no minimum version is enforced.
        try:
            out = subprocess.check_output(['cmake', '--version'])
        except OSError:
            raise RuntimeError(
                "CMake must be installed to build the following extension: " + self.name
            )
class BuildExt(build_ext):
    """build_ext that first builds any from-source libraries declared by
    an ExtensionWithLibrariesFromSources, then compiles the extension."""

    def build_extension(self, ext: Extension):
        # Exact-type check: only our custom extension carries bundled libs.
        if type(ext) is ExtensionWithLibrariesFromSources:
            ext: ExtensionWithLibrariesFromSources
            libraries, library_dirs = ext.build_libraries(self)
            ext.libraries += libraries
            ext.library_dirs += library_dirs
        super().build_extension(ext)
# Compiler flags shared by all modules; the package version is baked in
# as a preprocessor macro.
extra_compile_args = ['-std=c++17', '-DVERSION_INFO="{}"'.format(__version__)]
if platform.system() == 'Darwin':
    extra_compile_args = extra_compile_args + darwin_flags

ext_modules = [
    ExtensionWithLibrariesFromSources(
        '__milecsa',
        ['./src/milecsa_bindings/bindings.cpp'],
        include_dirs=[
            'src/pybind11/include',
            '/usr/include',
            '/usr/local/include',
            './src/mile-csa-api/include',
            './src/mile-csa-api/vendor/mile-crypto/include',
            './src/mile-csa-api/vendor/mile-crypto/src/private_include',
            './src/mile-csa-api/vendor/mile-crypto/src/ed25519/include',
            './src/mile-csa-api/vendor/nlohmann'
        ],
        language='c++',
        extra_compile_args=extra_compile_args,
        # The MILE CSA C++ API is built from source with CMake first.
        libraries_from_sources=[
            ('milecsa', './src/mile-csa-api', __milecsa_api_version__),
        ]
    ),
]

setup(
    name='milecsa',
    version=__version__,
    author="Lotus Mile",
    license="MIT",
    description='Python Package Mile C Extension',
    url="http://mile.global",
    packages=['milecsa'],
    ext_modules=ext_modules,
    install_requires=[
        'urllib3',
        'requests',
        'pillow',
        'qrcode'
    ],
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Topic :: Utilities",
        "License :: OSI Approved :: MIT License",
    ],
    cmdclass={'build_ext': BuildExt},
)
|
990,314 | 7b48983ee2f981ccc42afb5d4a2ec52fe8000dd7 | import numpy
import Family
def sinx_x(x):
    """Return sin(x)/x with the removable singularity filled in.

    The limit of sin(x)/x as x -> 0 is 1, so x == 0 maps to 1.  Works
    for scalars and numpy arrays.  (The original ``divide(...) or 1``
    read uninitialised output memory when x == 0 and raised an
    ambiguous-truth-value error for array inputs.)
    """
    x = numpy.asarray(x, dtype=float)
    # out=ones_like pre-fills the x == 0 slots with the limit value 1.
    y = numpy.divide(numpy.sin(x), x, out=numpy.ones_like(x), where=x != 0)
    # Preserve scalar-in / scalar-out behaviour.
    return y[()] if y.ndim == 0 else y
def cosx_x(x):
    """Return cos(x)/x, mapping x == 0 to 1.

    cos(x)/x has no finite limit at 0; the value 1 preserves the
    original function's intended fallback (``or 1``), which previously
    could return uninitialised memory at x == 0 and raised an
    ambiguous-truth-value error for array inputs.
    """
    x = numpy.asarray(x, dtype=float)
    y = numpy.divide(numpy.cos(x), x, out=numpy.ones_like(x), where=x != 0)
    return y[()] if y.ndim == 0 else y
def main():
    """Console demo: collect family data, call Family helpers, sample
    sin(x)/x and cos(x)/x over a grid, and round-trip a name list
    through a text file."""
    father_name, father_age = input("insert father name and father age ").split()
    mother_name, mother_age = input("insert mather name and mother age").split()
    num_of_children = int(input("insert num of children"))
    children: dict = dict()
    for i in range(num_of_children):
        child_name, child_age = input("insert child name and child age").split()
        # NOTE(review): ages are stored as strings, not ints -- confirm.
        children[child_name] = child_age
    last_name = input("insert last name") or "None"
    mother_details = {"mother_name": mother_name, "mother_age": mother_age}
    father_details = {"father_name": father_name, "father_age": father_age}
    parents = {"father": father_details, "mother": mother_details}
    # NOTE(review): ``Family`` is the module imported at the top of the
    # file (``import Family``), so calling it directly raises TypeError;
    # this probably should be ``Family.Family(...)`` -- confirm against
    # the Family module.
    f1 = Family(parents=parents, children=children, last_name=last_name)
    f1.add_child("ya", 13)
    print(f1.get_children(), f1.get_parents_names())
    #================
    # sin(x)/x evaluated at its removable singularity.
    print(sinx_x(0))
    #=============
    t = numpy.arange(-100, 100, 0.01)
    print(t)
    sinx = [sinx_x(x) for x in t]
    cosx = [cosx_x(x) for x in t]
    print(cosx, sinx)
    #==================
    # Write the names one per line, then echo back every other line.
    name_list = ["aaa", "bbb", "ccc", "eee", "fff"]
    file_name = input("insert file name")
    file = open(file_name+".txt", "w")
    [file.write(x+"\n") for x in name_list]
    file.close()
    file = open(file_name+".txt", "r")
    lines = file.readlines()
    [print(lines[i]) for i in range(0, len(lines), 2)]
    file.close()

if __name__ == "__main__":
    main()
990,315 | 0c109b6e0fc2e0460531af804070ea8b660524d9 | #coding=utf-8
import time
import cv2
# Initialise OpenCV's cascade classifier; this produces a detector.
# NOTE(review): the loaded cascade is haarcascade_eye.xml (an EYE
# detector) although the messages below talk about faces -- confirm
# which cascade is intended.
faceCascade = cv2.CascadeClassifier("/usr/share/opencv/haarcascades/haarcascade_eye.xml")
image = cv2.imread("02.jpg")
# Convert to greyscale: the cascade operates on single-channel images.
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Run the detector and time it.
time1=time.time()
faces = faceCascade.detectMultiScale(gray,scaleFactor=1.15,minNeighbors=5,minSize=(5,5))
time2=time.time()
print "Found {0} faces!".format(len(faces))
print '共耗时:'+str(time2-time1)+'s'
# Draw a bounding box around every detection and display the result.
for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imshow("Faces found", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
990,316 | 3fcfb9bc1ce76fc51bcc556cf9de955558e57dd6 |
class NeuralNet(object):
    """Neural Network

    A multilayer perceptron is a feedforward artificial neural network model
    that has one layer or more of hidden units and nonlinear activations.
    Intermediate layers usually have as activation function tanh or the
    sigmoid function (defined here by a ``HiddenLayer`` class) while the
    top layer is a softmax layer (defined here by a ``MaxEntLayer``
    class).

    NOTE(review): ``HiddenLayer``, ``LogisticRegression`` and ``T``
    (theano.tensor) are expected to be defined elsewhere in this module.
    """

    def __init__(self, rng, input, n_in, n_hidden, n_out):
        """Initialize the parameters for the multilayer perceptron

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.TensorType
        :param input: symbolic variable that describes the input of the
        architecture (one minibatch)

        :type n_in: int
        :param n_in: number of input units, the dimension of the space in
        which the datapoints lie

        :type n_hidden: int
        :param n_hidden: number of hidden units

        :type n_out: int
        :param n_out: number of output units, the dimension of the space in
        which the labels lie
        """
        # Since we are dealing with a one hidden layer MLP, this will translate
        # into a HiddenLayer with a tanh activation function connected to the
        # LogisticRegression layer; the activation function can be replaced by
        # sigmoid or any other nonlinear function
        self.hiddenLayer = HiddenLayer(
            rng=rng,
            input=input,
            n_in=n_in,
            n_out=n_hidden,
            activation=T.tanh
        )
        # The logistic regression layer gets as input the hidden units
        # of the hidden layer
        self.logRegressionLayer = LogisticRegression(
            input=self.hiddenLayer.output,
            n_in=n_hidden,
            n_out=n_out
        )
        # L1 norm; one regularization option is to enforce the L1 norm to
        # be small
        self.L1 = (
            abs(self.hiddenLayer.W).sum()
            + abs(self.logRegressionLayer.W).sum()
        )
        # square of L2 norm; one regularization option is to enforce the
        # square of the L2 norm to be small
        self.L2_sqr = (
            (self.hiddenLayer.W ** 2).sum()
            + (self.logRegressionLayer.W ** 2).sum()
        )
        # negative log likelihood of the MLP is given by the negative
        # log likelihood of the output of the model, computed in the
        # logistic regression layer
        self.negative_log_likelihood = (
            self.logRegressionLayer.negative_log_likelihood
        )
        # same holds for the function computing the number of errors
        self.errors = self.logRegressionLayer.errors
        # the parameters of the model are the parameters of the two layers it
        # is made out of
        self.params = self.hiddenLayer.params + self.logRegressionLayer.params
        # keep track of model input
        self.input = input
|
990,317 | 73e6d4610f9045b4f1220fc56bd23ab00d2e1802 | import math
from datetime import datetime
import datetime
import time
import pandas as pd
from django.conf import settings
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.paginator import Paginator
from django.db import connection
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
from django.views import View
from manage_users.models import *
import dateutil.relativedelta
from .models import *
from .upload_views import *
from .choices_data import \
state_status, \
site_choices, \
visit_choices, \
site_held_choices
class ReviewView(LoginRequiredMixin, View):
    """
    Review screen for uploaded data, with 4-day navigation.

    Dispatches on the ``type`` query parameter to one of four review
    modes: ``blood_sample``, ``manifest``, ``receipt`` or ``processed``.
    Each mode can render the full review page, just the paginated table
    fragment (``table=True``), or a JSON page-count payload
    (``get_pages=True``) used by the AJAX pagination controls.
    """
    # Full-page template and table-fragment template for each review mode.
    blood_sample_review_template = \
        'review_blood_sample/blood-sample-review.html'
    blood_sample_review_table_template = \
        'review_blood_sample/blood-sample-table.html'
    manifest_review_template = 'review_manifest/manifest-review.html'
    manifest_review_table_template = 'review_manifest/manifest-table.html'
    receipt_review_template = 'review_receipt/receipt-review.html'
    receipt_review_table_template = 'review_receipt/receipt-table.html'
    processed_review_template = 'review_processed/processed-review.html'
    processed_review_table_template = 'review_processed/processed-table.html'
    def get(self, request, *args, **kwargs):
        """
        Render the review page/table for the requested review type and day.

        :param request: request object; reads ``type``, ``page``, ``table``,
            ``get_pages``, ``firstOpen`` and per-mode filter parameters
            (``Site``, ``Visit``, ``Room``, ``State``, ...) from the query
            string.
        :return: HttpResponse (rendered template) or JsonResponse
            (pagination metadata when ``get_pages=True``)
        """
        review_type = request.GET.get("type", "blood_sample")
        page = int(request.GET.get('page', 1))
        table = request.GET.get('table', 'False')
        # day = currently-selected day; days = the 4-day navigation window.
        day, days = UploadView.get_dayformated_and_days(self, request)
        if review_type == "blood_sample":
            # Getting imported sample files in given day
            blood_samples_imported = BloodSampleImport.objects.filter(
                CreatedAt__range=(day.replace(hour=0, minute=0, second=0, microsecond=0),
                                  day.replace(
                    hour=23, minute=59, second=59, microsecond=0)))
            # Checking latest uploaded sample file is reviewed or not
            if blood_samples_imported.count() > 0:
                sample_import_latest = blood_samples_imported.last()
                # If not reviewed changing the Reviewed column to True
                # (side effect: opening the review page marks the latest
                # import of that day as reviewed).
                if not sample_import_latest.Reviewed:
                    sample_import_latest.Reviewed = True
                    sample_import_latest.save()
            # When first opening the review popup updating the day to the
            # latest day where records are available.
            # This will avoid user to unnecessary navigation to the day
            # where he last uploaded
            if request.GET.get('firstOpen', 'False') == "True" and \
                    BloodSample.objects.count():
                day = BloodSample.objects.all().order_by('-CreatedAt')\
                    .first().CreatedAt
                days = [(day - datetime.timedelta(days=x))
                        for x in range(4)]
                days.reverse()
            # Getting the BloodSample records
            query_results = BloodSample.objects.filter(
                CreatedAt__range=(day.replace(hour=0, minute=0, second=0,
                                              microsecond=0), day.replace(
                    hour=23, minute=59, second=59, microsecond=0)))\
                .order_by('CreatedAt', 'CohortId', 'Barcode')
            # if Status is passed as filter then reduce list to the
            # Status type
            if request.GET.get('State',''):
                query_results = query_results.filter(
                    State=request.GET.get('State','')
                )
            # Getting results based on pagination
            paginator = Paginator(query_results, settings.ITEMS_PER_PAGE)
            if request.GET.get('get_pages', 'False') == 'True':
                return JsonResponse({
                    'status': 200,
                    'total_pages': paginator.num_pages,
                    'current_page': page })
            if table == "True":
                results = paginator.get_page(page)
                return render(request,
                              self.blood_sample_review_table_template, {
                                  "objects": results.object_list,
                                  "current_page": page,
                                  "class": 'reviewBloodDay',
                                  "total_pages": paginator.num_pages
                              })
            # Disabling the feature dates
            # (shownextday is True only when today is inside the 4-day window,
            # so the template can hide the "next day" arrow at the boundary).
            shownextday = datetime.datetime.today().strftime(
                '%d%b%y') in [i.strftime('%d%b%y') for i in days]
            return render(request, self.blood_sample_review_template, {
                "current_page": page,
                "total_pages": paginator.num_pages,
                "days": days,
                "active": day,
                "shownextday": shownextday,
                "class": 'reviewBloodDay',
            })
        if review_type == "manifest":
            # When first opening the review popup updating the day to
            # the latest day where records are there.
            # This will avoid user to unnecessary navigation to the day
            # where he last uploaded
            if request.GET.get('firstOpen', 'False') == "True" and \
                    ManifestRecords.objects.count():
                day = ManifestRecords.objects.all().order_by(
                    '-CollectionDateTime').first().CollectionDateTime
                days = [(day - datetime.timedelta(days=x))
                        for x in range(4)]
                days.reverse()
            # Preparing raw sql query with filters
            # NOTE(review): GET values are interpolated directly into raw
            # SQL here — SQL injection risk; these should be passed as
            # query parameters to cursor.execute() instead.
            qry = ""
            if request.GET.get('Site', ''):
                qry += f" AND mr.\"Site\" = '{request.GET.get('Site')}'"
            if request.GET.get('Visit', ''):
                qry += f" AND mr.\"Visit\" = '{request.GET.get('Visit')}'"
            if request.GET.get('Room', ''):
                qry += f" AND mr.\"Room\" = '{request.GET.get('Room')}'"
            # Manifest rows joined to their matching blood samples (by
            # Barcode) for the selected day, plus any user filters.
            query = '''
                SELECT
                "mr"."id",
                "bs"."CohortId",
                "bs"."Barcode" as "BloodSampleBarcode",
                "bs"."AppointmentId",
                "bs"."SiteNurseEmail",
                "bs"."Comments",
                "bs"."CreatedAt",
                "bs"."State",
                "mr"."CohortId" as "ManifestCohortId",
                "mr"."Barcode" as "ManifestBarcode",
                "mr"."Visit",
                "mr"."Site",
                "mr"."Room",
                "mr"."Comments" as "ManifestComments",
                "mr"."CollectionDateTime"
                FROM blood_sample_manifestrecords as mr
                INNER JOIN blood_sample_bloodsample as bs ON \
                ( mr."Barcode" = bs."Barcode")
                WHERE bs."CreatedAt" BETWEEN '{}' AND '{}'{}
                order by bs."Barcode";
                '''.format(day.replace(hour=0, minute=0, second=0,
                                       microsecond=0).strftime("%Y-%m-%d %H:%M:%S"),
                           day.replace(hour=23, minute=59, second=59,
                                       microsecond=0).strftime("%Y-%m-%d %H:%M:%S"),
                           qry)
            with connection.cursor() as cursor:
                cursor.execute(query)
                columns = [col[0] for col in cursor.description]
                # Convert raw rows to dicts keyed by column name.
                data = [
                    dict(zip(columns, row))
                    for row in cursor.fetchall()
                ]
            items_per_page = settings.ITEMS_PER_PAGE
            total_pages = math.ceil(len(data) / items_per_page)
            # Clamp the requested page to the available range
            # (may clamp to 0 when there are no rows at all).
            if total_pages < page:
                page = total_pages
            if request.GET.get('get_pages', 'False') == 'True':
                return JsonResponse({
                    'status': 200,
                    'total_pages': 1 if total_pages == 0 else total_pages,
                    'current_page': 1 if page == 0 else page, })
            if table == "True":
                # records based on page to display
                record_start = (page - 1) * items_per_page
                record_end = page * items_per_page
                data = data[record_start:record_end]
                # Converting State, Visit and Site choices to field names
                for row in range(len(data)):
                    data[row]['State'] = state_status[data[row]['State']]
                    data[row]['Visit'] = visit_choices[data[row]['Visit']]
                    data[row]['Site'] = site_choices[data[row]['Site']]
                return render(request, self.manifest_review_table_template, {
                    "objects": data,
                    "current_page": 1 if page == 0 else page,
                    "total_pages": 1 if total_pages == 0 else total_pages,
                    "class": 'reviewManifestDay',
                })
            # Disabling the feature dates
            shownextday = datetime.datetime.today().strftime(
                '%d%b%y') in [i.strftime('%d%b%y') for i in days]
            # Getting Pagination count for Unmatched tables
            # Comparing Blood Sample with Manifest
            query = '''
                SELECT count(1)
                FROM blood_sample_bloodsample as bs
                left join blood_sample_manifestrecords as mr on \
                bs."Barcode" = mr."Barcode"
                WHERE mr."id" is null AND bs."State"='0' AND bs."CreatedAt" BETWEEN '{}' AND '{}'
            '''.format(
                day.replace(hour=0, minute=0, second=0, microsecond=0),
                day.replace(
                    hour=23, minute=59, second=59, microsecond=0))
            with connection.cursor() as cursor:
                cursor.execute(query)
                data_count = cursor.fetchall()[0][0]
            bs_unmatched_total_pages = math.ceil(
                data_count / settings.ITEMS_PER_PAGE)
            # Comparing Manifest with Blood Sample
            # NOTE(review): this query LEFT JOINs bloodsample onto
            # manifestrecords but still tests mr."id" IS NULL — mr is the
            # FROM table, so mr."id" can never be NULL and this count is
            # always 0. It looks like the condition should be
            # bs."id" IS NULL — confirm against the unmatched-manifest view.
            query = '''
                SELECT count(1)
                FROM blood_sample_manifestrecords as mr
                left join blood_sample_bloodsample as bs on \
                bs."Barcode" = mr."Barcode"
                WHERE mr."id" is null AND mr."CollectionDateTime" \
                BETWEEN '{}' AND '{}'
            '''.format(
                day.replace(hour=0, minute=0, second=0, microsecond=0),
                day.replace(
                    hour=23, minute=59, second=59, microsecond=0))
            with connection.cursor() as cursor:
                cursor.execute(query)
                data_count = cursor.fetchall()[0][0]
            mf_unmatched_total_pages = math.ceil(
                data_count / settings.ITEMS_PER_PAGE)
            return render(request, self.manifest_review_template, {
                "current_page": 1 if page == 0 else page,
                "total_pages": 1 if total_pages == 0 else total_pages,
                "bs_total_pages": 1 if bs_unmatched_total_pages == 0
                else bs_unmatched_total_pages,
                "mf_total_pages": 1 if mf_unmatched_total_pages == 0
                else mf_unmatched_total_pages,
                "days": days,
                "active": day,
                "shownextday": shownextday,
                "class": 'reviewManifestDay',
            })
        if review_type == "receipt":
            # When first opening the review popup updating the day to the
            # latest day where records are there.
            # This will avoid user to unnecessary navigation to the day
            # where he last uploaded
            if request.GET.get('firstOpen', 'False') == "True" and \
                    ReceiptRecords.objects.count():
                day = ReceiptRecords.objects.all().order_by(
                    '-DateTimeTaken').first().DateTimeTaken
                days = [(day - datetime.timedelta(days=x))
                        for x in range(4)]
                days.reverse()
            # Preparing raw sql query with filters
            # NOTE(review): same SQL-injection risk as the manifest branch —
            # GET values go straight into the SQL string.
            qry = ""
            if request.GET.get('Site', ''):
                qry += f" AND mr.\"Site\" = '{request.GET.get('Site')}'"
            if request.GET.get('Visit', ''):
                qry += f" AND mr.\"Visit\" = '{request.GET.get('Visit')}'"
            if request.GET.get('Room', ''):
                qry += f" AND mr.\"Room\" = '{request.GET.get('Room')}'"
            # Receipt rows joined (by Barcode) to their manifest and blood
            # sample records for the selected day.
            query = '''
                SELECT
                bs."CohortId",
                bs."AppointmentId",
                bs."Barcode" as "BloodSampleBarcode",
                bs."Comments",
                bs."SiteNurseEmail",
                bs."CreatedAt",
                bs."State",
                mr."id" as "ManifestId",
                mr."Barcode" as "ManifestBarcode",
                mr."CohortId" as "ManifestCohortId",
                mr."Site",
                mr."Visit",
                mr."Room",
                mr."CollectionDateTime",
                mr."Comments" as "ManifestComments",
                rr."id" as "ReceiptId",
                rr."Barcode" as "ReceiptBarcode",
                rr."Clinic",
                rr."DateTimeTaken",
                rr."TissueSubType",
                rr."ReceivedDateTime",
                rr."Volume",
                rr."VolumeUnit",
                rr."Condition",
                rr."Comments" as "ReceiptComments"
                from blood_sample_receiptrecords as rr
                inner join blood_sample_manifestrecords as mr \
                on (rr."Barcode"=mr."Barcode")
                inner join blood_sample_bloodsample as bs \
                on (bs."Barcode"=mr."Barcode")
                WHERE bs."CreatedAt" BETWEEN '{}' AND '{}' {}
                order by bs."Barcode";
                '''.format(day.replace(hour=0, minute=0, second=0,
                                       microsecond=0).strftime("%Y-%m-%d %H:%M:%S"),
                           day.replace(hour=23, minute=59, second=59,
                                       microsecond=0).strftime("%Y-%m-%d %H:%M:%S"),
                           qry)
            with connection.cursor() as cursor:
                cursor.execute(query)
                columns = [col[0] for col in cursor.description]
                data = [
                    dict(zip(columns, row))
                    for row in cursor.fetchall()
                ]
            items_per_page = settings.ITEMS_PER_PAGE
            total_pages = math.ceil(len(data) / settings.ITEMS_PER_PAGE)
            if total_pages < page:
                page = total_pages
            if request.GET.get('get_pages', 'False') == 'True':
                return JsonResponse({
                    'status': 200,
                    'total_pages': 1 if total_pages == 0 else total_pages,
                    'current_page': 1 if page == 0 else page,
                })
            if table == "True":
                # records based on page to display
                record_start = (page - 1) * items_per_page
                record_end = page * items_per_page
                data = data[record_start:record_end]
                # Converting State, Visit and Site choices to field names
                # (Clinic shares the site_choices mapping).
                for row in range(len(data)):
                    data[row]['State'] = state_status[data[row]['State']]
                    data[row]['Visit'] = visit_choices[data[row]['Visit']]
                    data[row]['Site'] = site_choices[data[row]['Site']]
                    data[row]['Clinic'] = site_choices[data[row]['Clinic']]
                return render(request, self.receipt_review_table_template, {
                    "objects": data,
                    "current_page": 1 if page == 0 else page,
                    "total_pages": 1 if total_pages == 0 else total_pages,
                    "class": 'reviewReceiptDay',
                })
            # Disabling the feature dates
            shownextday = datetime.datetime.today().strftime(
                '%d%b%y') in [i.strftime('%d%b%y') for i in days]
            # Getting Pagination count for Unmatched tables
            # Comaring Blood sample and Manifest with Receipt
            data_bs = UnmachedReceiptView.get_umatched_bs_data(self, day, qry)
            bs_total_pages = math.ceil(len(data_bs) / settings.ITEMS_PER_PAGE)
            # Comaring Receipt with Blood sample and Manifest
            # NOTE(review): qry (the user filters) is intentionally not
            # passed here — confirm whether the unmatched-receipt count
            # should honour the Site/Visit/Room filters.
            data_rr = UnmachedReceiptView.get_umatched_rr_data(self, day, "")
            rr_total_pages = math.ceil(len(data_rr) / settings.ITEMS_PER_PAGE)
            return render(request, self.receipt_review_template, {
                "current_page": 1 if page == 0 else page,
                "total_pages": 1 if total_pages == 0 else total_pages,
                "bsr_total_pages": 1 if bs_total_pages == 0
                else bs_total_pages,
                "rr_total_pages": 1 if rr_total_pages == 0
                else rr_total_pages,
                "days": days,
                "active": day,
                "shownextday": shownextday,
                "class": 'reviewReceiptDay',
            })
        if review_type == "processed":
            # When first opening the review popup updating the day to
            # the latest day where records are there.
            # This will avoid user to unnecessary navigation to
            # the day where he last uploaded
            if request.GET.get('firstOpen', 'False') == "True" and \
                    ProcessedReport.objects.count():
                day = ProcessedReport.objects.all().order_by(
                    '-ProcessedDateTime').first().ProcessedDateTime
                days = [(day - datetime.timedelta(days=x))
                        for x in range(4)]
                days.reverse()
            # Getting settings options
            # (comma-separated column selections per table: BS=blood sample,
            # MR=manifest, RR=receipt, PR=processed report).
            settings_options = dict(
                BS=[request.GET.get('BloodSample',
                                    "CohortId,Barcode,CreatedAt,Comments,State")],
                MR=[request.GET.get('Manifest', "Visit,Site,Room,Barcode")],
                RR=[request.GET.get('Receipt', "SampleId,Clinic")],
                PR=[request.GET.get('Processed',
                                    "ParentId,TissueSubType,ProcessedDateTime" +
                                    ",ReceivedDateTime,Volume,NumberOfChildren,Comments")])
            # Split each one-element ["a,b,c"] list into ["a", "b", "c"].
            # NOTE: this loop rebinds the name `table` (it previously held
            # the 'table' GET flag); it is re-read from GET further below.
            for table, columns in settings_options.items():
                for column in columns:
                    settings_options[table] = column.split(',')
            # Getting filters options
            filter_options = dict(
                DF=[""],
                DT=[""],
                Site=[request.GET.get('Site', '')],
                Room=[request.GET.get('Room', '')],
                Visit=[request.GET.get('Visit', '')],
                State=[request.GET.get('State', '')]
            )
            # finding the length of each table and assigning value to zero
            # if doesn't contain any selected columns from settings collection
            # length is required for colors in the display in download tab
            bs_len = len(settings_options['BS'])
            if bs_len == 1 and settings_options['BS'][0] == '':
                bs_len = 0
            mr_len = len(settings_options['MR'])
            if mr_len == 1 and settings_options['MR'][0] == '':
                mr_len = 0
            rr_len = len(settings_options['RR'])
            if rr_len == 1 and settings_options['RR'][0] == '':
                rr_len = 0
            pr_len = len(settings_options['PR'])
            if pr_len == 1 and settings_options['PR'][0] == '':
                pr_len = 0
            # generating a raw sql query based on filters and settings options
            # as requested by user
            # NOTE(review): user-supplied column names and filter values are
            # concatenated into this SQL — SQL injection risk.
            query = """ SELECT """
            headers = []
            # selecting the required columns selected by the user
            for table, columns in settings_options.items():
                for column in columns:
                    if column != '':
                        # headers for columns in download tab that needs to be
                        # displayed
                        headers.append(column)
                    if table == 'BS' and column != '':
                        query += "bs.\"" + column + "\", "
                    if table == 'MR' and column != '':
                        query += "mr.\"" + column + "\", "
                    if table == 'RR' and column != '':
                        query += "rr.\"" + column + "\", "
                    if table == 'PR' and column != '':
                        query += "pr.\"" + column + "\", "
            query += "bs.\"id\" as \"BloodSampleId\", pr.\"id\" \
                as \"ProcessedId\", "
            # Drop the trailing ", " left by the column loop.
            query = query[:-2]
            query += """from blood_sample_processedreport as pr
                join blood_sample_receiptrecords as rr on \
                pr."ParentId" = rr."SampleId"
                join blood_sample_manifestrecords as mr on \
                rr."Barcode" = mr."Barcode"
                join blood_sample_bloodsample as bs on \
                bs."Barcode" = mr."Barcode"
                """
            extra = ' WHERE '
            # adding filtered values as requested by user
            for filt, value in filter_options.items():
                if filt == 'Site' and value[0] != '':
                    extra += "mr.\"Site\"='" + value[0] + "' AND "
                if filt == 'Room' and value[0] != '':
                    extra += "mr.\"Room\"='" + value[0] + "' AND "
                if filt == 'Visit' and value[0] != '':
                    extra += "mr.\"Visit\"='" + value[0] + "' AND "
                if filt == 'State' and value[0] != '':
                    # Reverse-map the display label back to the stored key.
                    val = list(state_status.keys())[
                        list(state_status.values()).index(value[0])]
                    extra += "bs.\"State\"='" + val + "' AND "
            extra += """ bs.\"CreatedAt\" BETWEEN '{}' AND '{}' AND """\
                .format(day.replace(hour=0, minute=0, second=0, microsecond=0),
                        day.replace(hour=23, minute=59, second=59, microsecond=0))
            # Strip the trailing "AND " after the last condition.
            extra = extra[0:-4]
            # NOTE(review): the date clause above is always appended, so
            # extra can never equal ' WH' — this guard appears to be dead.
            if extra != ' WH':
                query += extra
            # ordering the data based on ids and barcode
            query += ' order by bs.\"CohortId\", mr.\"Barcode\"'
            with connection.cursor() as cursor:
                cursor.execute(query)
                # Fetch rows using fetchall() method.
                data = cursor.fetchall()
            # updating the data of enum field with respective data
            # (rows are tuples, so each is converted to a list, patched,
            # and converted back).
            if 'State' in settings_options['BS']:
                ind = headers.index('State')
                for row in range(len(data)):
                    data[row] = list(data[row])
                    data[row][ind] = state_status[data[row][ind]]
                    data[row] = tuple(data[row])
            if 'Site' in settings_options['MR']:
                ind = headers.index('Site')
                for row in range(len(data)):
                    if data[row][ind] is not None:
                        data[row] = list(data[row])
                        data[row][ind] = site_choices[data[row][ind]]
                        data[row] = tuple(data[row])
            if 'Visit' in settings_options['MR']:
                ind = headers.index('Visit')
                for row in range(len(data)):
                    if data[row][ind] is not None:
                        data[row] = list(data[row])
                        data[row][ind] = visit_choices[data[row][ind]]
                        data[row] = tuple(data[row])
            if 'Clinic' in settings_options['RR']:
                ind = headers.index('Clinic')
                for row in range(len(data)):
                    if data[row][ind] is not None:
                        data[row] = list(data[row])
                        data[row][ind] = site_choices[data[row][ind]]
                        data[row] = tuple(data[row])
            if 'SiteHeld' in settings_options['PR']:
                ind = headers.index('SiteHeld')
                for row in range(len(data)):
                    if data[row][ind] is not None:
                        data[row] = list(data[row])
                        data[row][ind] = site_held_choices[data[row][ind]]
                        data[row] = tuple(data[row])
            # display names of columns that display in download tab as headers
            row_headers = {
                'CohortId': 'Cohort id',
                'AppointmentId': 'Appointment id',
                'SiteNurseEmail': 'Site Nurse Email',
                'CreatedAt': 'Appointment date',
                'CollectionDateTime': 'Collection Date Time',
                'DateTimeTaken': 'Date Time Taken',
                'SampleId': 'Sample id',
                'TissueSubType': 'Tissue Sub Type',
                'ReceivedDateTime': 'Received Date Time',
                'VolumeUnit': 'Volume Unit',
                'ParentId': 'Parent id',
                'ProcessedDateTime': 'Processed Date Time',
                'NumberOfChildren': 'Number Of Children',
                'SiteHeld': 'Site held',
                'id': 'Id'
            }
            # updating the headers based on the column name
            for i in range(len(headers)):
                if headers[i] in row_headers:
                    headers[i] = row_headers[headers[i]]
            # Re-read page/table from GET: `table` was clobbered by the
            # settings_options loop above.
            page = int(request.GET.get('page', 1))
            table = request.GET.get('table', 'False')
            total_pages = math.ceil(len(data) / settings.ITEMS_PER_PAGE)
            items_per_page = settings.ITEMS_PER_PAGE
            # records based on page to display
            record_start = (page - 1) * items_per_page
            record_end = page * items_per_page
            if len(data) != 0:
                # Header row is prepended so the template/download renders it
                # as the first row.
                headers.extend(['BloodSample Id', 'Processed Id'])
                data = [headers] + data[record_start:record_end]
            if request.GET.get('get_pages', 'False') == 'True':
                return JsonResponse({
                    'status': 200,
                    'total_pages': 1 if total_pages == 0 else total_pages,
                    'current_page': 1 if page == 0 else page,
                })
            if table == "True":
                return render(request, self.processed_review_table_template, {
                    "objects": data,
                    "current_page": 1 if page == 0 else page,
                    "total_pages": 1 if total_pages == 0 else total_pages,
                    'db_data': data,
                    'bs_len': bs_len,
                    'mr_len': mr_len, 'rr_len': rr_len,
                    'pr_len': pr_len, 'settings': settings_options,
                    'filters': filter_options,
                    "class": 'reviewProcessedDay',
                })
            # Disabling the feature dates
            shownextday = datetime.datetime.today().strftime(
                '%d%b%y') in [i.strftime('%d%b%y') for i in days]
            # NOTE(review): GET values interpolated into SQL again — same
            # injection risk as the other branches.
            qry = ""
            if request.GET.get('Site', ''):
                qry += f" AND mr.\"Site\" = '{request.GET.get('Site')}'"
            if request.GET.get('Visit', ''):
                qry += f" AND mr.\"Visit\" = '{request.GET.get('Visit')}'"
            if request.GET.get('Room', ''):
                qry += f" AND mr.\"Room\" = '{request.GET.get('Room')}'"
            if request.GET.get('State', ''):
                for key, value in state_status.items():
                    if value == request.GET.get('State'):
                        qry += f" AND bs.\"State\" = '{key}'"
            # Getting Pagination count for Unmatched tables
            data_pr = UnmachedProcessedView.get_umatched_pr_data(
                self, day, qry)
            pr_total_pages = math.ceil(len(data_pr) / settings.ITEMS_PER_PAGE)
            data_rr = \
                UnmachedProcessedView.get_umatched_rr_data(self, day, "")
            rr_total_pages = math.ceil(len(data_rr) / settings.ITEMS_PER_PAGE)
            context = {
                "current_page": 1 if page == 0 else page,
                "total_pages": 1 if total_pages == 0 else total_pages,
                'db_data': data,
                'bs_len': bs_len,
                'mr_len': mr_len, 'rr_len': rr_len,
                'pr_len': pr_len,
                'settings': settings_options,
                'filters': filter_options,
                "class": 'reviewProcessedDay',
                "days": days,
                "active": day,
                "shownextday": shownextday,
                "pr_total_pages": 1 if pr_total_pages == 0
                else pr_total_pages,
                "rr_total_pages": 1 if rr_total_pages == 0
                else rr_total_pages,
            }
            return render(request, self.processed_review_template, context)
class UnmachedManifestView(LoginRequiredMixin, View):
    """
    Class for getting unmatched Manifest and blood sample records.

    Serves the two "unmatched" tables of the manifest review screen:
    blood samples with no manifest record for the day, and manifest
    records with no blood sample.
    """
    bs_review_table_template = 'review_manifest/bs_unmatched-table.html'
    mf_review_table_template = 'review_manifest/mf_unmatched-table.html'
    def get(self, request, *args, **kwargs):
        """
        Method to get the unmatched Manifest and blood sample records.

        :param request: request object; reads ``type`` (``umatched_bs`` or
            ``umatched_mf``), ``page``, ``get_pages`` and the
            Site/Visit/Room filters from the query string.
        :return: HttpResponse (table fragment) or JsonResponse (page counts)
        """
        umatched_type = request.GET.get("type", "")
        page = int(request.GET.get('page', 1))
        day, days = UploadView.get_dayformated_and_days(self, request)
        if umatched_type == 'umatched_bs':
            # Getting Unmatched records comparing Blood sample with
            # Manifest records
            mf_barcodes = ManifestRecords.objects.filter(
                CollectionDateTime__range=(day.replace(hour=0,
                                                       minute=0, second=0, microsecond=0), day.replace(
                    hour=23, minute=59, second=59, microsecond=0))).\
                values_list('Barcode', flat=True)[::1]
            if mf_barcodes:
                # Exclude samples whose barcode appears among the day's
                # manifest barcodes.
                # NOTE(review): barcodes are joined into a single iregex
                # alternation — a barcode containing regex metacharacters
                # would break or falsify the match; confirm barcodes are
                # always alphanumeric.
                query_results = BloodSample.objects.filter(
                    CreatedAt__range=(day.replace(hour=0, minute=0,
                                                  second=0, microsecond=0), day.replace(
                        hour=23, minute=59, second=59, microsecond=0)), State=0
                ).exclude(Barcode__iregex=r'(' + '|'.join(mf_barcodes)
                          + ')').order_by('Barcode')
            else:
                query_results = BloodSample.objects.filter(
                    CreatedAt__range=(day.replace(hour=0, minute=0,
                                                  second=0, microsecond=0), day.replace(
                        hour=23, minute=59, second=59, microsecond=0)), State=0
                ).order_by('Barcode')
            paginator = Paginator(query_results, settings.ITEMS_PER_PAGE)
            # Clamp requested page into the available range.
            if paginator.num_pages < page:
                page = paginator.num_pages
            if request.GET.get('get_pages', 'False') == 'True':
                return JsonResponse({
                    'status': 200,
                    'total_pages': paginator.num_pages,
                    'current_page': page})
            results = paginator.get_page(page)
            return render(request, self.bs_review_table_template, {
                "objects": results.object_list,
                "current_page": page,
                "total_pages": paginator.num_pages
            })
        if umatched_type == 'umatched_mf':
            # Getting Unmatched records comparing Manifest records
            # with Blood sample
            bs_barcodes = BloodSample.objects.filter(
                CreatedAt__range=(day.replace(hour=0, minute=0, second=0,
                                              microsecond=0), day.replace(
                    hour=23, minute=59, second=59, microsecond=0))).\
                exclude(State=1).values_list('Barcode', flat=True)[::1]
            if bs_barcodes:
                # Same iregex-alternation matching caveat as above.
                query_results = ManifestRecords.objects.filter(
                    CollectionDateTime__range=(day.replace(hour=0, minute=0,
                                                           second=0, microsecond=0), day.replace(
                        hour=23, minute=59, second=59, microsecond=0))
                ).exclude(Barcode__iregex=r'(' + '|'.join(bs_barcodes) +
                          ')').order_by('Barcode')
            else:
                query_results = ManifestRecords.objects.filter(
                    CollectionDateTime__range=(day.replace(hour=0, minute=0,
                                                           second=0, microsecond=0), day.replace(
                        hour=23, minute=59, second=59, microsecond=0))
                ).order_by('Barcode')
            # Optional user filters (safe here: applied via the ORM).
            if request.GET.get('Room', ''):
                query_results = query_results.filter(
                    Room=request.GET.get('Room'))
            if request.GET.get('Visit', ''):
                query_results = query_results.filter(
                    Visit=request.GET.get('Visit'))
            if request.GET.get('Site', ''):
                query_results = query_results.filter(
                    Site=request.GET.get('Site'))
            paginator = Paginator(query_results, settings.ITEMS_PER_PAGE)
            if paginator.num_pages < page:
                page = paginator.num_pages
            if request.GET.get('get_pages', 'False') == 'True':
                return JsonResponse({
                    'status': 200,
                    'total_pages': paginator.num_pages,
                    'current_page': page
                })
            results = paginator.get_page(page)
            return render(request, self.mf_review_table_template, {
                "objects": results.object_list,
                "current_page": page,
                "total_pages": paginator.num_pages
            })
class UnmachedReceiptView(LoginRequiredMixin, View):
    """
    Class for getting unmatched Receipt and Manifest records.

    Serves the two "unmatched" tables of the receipt review screen:
    manifest+blood-sample rows with no receipt, and receipt rows with no
    matching manifest/blood-sample.
    """
    bs_review_table_template = 'review_receipt/bsr_unmatched-table.html'
    rr_review_table_template = 'review_receipt/rr_unmatched-table.html'
    def get(self, request, *args, **kwargs):
        """
        Method to get the unmatched Receipt and Manifest records.

        :param request: request object; reads ``type`` (``umatched_bsr`` or
            ``umatched_rr``), ``page``, ``get_pages`` and Site/Visit/Room
            filters from the query string.
        :return: HttpResponse (table fragment) or JsonResponse (page counts)
        """
        umatched_type = request.GET.get("type", "")
        page = int(request.GET.get('page', 1))
        day, days = UploadView.get_dayformated_and_days(self, request)
        items_per_page = settings.ITEMS_PER_PAGE
        # NOTE(review): GET values are interpolated directly into the raw
        # SQL fragment below — SQL injection risk; should be parameterized.
        qry = ""
        if request.GET.get('Site', ''):
            qry += f" AND mr.\"Site\" = '{request.GET.get('Site')}'"
        if request.GET.get('Visit', ''):
            qry += f" AND mr.\"Visit\" = '{request.GET.get('Visit')}'"
        if request.GET.get('Room', ''):
            qry += f" AND mr.\"Room\" = '{request.GET.get('Room')}'"
        if umatched_type == 'umatched_bsr':
            # Getting Unmatched records comparing Blood Sample and
            # Manifest records with Receipt
            data = self.get_umatched_bs_data(day, qry)
            total_pages = math.ceil(len(data) / settings.ITEMS_PER_PAGE)
            if total_pages < page:
                page = total_pages
            if request.GET.get('get_pages', 'False') == 'True':
                return JsonResponse({
                    'status': 200,
                    'total_pages': 1 if total_pages == 0 else total_pages,
                    'current_page': 1 if page == 0 else page
                })
            # records based on page to display
            record_start = (page - 1) * items_per_page
            record_end = page * items_per_page
            data = data[record_start:record_end]
            # Converting State, Visit and Site choices to field names
            for row in range(len(data)):
                data[row]['State'] = state_status[data[row]['State']]
                data[row]['Visit'] = visit_choices[data[row]['Visit']]
                data[row]['Site'] = site_choices[data[row]['Site']]
            return render(request, self.bs_review_table_template, {
                "objects": data,
                "current_page": page,
                "total_pages": total_pages
            })
        elif umatched_type == 'umatched_rr':
            # Getting Unmatched records comparing Receipt with
            # Blood Sample and Manifest records
            # (filters are not applied here — qry is passed as "").
            data = self.get_umatched_rr_data(day, "")
            total_pages = math.ceil(len(data) / settings.ITEMS_PER_PAGE)
            if total_pages < page:
                page = total_pages
            if request.GET.get('get_pages', 'False') == 'True':
                return JsonResponse({
                    'status': 200,
                    'total_pages': 1 if total_pages == 0 else total_pages,
                    'current_page': 1 if page == 0 else page
                })
            # records based on page to display
            record_start = (page - 1) * items_per_page
            record_end = page * items_per_page
            data = data[record_start:record_end]
            # Converting Clinic choices to field names
            for row in range(len(data)):
                data[row]['Clinic'] = site_choices[data[row]['Clinic']]
            return render(request, self.rr_review_table_template, {
                "objects": data,
                "current_page": 1 if page == 0 else page,
                "total_pages": 1 if total_pages == 0 else total_pages
            })
    def get_umatched_bs_data(self, day, qry=""):
        """
        Method to get unmatched records comparing Blood Sample and
        Manifest records with Receipt.

        :param day: datetime whose calendar day bounds the query range
        :param qry: extra raw-SQL ``AND ...`` fragment appended verbatim to
            the WHERE clause (caller is responsible for its safety —
            see the injection note in ``get``)
        :return: list of dicts, one per row, keyed by column name
        """
        query = '''
            SELECT "mr"."id",
            bs."CohortId",
            bs."Barcode" as "BloodSampleBarcode",
            bs."AppointmentId",
            bs."SiteNurseEmail",
            bs."Comments",
            bs."CreatedAt",
            bs."State",
            mr."Barcode" as "ManifestBarcode",
            mr."Visit",
            mr."Site",
            mr."Room",
            mr."CollectionDateTime",
            mr."Comments" as "ManifestComments"
            FROM blood_sample_manifestrecords as mr
            INNER JOIN blood_sample_bloodsample as bs \
            ON ( mr."Barcode" = bs."Barcode")
            WHERE mr."Barcode" not in \
            (select "Barcode" from blood_sample_receiptrecords)
            AND mr."CollectionDateTime" BETWEEN '{}' AND '{}'{}
            order by bs."Barcode";
            '''.format(day.replace(hour=0, minute=0, second=0, microsecond=0)
                       .strftime("%Y-%m-%d %H:%M:%S"),
                       day.replace(hour=23, minute=59, second=59,
                                   microsecond=0).strftime("%Y-%m-%d %H:%M:%S"), qry)
        with connection.cursor() as cursor:
            cursor.execute(query)
            columns = [col[0] for col in cursor.description]
            data = [
                dict(zip(columns, row))
                for row in cursor.fetchall()
            ]
        return data
    def get_umatched_rr_data(self, day, qry=""):
        """
        Method to get Unmatched records comparing Receipt with
        Blood Sample and Manifest records.

        :param day: datetime whose calendar day bounds ``DateTimeTaken``
        :param qry: extra raw-SQL ``AND ...`` fragment appended verbatim
            (same safety caveat as :meth:`get_umatched_bs_data`)
        :return: list of dicts, one per row, keyed by column name
        """
        query = '''
            SELECT
            rr."Barcode",
            rr."id",
            rr."Clinic",
            rr."DateTimeTaken",
            rr."TissueSubType",
            rr."ReceivedDateTime",
            rr."Volume",
            rr."VolumeUnit",
            rr."Condition",
            rr."Comments" as "ReceiptComments"
            FROM blood_sample_receiptrecords as rr
            WHERE rr."Barcode" not in (\
            SELECT
            "bs"."Barcode"
            FROM blood_sample_manifestrecords as mr
            INNER JOIN blood_sample_bloodsample as bs ON \
            ( mr."Barcode" = bs."Barcode" )
            )
            AND rr."DateTimeTaken" BETWEEN '{}' AND '{}'{}
            order by rr."Barcode";
            '''.format(day.replace(hour=0, minute=0, second=0,
                                   microsecond=0).strftime("%Y-%m-%d %H:%M:%S"),
                       day.replace(hour=23, minute=59, second=59,
                                   microsecond=0).strftime("%Y-%m-%d %H:%M:%S"), qry)
        with connection.cursor() as cursor:
            cursor.execute(query)
            columns = [col[0] for col in cursor.description]
            data = [
                dict(zip(columns, row))
                for row in cursor.fetchall()
            ]
        return data
class UnmachedProcessedView(LoginRequiredMixin, View):
"""
Class for getting unmatched Processed and Receipt records
"""
rr_review_table_template = 'review_processed/rr_unmatched-table.html'
pr_review_table_template = 'review_processed/pr_unmatched-table.html'
def get(self, request, *args, **kwargs):
"""
Method to get the unmatched Processed and Receipt records
:param request: request object
:return: HttpResponse object
"""
umatched_type = request.GET.get("type", "")
page = int(request.GET.get('page', 1))
day, days = UploadView.get_dayformated_and_days(self, request)
items_per_page = settings.ITEMS_PER_PAGE
qry = ""
if request.GET.get('Site', ''):
qry += f" AND mr.\"Site\" = '{request.GET.get('Site')}'"
if request.GET.get('Visit', ''):
qry += f" AND mr.\"Visit\" = '{request.GET.get('Visit')}'"
if request.GET.get('Room', ''):
qry += f" AND mr.\"Room\" = '{request.GET.get('Room')}'"
if umatched_type == 'umatched_rr':
# Getting Unmatched records comparing Blood Sample and
# Manifest and Receipt records with Processed records
if request.GET.get('State', ''):
for key, value in state_status.items():
if value == request.GET.get('State'):
qry += f" AND bs.\"State\" = '{key}'"
data = self.get_umatched_rr_data(day, qry)
total_pages = math.ceil(len(data) / settings.ITEMS_PER_PAGE)
if total_pages < page:
page = total_pages
if request.GET.get('get_pages', 'False') == 'True':
return JsonResponse({
'status': 200,
'total_pages': 1 if total_pages == 0 else total_pages,
'current_page': 1 if page == 0 else page})
# records based on page to display
record_start = (page - 1) * items_per_page
record_end = page * items_per_page
data = data[record_start:record_end]
# Converting State, Visit, Clinic and Site choices to field names
for row in range(len(data)):
data[row]['State'] = state_status[data[row]['State']]
data[row]['Visit'] = visit_choices[data[row]['Visit']]
data[row]['Site'] = site_choices[data[row]['Site']]
data[row]['Clinic'] = site_choices[data[row]['Clinic']]
return render(request, self.rr_review_table_template, {
"objects": data,
"current_page": page,
"total_pages": total_pages
})
elif umatched_type == 'umatched_pr':
# Getting Unmatched records comparing Processed records with
# Blood Sample and Manifest and Receipt records
data = self.get_umatched_pr_data(day, "")
total_pages = math.ceil(len(data) / settings.ITEMS_PER_PAGE)
if total_pages < page:
page = total_pages
if request.GET.get('get_pages', 'False') == 'True':
return JsonResponse({
'status': 200,
'total_pages': 1 if total_pages == 0 else total_pages,
'current_page': 1 if page == 0 else page
})
# records based on page to display
record_start = (page - 1) * items_per_page
record_end = page * items_per_page
data = data[record_start:record_end]
# Converting the choice field
for row in range(len(data)):
data[row]['SiteHeld'] = site_held_choices[data[row]['SiteHeld']]
return render(request, self.pr_review_table_template, {
"objects": data,
"current_page": 1 if page == 0 else page,
"total_pages": 1 if total_pages == 0 else total_pages
})
def get_umatched_pr_data(self, day, qry=""):
"""
Method to grt unmatched records comparing Processed records with
Blood Sample and Manifest and Receipt records
"""
query = '''
SELECT "pr"."id",
pr."ParentId",
pr."Barcode",
pr."ProcessedDateTime",
pr."Volume",
pr."NumberOfChildren",
pr."Comments",
pr."SiteHeld"
FROM blood_sample_processedreport as pr
WHERE pr."ParentId" NOT IN (
SELECT
rr."SampleId"
FROM blood_sample_receiptrecords as rr
inner join blood_sample_manifestrecords as mr on \
rr."Barcode" = mr."Barcode"
inner join blood_sample_bloodsample as bs on \
bs."Barcode" = mr."Barcode"
) AND pr."ImportId_id" IN (
SELECT pi."id"
FROM blood_sample_processedimports as pi
WHERE pi."CreatedAt" BETWEEN '{}' AND '{}'{}
)
ORDER BY pr."ParentId";
'''.format(day.replace(hour=0, minute=0, second=0,
microsecond=0).strftime("%Y-%m-%d %H:%M:%S"),
day.replace(hour=23, minute=59, second=59,
microsecond=0).strftime("%Y-%m-%d %H:%M:%S"), qry)
with connection.cursor() as cursor:
cursor.execute(query)
columns = [col[0] for col in cursor.description]
data = [
dict(zip(columns, row))
for row in cursor.fetchall()
]
return data
def get_umatched_rr_data(self, day, qry=""):
    """
    Return blood-sample / manifest / receipt rows with no processed report.

    The inverse of ``get_umatched_pr_data``: selects the joined
    receipt-manifest-bloodsample records whose ``SampleId`` never appears
    as a ``ParentId`` in ``blood_sample_processedreport``, restricted to
    active samples (``State = '0'``) collected on the calendar day of
    ``day``.

    Args:
        day: datetime; window is 00:00:00-23:59:59 of this day, applied to
            ``manifestrecords.CollectionDateTime``.
        qry: extra SQL appended verbatim after the BETWEEN clause.
            NOTE(review): str.format interpolation -- literals only.

    Returns:
        list[dict]: one dict per row, keyed by the selected column aliases.
    """
    query = '''
        SELECT
        bs."CohortId",
        bs."AppointmentId",
        bs."Barcode" as "BloodSampleBarcode",
        bs."Comments",
        bs."SiteNurseEmail",
        bs."CreatedAt",
        bs."State",
        mr."id" as "ManifestId",
        mr."Barcode" as "ManifestBarcode",
        mr."CohortId" as "ManifestCohortId",
        mr."Site",
        mr."Visit",
        mr."Room",
        mr."CollectionDateTime",
        mr."Comments" as "ManifestComments",
        rr."id" as "ReceiptId",
        rr."Barcode" as "ReceiptBarcode",
        rr."Clinic",
        rr."DateTimeTaken",
        rr."TissueSubType",
        rr."ReceivedDateTime",
        rr."Volume",
        rr."VolumeUnit",
        rr."SampleId",
        rr."Comments" as "ReceiptComments"
        FROM blood_sample_receiptrecords as rr
        inner join blood_sample_manifestrecords as mr on \
        rr."Barcode"=mr."Barcode"
        inner join blood_sample_bloodsample as bs on \
        bs."Barcode"=mr."Barcode"
        WHERE rr."SampleId" not in (
            SELECT
            "pr"."ParentId"
            FROM blood_sample_processedreport as pr
            join blood_sample_receiptrecords as rr on \
            pr."ParentId"=rr."SampleId"
            join blood_sample_manifestrecords as mr on \
            rr."Barcode"=mr."Barcode"
            join blood_sample_bloodsample as bs on \
            bs."Barcode" = mr."Barcode"
        )
        AND bs."State" = '0'
        AND mr."CollectionDateTime" BETWEEN '{}' AND '{}'{}
        order by bs."Barcode";
        '''.format(day.replace(hour=0, minute=0, second=0,
                               microsecond=0).strftime("%Y-%m-%d %H:%M:%S"),
                   day.replace(hour=23, minute=59, second=59,
                               microsecond=0).strftime("%Y-%m-%d %H:%M:%S"), qry)
    with connection.cursor() as cursor:
        cursor.execute(query)
        # Convert raw tuples into dicts keyed by the cursor's column names.
        columns = [col[0] for col in cursor.description]
        data = [
            dict(zip(columns, row))
            for row in cursor.fetchall()
        ]
    return data
|
990,318 | 9610c1fcce2b3fab3b79e2a8d76bab7ac9e690e2 | import struct
from enum import Enum
from enum import IntEnum
from tls_parser.exceptions import NotEnoughData, UnknownTypeByte, UnknownTlsVersionByte
from tls_parser.tls_version import TlsVersionEnum
from typing import Tuple, Sequence
class TlsRecordTlsVersionBytes(Enum):
    """Two-byte wire encoding of each SSL/TLS protocol version.

    Member names mirror TlsVersionEnum so the two enums can be mapped
    by name in TlsRecordHeader.from_bytes/to_bytes.
    """
    SSLV3 = b"\x03\x00"
    TLSV1 = b"\x03\x01"
    TLSV1_1 = b"\x03\x02"
    TLSV1_2 = b"\x03\x03"
class TlsRecordTypeByte(IntEnum):
    """Content-type byte carried in the first byte of a TLS record."""
    CHANGE_CIPHER_SPEC = 0x14
    ALERT = 0x15
    HANDSHAKE = 0x16
    APPLICATION_DATA = 0x17
    HEARTBEAT = 0x18
class TlsRecordHeader:
    """Parsed form of the fixed 5-byte TLS record header."""

    def __init__(self, record_type: TlsRecordTypeByte, tls_version: TlsVersionEnum, record_length: int) -> None:
        self.type = record_type
        self.tls_version = tls_version
        self.length = record_length

    @classmethod
    def from_bytes(cls, raw_bytes: bytes) -> Tuple["TlsRecordHeader", int]:
        """Parse a header from *raw_bytes*; return (header, bytes consumed).

        Raises NotEnoughData when fewer than 5 bytes are available and
        UnknownTlsVersionByte for an unrecognized version pair.
        """
        header_size = 5
        if len(raw_bytes) < header_size:
            raise NotEnoughData()

        parsed_type = TlsRecordTypeByte(struct.unpack("B", raw_bytes[0:1])[0])
        try:
            version_bytes = TlsRecordTlsVersionBytes(raw_bytes[1:3])
        except ValueError as e:
            raise UnknownTlsVersionByte(e.args[0], parsed_type)

        payload_length = struct.unpack("!H", raw_bytes[3:5])[0]
        header = TlsRecordHeader(parsed_type, TlsVersionEnum[version_bytes.name], payload_length)
        return header, header_size

    def to_bytes(self) -> bytes:
        """Serialize back to the 5-byte wire format."""
        version_bytes = TlsRecordTlsVersionBytes[self.tls_version.name].value
        # type (1 byte) + version (2 bytes) + length (2 bytes, big-endian)
        return struct.pack("B", self.type.value) + version_bytes + struct.pack("!H", self.length)
class TlsSubprotocolMessage:
    """Opaque payload of one record-layer message (handshake, alert, ...)."""

    def __init__(self, message_data: bytes) -> None:
        self.message_data = message_data

    def to_bytes(self) -> bytes:
        """Return the raw payload bytes unchanged."""
        return self.message_data

    @property
    def size(self) -> int:
        """Length in bytes of the serialized message."""
        return len(self.to_bytes())
class TlsRecord:
    """One TLS record: a header plus its subprotocol message(s)."""

    def __init__(self, record_header: TlsRecordHeader, subprotocol_messages: Sequence[TlsSubprotocolMessage]) -> None:
        self.header = record_header
        # Several messages can be concatenated into a single record, but
        # in practice this only applies to the handshake protocol.
        is_handshake = self.header.type == TlsRecordTypeByte.HANDSHAKE
        if not is_handshake and len(subprotocol_messages) != 1:
            raise ValueError("Received multiple subprotocol messages for a non-handshake record")
        self.subprotocol_messages = subprotocol_messages

    @classmethod
    def from_bytes(cls, raw_bytes: bytes) -> Tuple["TlsRecord", int]:
        """Parse one record; return (record, total bytes consumed)."""
        header, consumed = TlsRecordHeader.from_bytes(raw_bytes)

        if header.type not in TlsRecordTypeByte:
            raise UnknownTypeByte()

        payload = raw_bytes[consumed:consumed + header.length]  # noqa: E203
        if len(payload) < header.length:
            raise NotEnoughData()

        # The payload is kept opaque -- it may hold several messages.
        message = TlsSubprotocolMessage(payload)
        return TlsRecord(header, [message]), consumed + header.length

    def to_bytes(self) -> bytes:
        """Serialize the header followed by every message payload."""
        pieces = [self.header.to_bytes()]
        pieces.extend(message.to_bytes() for message in self.subprotocol_messages)
        return b"".join(pieces)
|
990,319 | f5e5e045ca7b180236f23679a34a8ffc7d86d6da | import graphene
from graphene_django import DjangoObjectType
from todocore.models import Todo, Task , Comment, File
import json
from todocore.users.schema import UserType
class TodoType(DjangoObjectType):
    """GraphQL object type auto-derived from the Todo Django model."""
    class Meta:
        model = Todo
class Query(object):
    """Root query fields for the todo app."""
    all_todos = graphene.List(TodoType)

    def resolve_all_todos(self, info, **kwargs):
        # Restrict results to the authenticated user's own todos.
        return Todo.objects.filter(todoUser = info.context.user.id)
class CreateTodo(graphene.Mutation):
    """Mutation: create a Todo owned by the requesting user."""
    # Output fields echoed back to the client.
    id = graphene.Int()
    todoUser = graphene.Field(UserType)
    todoName = graphene.String()
    todoDate = graphene.String()

    class Arguments:
        todoName = graphene.String(required= True)
        todoDate = graphene.String(required=True)

    def mutate(self, info, todoName, todoDate):
        """Persist a new Todo and return its saved fields."""
        todo = Todo(
            todoUser = info.context.user,
            todoName = todoName,
            todoDate = todoDate,
        )
        todo.save()
        return CreateTodo(
            id = todo.id,
            todoUser = todo.todoUser,
            todoName = todo.todoName,
            todoDate = todo.todoDate
        )
class UpdateTodo(graphene.Mutation):
    """Mutation: rename an existing Todo by id."""
    id = graphene.Int()
    todoName = graphene.String()

    class Arguments:
        todoId = graphene.String(required=True)
        todoName = graphene.String(required=True)

    def mutate(self, info, todoId ,todoName):
        """Look up the Todo, change its name and return the result.

        Raises Todo.DoesNotExist when the id is unknown.
        """
        todoInstance = Todo.objects.get(id= int(todoId))
        todoInstance.todoName = todoName
        todoInstance.save()
        return UpdateTodo(
            id = todoInstance.id,
            todoName = todoInstance.todoName,
        )
class DeleteTodo(graphene.Mutation):
    """Mutation: delete a Todo and cascade over its tasks/comments/files."""
    id = graphene.Int()
    todoName = graphene.String()

    class Arguments:
        todoId = graphene.String(required=True)

    def mutate(self, info, todoId ):
        """Delete the Todo (and dependents) and echo its former id/name.

        Raises Todo.DoesNotExist when the id is unknown.
        """
        todoInstance = Todo.objects.get(id = int(todoId))
        # Bug fix: capture the values BEFORE deletion. Django sets the
        # instance's primary key to None after Model.delete(), so reading
        # todoInstance.id afterwards returned id=None to the client.
        deleted_id = todoInstance.id
        deleted_name = todoInstance.todoName
        # Manual cascade over the related object graph.
        tasks = Task.objects.filter(todo = todoInstance)
        for task in tasks:
            comments = Comment.objects.filter(task = task)
            for comment in comments:
                File.objects.filter(comment = comment).delete()
                comment.delete()
            task.delete()
        todoInstance.delete()
        return DeleteTodo(
            id = deleted_id,
            todoName = deleted_name,
        )
class Mutation(graphene.ObjectType):
    """Root mutation object wiring up the todo mutations."""
    create_todo = CreateTodo.Field()
    update_todo = UpdateTodo.Field()
    delete_todo = DeleteTodo.Field()
|
990,320 | 0952efa709c583f2a1249aad3b6533ed90c4a362 | #!/usr/bin/env python
# -*-encoding:UTF-8-*-
import os
# os.path.abspath(__file__) will return the absolute path of .py file(full path)
# os.path.dirname(__file__) will return the file dir of .py
# basedir is the root dir:ICQBPMSSOJ
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# postgresql:default port 5432, use 5435 to avoid being the same
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'HOST': '127.0.0.1',
        'PORT': 5435,
        'NAME': "icqbpmssoj",
        'USER': "icqbpmssoj",
        'PASSWORD': 'icqbpmssoj'
    }
}

# redis:default port6379, use 6380 to avoid being the same
REDIS_CONF = {
    "host": "127.0.0.1",
    "port": "6380"
}

# When DEBUG is True and ALLOWED_HOSTS is empty, the host is validated against ['localhost', '127.0.0.1', '[::1]'].
DEBUG = True

# A list of strings representing the host/domain names that this Django site can serve.
# A value of '*' will match anything; in this case you are responsible to provide your own
# validation of the Host header (perhaps in a middleware; if so this middleware must
# be listed first in MIDDLEWARE)
# NOTE(review): '*' plus a hard-coded DB password is a development-only
# configuration -- do not deploy as-is.
ALLOWED_HOSTS = ["*"]

# NOTE: this path corresponds to the development environment, i.e. the
# project directory inside PyCharm.
# this path is dev path ,root dir is ICQBPMSSOJ
DATA_DIR = f"{BASE_DIR}/data"
|
990,321 | d3e09f7dd9a5411c6e750b463be406bec3b4c4d6 | import cmath
def solve_quadratic(a, b, c):
    """Return the two (possibly complex) roots of a*x**2 + b*x + c = 0.

    Uses cmath.sqrt, so both roots are always complex numbers, even when
    the discriminant is non-negative (matching the original behavior).
    Raises ZeroDivisionError when a == 0.
    """
    d = (b**2)-(4*c*a)
    root1 = (-b-cmath.sqrt(d))/(2*a)
    root2 = (-b+cmath.sqrt(d))/(2*a)
    return root1, root2


def main():
    """Read the three coefficients from stdin and print the roots."""
    # float() generalizes the original int-only parsing while still
    # accepting every integer literal.
    a = float(input())
    b = float(input())
    c = float(input())
    root1, root2 = solve_quadratic(a, b, c)
    print("the roots are {} and {}".format(root1,root2))


if __name__ == '__main__':
    main()
|
990,322 | f9b67f3566aa027474546f5bbbe1222ab78defc0 | import numpy as np
def slice2d_intrange(dists, trange):
    """
    Retrieves the indices of all samples in the specified time range from a particle distribution list.

    Input
    ------
        dists: list of particle distributions
        trange: array of float
            Time range to find distributions in; must be in unix time

    Returns
    -------
        numpy array containing the indices that fall within the specified trange
    """
    # A distribution is selected when its midpoint time lies inside trange.
    matches = [
        idx for idx, dist in enumerate(dists)
        if trange[0] <= dist['start_time'] + (dist['end_time'] - dist['start_time']) / 2.0 <= trange[1]
    ]
    return np.array(matches)
|
990,323 | 3a8d4aa38835d8dff1ebb67620bd2d79547b7702 | #&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LIBRERIAS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
import tkinter as tk
from tkinter import *
from tkinter import messagebox as MessageBox
import time
import sys
import os
import subprocess
from subprocess import Popen, PIPE, STDOUT
from io import open
import threading
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
exit=False #variable
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
listamenu=["Menu de Opciones:", "1--ip_table ", "2--hostapd ", "3--dnsmasq ", "4--up server", "5--exit"]#Menu Princcipal
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
def menu():
    """Print the ANSI-colored banner (via figlet) and the main menu."""
    print("\033[1;31;1m ")  # bright red for the banner
    os.system('figlet Twin')  # requires the `figlet` binary on PATH
    print("\033[1;37;1m ")  # bright white for the title
    print("         "+listamenu[0])
    print("\033[1;37;m ")
    print("      "+listamenu[1])
    print("      "+listamenu[2])
    print("      "+listamenu[3])
    print("      "+listamenu[4])
    print("      "+listamenu[5])
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ config_route_tables ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
def config_route_tables():
    """Configure NAT / forwarding iptables rules for the fake AP interface.

    Prompts for the wireless interface name and its IP, then shells out to
    ifconfig/ip/iptables. Sets the module globals ``wlan`` and ``ip`` and
    returns them as a tuple.

    NOTE(review): the raw ``input()`` values are interpolated straight into
    ``os.system`` command lines -- command injection if ever driven by
    untrusted input; confirm this tool is operator-only.
    """
    global wlan
    global ip
    os.system("ifconfig")
    wlan=input("Introduzca Wlan: ")
    ip=input("Introduzca ip interfaz: ")
    print(wlan)
    print(ip)
    while True:
        try:
            time.sleep(0.8)
            print("#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
            print("#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ config_route_iptables  ")
            print("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&")
            print("Config iptables & router")
            print("Procesando")
            os.system('iptables-save > /root/dsl.fw')  # back up current rules
            time.sleep(0.3)
            os.system('ifconfig '+wlan+' up '+ip+' netmask 255.255.255.0')# bring the wlan interface up with the chosen IP
            time.sleep(0.3)
            os.system('ip route add '+ip+' via 192.168.1.1 dev '+wlan)# routing config
            time.sleep(0.3)
            os.system('iptables --table nat --append POSTROUTING --out-interface eth0 -j MASQUERADE') # NAT out via eth0 (pick wifi or eth0 here)
            time.sleep(0.3)
            os.system('iptables --append FORWARD --in-interface '+wlan+' -j ACCEPT')
            time.sleep(0.3)
            os.system('echo 1 > /proc/sys/net/ipv4/ip_forward')  # enable kernel IP forwarding
            time.sleep(0.3) # TODO: make the IP range configurable
            os.system('iptables -t nat -L')
            print("configiracion iptable & ruting ok")
            print("#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
            print("#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ config_route_iptables  ")
            print("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&")
            time.sleep(0.8)
            return wlan,ip
            break  # NOTE(review): unreachable -- the return above always exits first
        except TypeError:
            MessageBox.showerror("Error", "Ha ocurrido un error inesperado.")
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ config_hostpad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
def configurar_hostapd(wlan):
    """Write /root/mis_funciones_python3/hostapd.conf and launch hostapd.

    Prompts for band, channel and the SSID to impersonate, writes an open
    (no WPA) hostapd config for ``wlan``, then starts hostapd in a new
    terminal via a background thread (hostapd_go).

    Sets the module globals Banda, Channel and
    name_route_victima_anzuelo as a side effect.
    """
    global Banda
    global Channel
    global name_route_victima_anzuelo
    print("Config hostapd.conf")
    print("Procesando")
    Banda=input("introduzca banda")
    Channel=input("introduca canal")
    name_route_victima_anzuelo=input("nombre del router")
    print(name_route_victima_anzuelo)# fake SSID for our access point
    print(wlan)
    print(Banda)
    print(Channel)
    os.system("touch /root/mis_funciones_python3/hostapd.conf")
    file1 = open("/root/mis_funciones_python3/hostapd.conf","w")
    time.sleep(0.3)
    file1.write('interface='+wlan+'\n')
    file1.write('driver=nl80211'+'\n')
    file1.write('ssid='+name_route_victima_anzuelo+'\n')
    file1.write('hw_mode='+Banda+'\n')
    file1.write('channel='+Channel+'\n')
    file1.write('macaddr_acl=0'+'\n') # TODO: add simple WPA2 security
    file1.write('auth_algs=1'+'\n')
    file1.write('ignore_broadcast_ssid=0'+'\n')
    file1.close()
    time.sleep(1)
    print("ejecutando hostapd.conf y ejecutando hostapd")
    # Run hostapd in a separate thread so the menu loop is not blocked.
    conf1=threading.Thread(target=hostapd_go, args=())
    conf1.start()
def hostapd_go(**datos):
    """Thread target: run hostapd in a new terminal with the written config.

    Blocks (communicate) until the spawned terminal exits.
    """
    while True:
        try:
            proceso1=Popen(['x-terminal-emulator', '-e', 'hostapd', '/root/mis_funciones_python3/hostapd.conf'], stdout=PIPE, stderr=PIPE)
            stdout, stderr=proceso1.communicate()
            break
        except TypeError:
            MessageBox.showerror("Error", "Ha ocurrido un error inesperado.")
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ config_dnsmasq ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
def configurar_dnsmasq(ip,wlan):
    """Write /root/mis_funciones_python3/dnsmasq.conf and launch dnsmasq.

    Configures a DHCP pool (192.168.1.1-25, class C netmask) with ``ip``
    as gateway and DNS, then starts dnsmasq in a new terminal via a
    background thread (dnsmasq_go).
    """
    print("Procesando dnsmasq")
    print(ip)
    print(wlan)
    os.system("touch /root/mis_funciones_python3/dnsmasq.conf")
    file2 = open("/root/mis_funciones_python3/dnsmasq.conf","w")
    file2.write('interface='+wlan+'\n')
    file2.write('dhcp-range=192.168.1.1,192.168.1.25,255.255.255.0,1h'+'\n') # class C
    file2.write('dhcp-option=3,'+ip+'\n')
    file2.write('dhcp-option=6,'+ip+'\n')
    file2.write('server=8.8.8.8'+'\n')
    file2.write('log-queries'+'\n')
    file2.write('log-dhcp'+'\n')
    file2.write('listen-address=127.0.0.1'+'\n')
    file2.close()
    time.sleep(1)
    print("ejecutando dnsmasq.conf")
    # Run dnsmasq in a separate thread so the menu loop is not blocked.
    conf1=threading.Thread(target=dnsmasq_go, args=())
    conf1.start()
def dnsmasq_go(**datos):
    """Thread target: run dnsmasq (foreground, -d) in a new terminal.

    Blocks (communicate) until the spawned terminal exits.
    """
    while True:
        try:
            proceso2=Popen(['x-terminal-emulator', '-e', 'dnsmasq -C /root/mis_funciones_python3/dnsmasq.conf -d'], stdout=PIPE, stderr=PIPE)
            stdout, stderr=proceso2.communicate()
            break
        except TypeError:
            MessageBox.showerror("Error", "Ha ocurrido un error inesperado.")
# Interactive main loop: show the menu until option 5 flips `exit`.
while exit==False:
    menu()
    key=(int(input(" "+"Select: ")))
    if (key==1):
        config_route_tables()
    elif (key==2):
        # NOTE(review): raises NameError if option 1 was never run --
        # `wlan` is only defined by config_route_tables(). Confirm the
        # intended call order is 1 -> 2 -> 3.
        configurar_hostapd(wlan)
    elif (key==3):
        # NOTE(review): same -- `ip` and `wlan` come from option 1.
        configurar_dnsmasq(ip,wlan)
    elif (key==4):
        os.system("python3 /root/mis_funciones_python3/apache.py")
    elif (key==5):
        exit=True
# Farewell banner printed once the loop ends.
print("\033[1;31;1m ")
print("Smp_A byTe_Dey_bYte_HackiNg")
print("\033[1;31;m ")
990,324 | b57a87a3a5e47b37f10ebb2f8738739c2a3c2ba5 | #!/usr/bin/python
# airfoil slicer
# accepts coordinate CSVs from http://airfoiltools.com/plotter
# outputs gcode
from math import *
class GCodeGen:
    """Incremental G-code generator for a single FDM print job.

    Accumulates G-code lines in ``gCodeLines`` while tracking the machine
    state (X/Y/Z position, extruder position E, feed rate F) so that
    redundant words are omitted from emitted moves. Units: mm, mm/s,
    degrees Celsius; feed rates are converted to mm/min on output.
    """

    def __init__(self):
        """Initialize printer parameters and emit the start-of-print header."""
        self.extruderTemp = 190.
        self.filamentDiameter = 1.75
        self.extruderDiameter = 0.4
        self.layerHeight = 0.3
        self.travelSpeed = 150.
        self.liftSpeed = 130.
        self.retractSpeed = 40.
        self.printSpeed = 90.
        self.bridgeSpeed = 45.
        # Current machine state.
        self.X = 0.
        self.Y = 0.
        self.Z = 0.
        self.E = 0.
        self.F = 5000.
        self.gCodeLines = []
        self.writeHeader()

    def beginLayer(self, layerHeight=None):
        """Start a new layer: zero the E axis and lift Z by one layer height.

        Args:
            layerHeight: optional new layer height (mm); when given it
                replaces ``self.layerHeight`` for this and later layers.
        """
        if layerHeight is not None:
            self.layerHeight = layerHeight
        self.resetExtruder()
        self.writeG1(Z=self.Z + self.layerHeight, F=self.liftSpeed * 60.)

    def retract(self, length=2.0, lift=0.5):
        """Pull back `length` mm of filament and lift the nozzle by `lift` mm."""
        self.writeG1(E=self.E - length, F=self.retractSpeed * 60.)
        self.writeG1(Z=self.Z + lift, F=self.liftSpeed * 60.)

    def unRetract(self, length=2.0, lift=0.5):
        """Undo retract(): lower the nozzle and re-prime the filament."""
        self.writeG1(Z=self.Z - lift, F=self.liftSpeed * 60.)
        self.writeG1(E=self.E + length, F=self.retractSpeed * 60.)

    def resetExtruder(self):
        """Zero the extruder axis (G92 E0)."""
        self.gCodeLines.append('G92 E0')
        self.E = 0.

    def drawLine(self, p1, p2, lineWidth=None, bridge=False):
        """Extrude a straight line from p1 to p2 (each an (x, y) pair in mm).

        Travels to p1 first when the head is elsewhere, retracting for
        travel moves longer than 2 mm. ``bridge`` switches to bridging
        speed and round-bead extrusion.
        """
        if lineWidth is None:
            lineWidth = self.extruderDiameter
        x1 = p1[0]
        y1 = p1[1]
        x2 = p2[0]
        y2 = p2[1]
        if self.X != x1 or self.Y != y1:
            # Line does not start at current location, travel there.
            if sqrt((self.X - x1)**2 + (self.Y - y1)**2) > 2.:
                self.retract()
                self.writeG1(X=x1, Y=y1, F=self.travelSpeed * 60.)
                self.unRetract()
            else:
                self.writeG1(X=x1, Y=y1, F=self.travelSpeed * 60.)
        extrusion = self.getExtrusionForMoveLength(x1, y1, x2, y2, lineWidth, bridge)
        speed = self.bridgeSpeed if bridge else self.printSpeed
        self.writeG1(X=x2, Y=y2, E=self.E + extrusion, F=speed * 60.)

    def getExtrusionForMoveLength(self, x1, y1, x2, y2, layerWidth, bridge):
        """Return the E-axis distance needed for the XY move (x1,y1)->(x2,y2).

        Models the cross-section of the extruded track and divides by the
        filament cross-section to convert deposited volume into filament
        length.
        """
        moveLength = sqrt((x2 - x1)**2 + (y2 - y1)**2)
        h = self.layerHeight * 0.5
        r = layerWidth * 0.5
        if bridge:
            # Free-hanging bridge: a round bead with 5% stretch.
            trackArea = pi * (self.extruderDiameter * 0.5)**2 * .95
        else:
            # Squashed track: circle minus the two circular segments cut
            # off by the layer height.
            trackArea = pi * r**2 - 2. * (0.5 * r**2 * 2. * acos(h / r) - 0.5 * h * sqrt(r**2 - h**2))
        filamentArea = pi * (self.filamentDiameter * 0.5)**2
        return moveLength * trackArea / filamentArea

    def writeG1(self, X=None, Y=None, Z=None, E=None, F=None):
        """Emit a G1 move containing only the words whose state changed.

        Updates the tracked machine state; emits nothing when no word
        differs from the current state.
        """
        args = ""
        if X is not None and X != self.X:
            self.X = X
            args += " X%f" % (self.X)
        if Y is not None and Y != self.Y:
            self.Y = Y
            args += " Y%f" % (self.Y)
        if Z is not None and Z != self.Z:
            self.Z = Z
            args += " Z%f" % (self.Z)
        if E is not None and E != self.E:
            self.E = E
            args += " E%f" % (self.E)
        if F is not None and F != self.F:
            self.F = F
            args += " F%f" % (self.F)
        if args != "":
            self.gCodeLines.append("G1" + args)

    def writeHeader(self):
        """Append the start-of-print preamble (heat, home, probe, units)."""
        self.gCodeLines.extend([
            "; generated by GCodeGen.py",
            "M107",
            "M190 S50 ; set bed temperature",
            "G28 X0 Y0 Z0 ; home all axes",
            "G1 Z5 F5000 ; lift nozzle",
            "M109 S%u ; set the extruder temp and wait" % (int(self.extruderTemp)),
            "G28 X0 Y0 Z0 ; Home Z again in case there was filament on nozzle",
            "G29 ; probe the bed",
            "G21 ; set units to millimeters",
            "G90 ; use absolute coordinates",
            "M82 ; use absolute distances for extrusion",
            "G92 E0",
        ])

    def writeFooter(self):
        """Append the end-of-print epilogue (retract, cool down, park)."""
        self.retract()
        self.gCodeLines.extend([
            "M104 S0 ; turn off temperature",
            "G1 X10 Y200",
            "M84 ; disable motors"
        ])

    def output(self, fname):
        """Append the footer and write the accumulated G-code to ``fname``.

        Note: calling output() twice appends a second footer.
        """
        self.writeFooter()
        # Bug fix: use a context manager so the file handle is closed even
        # on error; mode 'w' already truncates, so the explicit truncate()
        # was redundant.
        with open(fname, 'w') as f:
            f.write("\n".join(self.gCodeLines))
|
990,325 | abe85a8bfb46a7099320f9a6d1cc44f5057ea62b | import random
import numpy as np
import scipy.ndimage
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import platform
import odl
import pydicom as dc
from scipy.misc import imresize
import util as ut
# Objects of the data_pip type require to admit a method load_data that
# returns a single image from the dataset the method is linked to, in the
# format image_size.
# The image is always normalized between (0,1).
class data_pip(object):
    """Abstract base for image-sampling pipelines.

    Subclasses return single images of shape image_size + (colors,),
    normalized to [0, 1].
    """
    image_size = (128,128)
    name = 'default'
    colors = 1

    # load data outputs single image in format (image_size, colors)
    def load_data(self, training_data= True):
        """Return one image from the linked dataset; subclasses override."""
        pass
# returns 128x128 image from the BSDS dataset
class BSDS(data_pip):
    """Pipeline producing random 128x128 RGB crops from BSDS photos."""
    name = 'BSDS'
    colors = 3

    def __init__(self):
        # set up the training data file system
        self.train_list = ut.find('*.jpg', './BSDS/images/train')
        self.train_amount = len(self.train_list)
        print('Training Pictures found: ' + str(self.train_amount))
        self.eval_list = ut.find('*.jpg', './BSDS/images/val')
        self.eval_amount = len(self.eval_list)
        print('Evaluation Pictures found: ' + str(self.eval_amount))

    # method to draw raw picture samples
    def single_image(self, training_data=True):
        """Load one random full-size photo, scaled by 255 into [0, 1]."""
        if training_data:
            rand = random.randint(0, self.train_amount - 1)
            pic = mpimg.imread(self.train_list[rand])
        else:
            rand = random.randint(0, self.eval_amount - 1)
            # NOTE(review): eval images are read with scipy.ndimage while
            # training images use matplotlib -- confirm both return uint8
            # here, otherwise the /255.0 scaling differs per branch.
            pic = scipy.ndimage.imread(self.eval_list[rand])
        return pic/255.0

    # Draw random edgepoint
    def edgepoint(self, x_size, y_size):
        """Pick a random image_size crop window inside an x_size*y_size image.

        Returns (upper_left, lower_right) corner coordinate pairs.
        """
        x_vary = x_size - self.image_size[0]
        x_coor = random.randint(0, x_vary)
        y_vary = y_size - self.image_size[1]
        y_coor = random.randint(0, y_vary)
        upper_left = [x_coor, y_coor]
        lower_right = [x_coor + self.image_size[0], y_coor + self.image_size[1]]
        return upper_left, lower_right

    # methode to cut a image_size area out of the training images
    def load_data(self, training_data= True):
        """Return one random 128x128x3 crop from a random photo."""
        pic = self.single_image(training_data=training_data)
        size = pic.shape
        ul, lr = self.edgepoint(size[0], size[1])
        image = pic[ul[0]:lr[0], ul[1]:lr[1],:]
        return image
# returns 128x128 image from the LUNA challenge dataset
class LUNA(data_pip):
    """Pipeline producing 128x128 slices from the LUNA challenge DICOMs."""
    name = 'LUNA'
    colors = 1

    def __init__(self):
        # Data locations depend on which machine we are running on.
        name = platform.node()
        Train_Path = ''
        Eval_Path = ''
        if name == 'LAPTOP-E6AJ1CPF':
            Train_Path = './LUNA/Train_Data'
            Eval_Path = './LUNA/Eval_Data'
        elif name == 'motel':
            Train_Path = '/local/scratch/public/sl767/LUNA/Training_Data'
            Eval_Path = '/local/scratch/public/sl767/LUNA/Evaluation_Data'
        # List the existing training data
        self.training_list = ut.find('*.dcm', Train_Path)
        self.training_list_length = len(self.training_list)
        print('Training Data found: ' + str(self.training_list_length))
        self.eval_list = ut.find('*.dcm', Eval_Path)
        self.eval_list_length = len(self.eval_list)
        print('Evaluation Data found: ' + str(self.eval_list_length))

    # methodes for obtaining the medical data
    def get_random_path(self, training_data= True):
        """Return one random DICOM path from the train or eval split."""
        if training_data:
            path = self.training_list[random.randint(0, self.training_list_length-1)]
        else:
            path = self.eval_list[random.randint(0, self.eval_list_length - 1)]
        return path

    # resizes image to format 128x128
    def reshape_pic(self, pic):
        """Normalize, resize to 128x128 and rescale to the unit interval."""
        pic = ut.normalize_image(pic)
        # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 --
        # this requires an old SciPy (or a port to PIL/skimage).
        pic = imresize(pic, [128, 128])
        pic = ut.scale_to_unit_intervall(pic)
        return pic

    # the data method
    def load_data(self, training_data= True):
        """Draw random DICOMs until one 512x512 slice loads cleanly.

        Returns a (128, 128, 1) array; errors during reading are retried
        indefinitely with a fresh random file.
        """
        k = -10000
        pic = np.zeros((128,128))
        while k < 0:
            try:
                path = self.get_random_path(training_data=training_data)
                dc_file = dc.read_file(path)
                pic = dc_file.pixel_array
                # Only accept full 512x512 slices; anything else retries.
                if pic.shape == (512,512):
                    pic = self.reshape_pic(pic)
                    k = 1
            except UnboundLocalError:
                k = - 10000
                print('UnboundLocalError caught')
            except TypeError:
                k = -10000
                print('TypeError caught')
        output = np.zeros((128,128,1))
        output[...,0] = pic
        return output
# returns 128x128 image of randomly sampled ellipses
class ellipses(data_pip):
    """Pipeline producing 128x128 synthetic ellipse phantoms via ODL."""
    name = 'ellipses'
    colors = 1

    def __init__(self):
        # Uniform discretization the phantoms are rendered on.
        self.space = odl.uniform_discr([-64, -64], [64, 64], [self.image_size[0], self.image_size[1]],
                                       dtype='float32')

    # generates one random ellipse
    def random_ellipse(self, interior=False):
        """Return one random ellipse parameter tuple for odl.phantom.

        Tuple layout: (intensity, half-axis 1, half-axis 2, x0, y0, angle);
        `interior` restricts the center to the inner half of the domain.
        """
        if interior:
            x_0 = np.random.rand() - 0.5
            y_0 = np.random.rand() - 0.5
        else:
            x_0 = 2 * np.random.rand() - 1.0
            y_0 = 2 * np.random.rand() - 1.0
        return ((np.random.rand() - 0.5) * np.random.exponential(0.4),
                np.random.exponential() * 0.2, np.random.exponential() * 0.2,
                x_0, y_0,
                np.random.rand() * 2 * np.pi)

    # generates odl space object with ellipses
    def random_phantom(self, spc, n_ellipse=50, interior=False):
        """Render a phantom with a Poisson(n_ellipse) number of ellipses."""
        n = np.random.poisson(n_ellipse)
        ellipses = [self.random_ellipse(interior=interior) for _ in range(n)]
        return odl.phantom.ellipsoid_phantom(spc, ellipses)

    def load_data(self, training_data= True):
        """Return one (128, 128, 1) phantom scaled to the unit interval."""
        pic = self.random_phantom(spc= self.space)
        output = np.zeros((128, 128, 1))
        output[..., 0] = ut.scale_to_unit_intervall(pic)
        return output
# Smoke test for the data generation pipelines: draw one sample from every
# pipeline and save a side-by-side plot with min/max/average statistics.
if __name__ == '__main__':
    models = []
    models.append(BSDS())
    models.append(LUNA())
    models.append(ellipses())
    plt.figure()
    amount_models = len(models)
    for k in range(amount_models):
        plt.subplot(1, amount_models, k+1)
        image = models[k].load_data()
        plt.imshow(image)
        # Renamed from max/min/av: `max` and `min` shadowed the builtins.
        max_val = round(np.amax(image), 2)
        min_val = round(np.amin(image), 2)
        avg_val = round(np.average(image), 2)
        plt.title('Max: {} \n Min: {} \n Av.: {}'.format(max_val, min_val, avg_val))
        plt.axis('off')
    plt.savefig('Saves/Test/test_preprocessing.jpg')
    plt.close()
|
990,326 | d2c3f4b2c8d8b3261ebb3c2ad76ab619add5a56a | """Convolutional implementation of N-Dimensional Neighborhood Thresholding.
Functions
---------
conv_ndnt
"""
import itertools
import functools
import numpy as np
import scipy.ndimage as ndi
def conv_ndnt(data, shape, threshold):
    """Compute the N-Dimensional neighborhood threshold of an image.

    A pixel is kept (1) when its value exceeds ``threshold`` times the
    mean of its neighborhood, computed via an integral image.

    Parameters
    ----------
    data : array_like
        The image to threshold.
    shape : tuple of int
        The shape of the neighborhood around each pixel.
    threshold : float
        Threshold value between 0 and 1.

    Returns
    -------
    thresholded : array_like
        The thresholded image (uint8, same shape as ``data``).
    """
    # Integral image: cumulative sums along every axis, last axis first.
    int_img = functools.reduce(np.cumsum, range(data.ndim - 1, -1, -1), data)
    # Signed corner kernel that turns integral-image values into
    # per-pixel neighborhood sums when convolved.
    kernel = create_kernel(shape)
    sums = ndi.filters.convolve(int_img, kernel, mode='nearest')
    # Per-pixel count of neighborhood elements (accounts for borders):
    # product over axes of the per-axis extent differences.
    counts = functools.reduce(np.multiply, map(
        _countvolve, zip(
            np.meshgrid(*map(np.arange, data.shape), indexing='ij', sparse=True),
            shape)))
    # Keep a pixel when value * count > sum * threshold, i.e. the pixel
    # exceeds the scaled neighborhood mean.
    # NOTE(review): np.bool was removed in NumPy >= 1.24 -- use plain
    # `bool` when upgrading.
    out = np.ones(data.ravel().shape, dtype=np.bool)
    out[data.ravel() * counts.ravel() <= sums.ravel() * threshold] = False
    return out.astype(np.uint8).reshape(data.shape)
def _countvolve(args):
    """Convolve one axis' index grid with a [1, 0, ..., -1] difference kernel.

    ``args`` is a (grid, extent) pair; the kernel has ``extent + 1`` taps
    with +1 at the first and -1 at the last position. The flattened grid
    is convolved (mode='nearest') and reshaped back to the grid's shape.
    """
    grid, extent = args
    kernel = np.zeros(extent + 1)
    kernel[0], kernel[-1] = 1, -1
    flat = ndi.filters.convolve(grid.ravel(), kernel, mode='nearest')
    return flat.reshape(grid.shape)
def create_kernel(shape):
    """Create the n-dimensional NDNT kernel.

    Places alternating +/-1 signs on the 2**n "corner" positions of a
    zero array (indices -1 or 0 along each axis), as used to extract
    window sums from an integral image.

    Parameters
    ----------
    shape : tuple of int
        The shape of the neighborhood around each pixel.

    Returns
    -------
    kernel : array_like
        float32 array of the given shape.
    """
    ndim = len(shape)
    corners = np.array(list(itertools.product([-1, 0], repeat=ndim)))
    # Sign parity is measured relative to the first corner.
    base_parity = np.sum(corners[0]) & 1
    signs = np.array([1 if (-sum(corner) & 1) == base_parity else -1
                      for corner in corners])
    index_arrays = tuple(corners[:, axis] for axis in range(corners.shape[1]))
    kernel = np.zeros(shape, dtype=np.float32)
    kernel[index_arrays] = signs
    return kernel
|
990,327 | a66f26f6d75bbcd7ffac3ad250df09fc8822c9bb | from django.urls import include, path, reverse
from . import views,views_latex
# URL routes for the question/exam app.
urlpatterns = [
    path('', views.index, name='index'),
    # Exam management.
    path('exams/',views.examlist,name='examlist'),
    path('exams/<int:exam_id>/',views.examdetail,name="examdetail"),
    path('exams/<int:exam_id>/add_question/<int:question_id>',views.add_to_exam,name="add_to_exam"),
    path('exams/<int:exam_id>/delete_question/<int:examitem_id>',views.delete_from_exam,name="delete_from_exam"),
    # Reorder exam items.
    path('exams/<int:exam_id>/up/<int:examitem_id>',views.up,name="up"),
    path('exams/<int:exam_id>/down/<int:examitem_id>',views.down,name="down"),
    # Question CRUD.
    path('new/',views.new,name='new'),
    path('<int:question_id>/', views.detail, name='detail'),
    path('<int:question_id>/delete',views.delete,name='delete'),
    path('<int:question_id>/regen_jpg',views.regen_jpg_view,name='regen_jpg'),
    path('add/',views.add, name='add'),
    path('exams/add/',views.addexam, name='addexam'),
    # LaTeX preview.
    path('exams/<int:exam_id>/preview/', views_latex.preview, name="preview"),
    # redirect only
    # NOTE(review): both patterns below share the name "redirect2q";
    # reverse() will only ever resolve the later one -- confirm intended.
    path('exams/<int:exam_id>/questions/<int:question_id>', views.redirect2q,name="redirect2q"),
    path('exams/<int:exam_id>/new_question/',views.redirect2q,name="redirect2q")
]
990,328 | 8755eca31306c168c3e97f31bebc16d56c67bc4b | # Absolute local
from bgt import create_app
# Module-level application object created via the app factory; this is
# what a WSGI server (e.g. gunicorn) imports.
app = create_app()
|
990,329 | 6c04de5a47ef83261f2c400248c193139ef9fac5 | #!/usr/bin/python3
import urllib.request
import sys
import logging
import tempfile
import os
import re
import json
import argparse
import shutil
import stat
from zipfile import ZipFile
# Prebuilt-dependency index: platform -> arch -> variant -> package list.
# Variant "generic" packages are build-type independent; each package entry
# has name/version/url plus optional cmake `defs` and `execs` to chmod +x.
# $$HOME$$ in values is substituted with the package's install directory.
libindex = {
    'win32': {
        'x64': {
            'generic': [
                {
                    'name': 'clang',
                    'version': '10.0.0',
                    'url': 'https://www.dropbox.com/s/04oglvnci4x00fa/clang-10.0.0-x64-windows-release.zip?dl=1',
                    'defs': {
                        'CMAKE_C_COMPILER': '$$HOME$$/bin/clang-cl.exe',
                        'CMAKE_CXX_COMPILER': '$$HOME$$/bin/clang-cl.exe',
                    }
                }
            ],
            'debug': [
                {
                    'name': 'antlr4-runtime-cpp',
                    'version': '4.8',
                    'url': 'https://www.dropbox.com/s/fkfcvxxoml9mzaa/antlr4-4.8-x64-windows-debug.zip?dl=1'
                },
                {
                    'name': 'llvm',
                    'version': '10.0.0',
                    'url': 'https://www.dropbox.com/s/oayfcbdp6neirp6/llvm-10.0.0-x64-windows-debug.zip?dl=1'
                }
            ],
            'release': [
                {
                    'name': 'antlr4-runtime-cpp',
                    'version': '4.8',
                    'url': 'https://www.dropbox.com/s/c08fuwh63upe1ux/antlr4-4.8-x64-windows-release.zip?dl=1'
                },
                {
                    'name': 'llvm',
                    'version': '10.0.0',
                    'url': 'https://www.dropbox.com/s/y27mhcrlppkaoqc/llvm-10.0.0-x64-windows-release.zip?dl=1'
                }
            ]
        }
    },
    'linux': {
        'x64': {
            'generic': [
                {
                    'name': 'clang',
                    'version': '10.0.0',
                    'url': 'https://www.dropbox.com/s/fb7qtvh3t03e7zs/clang-10.0.0-x64-linux-release.zip?dl=1',
                    'defs': {
                        'CMAKE_C_COMPILER': '$$HOME$$/bin/clang',
                        'CMAKE_CXX_COMPILER': '$$HOME$$/bin/clang++',
                    },
                    'execs': [
                        '$$HOME$$/bin/clang',
                        '$$HOME$$/bin/clang++'
                    ]
                },
            ],
            'debug': [
                {
                    'name': 'antlr4-runtime-cpp',
                    'version': '4.8',
                    'url': 'https://www.dropbox.com/s/9vz3tmurvqhztqx/antlr4-4.8-x64-linux.zip?dl=1'
                },
                {
                    'name': 'llvm',
                    'version': '10.0.0',
                    'url': 'https://www.dropbox.com/s/ay865217z3z3uw5/llvm-10.0.0-x64-linux-debug.zip?dl=1'
                }
            ],
            'release': [
                {
                    'name': 'antlr4-runtime-cpp',
                    'version': '4.8',
                    'url': 'https://www.dropbox.com/s/9vz3tmurvqhztqx/antlr4-4.8-x64-linux.zip?dl=1'
                },
                {
                    'name': 'llvm',
                    'version': '10.0.0',
                    'url': 'https://www.dropbox.com/s/ekv2dikbi4tn0us/llvm-10.0.0-x64-linux-release.zip?dl=1'
                }
            ]
        }
    }
}

# Download chunk size: 1 MiB.
SIZE_ONE_MB = 1048576
def nomalizeSlash(p: str):
    """Convert backslashes to forward slashes on Windows; no-op elsewhere.

    NOTE: the (misspelled) name is kept -- callers depend on it.
    """
    return p.replace('\\', '/') if sys.platform == 'win32' else p
def mkpkgrelpath(dir, root):
    """Return `dir` relative to `root`, normalized with forward slashes."""
    rel = os.path.relpath(dir, root)
    rel = os.path.normcase(os.path.normpath(rel))
    return nomalizeSlash(rel)
def mkpkgdir(root, package, arch, buildtype, platform):
    """Build (creating it if needed) the install directory for one package.

    Layout: root/name/version/arch/<'generic' or buildtype>/platform.
    """
    variant = 'generic' if package['generic'] else buildtype
    pkgdir = os.path.join(root, package['name'], package['version'],
                          arch, variant, platform)
    if not os.path.exists(pkgdir):
        os.makedirs(pkgdir)
    return pkgdir
def extractpkg(pkgzip, pkgdest, fname):
    """Extract a downloaded package zip into `pkgdest`, printing progress.

    Args:
        pkgzip: path to the zip archive.
        pkgdest: destination directory (must exist).
        fname: display name used in the progress output.
    """
    with ZipFile(pkgzip, 'r') as zfp:
        entries = zfp.infolist()
        totoalentries = len(entries)
        extractedentries = 0
        for entry in entries:
            # Bug fix: the original called zfp.extract() twice for every
            # file entry (once unconditionally and once in the else
            # branch), doing all extraction work twice.
            zfp.extract(entry, pkgdest)
            if entry.is_dir():
                # Ensure empty directory entries exist on disk.
                entrydest = nomalizeSlash(os.path.join(pkgdest, entry.filename))
                if not os.path.exists(entrydest):
                    os.makedirs(entrydest)
            extractedentries += 1
            progress = (extractedentries / totoalentries)
            percent = "{:3.2%}".format(progress)
            print("[extract %s %s]" % (fname, percent))
def preparepkg(dir, relpath):
    """Substitute $$PKGDIR$$ in a package's package.cmake with its real path.

    Raises Exception when `dir` contains no package.cmake.
    """
    cmakefilepath = os.path.join(dir, 'package.cmake')
    if not os.path.exists(cmakefilepath):
        raise Exception(
            "cmake include file '%s' does not exists." % cmakefilepath)
    pkgdirvar = "\"${CMAKE_SOURCE_DIR}/packages/%s\"" % relpath
    with open(cmakefilepath, 'r') as cfp:
        original_lines = cfp.readlines()
    rewritten = [line.replace('$$PKGDIR$$', pkgdirvar) for line in original_lines]
    with open(cmakefilepath, 'w') as cfp:
        cfp.writelines(rewritten)
def installpkg(root, relpath):
    """Append a package's cmake INCLUDE line to root/index.cmake (idempotent).

    Creates index.cmake with a header comment when missing; warns and
    leaves the file untouched when the line is already present.
    """
    pkgcmakefile = "packages/%s/package.cmake" % relpath
    cmkinc = "INCLUDE(\"${CMAKE_SOURCE_DIR}/%s\")" % pkgcmakefile
    cmkindexpath = os.path.join(root, 'index.cmake')
    if not os.path.exists(cmkindexpath):
        with open(cmkindexpath, 'w') as ifp:
            ifp.write('# index of all packages to be included\n')
    with open(cmkindexpath, 'r') as ifp:
        includedlines = ifp.read().splitlines()
    if cmkinc in includedlines:
        logging.warning("package is found already included in index")
    else:
        includedlines.append(cmkinc)
        with open(cmkindexpath, 'w') as ifp:
            for line in includedlines:
                ifp.write(line)
                ifp.write('\n')
    print("[installed %s]" % pkgcmakefile)
def encachepkg(root, url, home):
    """Record `home` (and its current mtime) for `url` in the index.json cache."""
    pkgindexfile = os.path.join(root, 'index.json')
    if os.path.exists(pkgindexfile):
        with open(pkgindexfile, 'rt') as fp:
            pkgentries = json.load(fp)
    else:
        pkgentries = {}
    # The mtime is stored so a later check can detect a touched directory.
    pkgentries[url] = {
        'dir': home,
        'modified_time': os.path.getmtime(home)
    }
    with open(pkgindexfile, 'wt') as fp:
        json.dump(pkgentries, fp, indent=4)
    print('[cached %s]' % home)
def downloadpkg(packageurl):
    """Download a package archive from `packageurl` into a named temp file.

    Returns ``(temp_file_path, original_filename, True)`` on success, or
    ``(None, None, False)`` on a non-200 response or a short read.  The
    caller is responsible for deleting the temp file.
    """
    with urllib.request.urlopen(packageurl) as pkgres:
        if pkgres.getcode() != 200:
            logging.error("failed to download package from %s" % packageurl)
            return None, None, False
        contentlen = int(pkgres.getheader('Content-Length'))
        filename = pkgres.getheader('Content-Disposition')
        # Pull the original file name out of: attachment; filename="..."
        pattern = re.compile(
            '(attachment\\; filename\\=\\")([\w\d\\-\\.]+)(\\")')
        filename = pattern.match(filename).group(2)
        downloadedlen = 0
        # Stream the body in chunks so large archives never sit in memory.
        # SIZE_ONE_MB is a module-level constant defined elsewhere in this
        # file -- presumably 1 MiB; confirm before relying on chunk size.
        with tempfile.NamedTemporaryFile('w+b', delete=False) as fp:
            pkgfile = fp.name
            buffer = pkgres.read(SIZE_ONE_MB)
            while 0 < len(buffer):
                downloadedlen += len(buffer)
                fp.write(buffer)
                percent = "{:3.2%}".format(downloadedlen / contentlen)
                print("[download %s %s]" % (filename, percent))
                buffer = pkgres.read(SIZE_ONE_MB)
        # A short read (dropped connection) is reported as a failure.
        if contentlen == downloadedlen:
            return pkgfile, filename, True
        else:
            return None, None, False
def chkpkgexists(root, url):
    """Return True when the cached copy recorded for `url` is still valid."""
    indexfile = os.path.join(root, 'index.json')
    if not os.path.exists(indexfile):
        return False
    with open(indexfile, 'rt') as fp:
        indexdata = json.load(fp)
    if indexdata is None:
        return False
    entry = indexdata.get(url)
    if entry is None:
        return False
    entrydir = entry.get('dir')
    modifiedtime = entry.get('modified_time')
    if entrydir is None or modifiedtime is None:
        return False
    if not os.path.exists(entrydir):
        return False
    # A changed mtime means the cached directory was touched since caching.
    return os.path.getmtime(entrydir) == modifiedtime
def mkbuilddir(rootdir, arch, build_type, platform):
    """Compose the build directory path: <rootdir>/out/<arch>/<build_type>/<platform>."""
    builddir = os.path.join(rootdir, "out", arch, build_type, platform)
    return nomalizeSlash(builddir)
def runconfig(build_type, builddir, cmake_defs, rootdir):
    """Run the cmake configure step for `builddir`, raising on a non-zero exit."""
    cmkbuildType = "Release" if build_type == "release" else "Debug"
    installdir = nomalizeSlash(os.path.join(builddir, "dist"))
    # Generator choice mirrors the host toolchain: make on Linux, nmake elsewhere.
    gn = 'Unix Makefiles' if sys.platform == 'linux' else 'NMake Makefiles'
    parts = [
        "cmake -G \"%s\" -B \"%s\"" % (gn, builddir),
        "-DCMAKE_INSTALL_PREFIX=\"%s\"" % installdir,
        "-DCMAKE_BUILD_TYPE=%s" % cmkbuildType,
    ]
    for cmakedef in cmake_defs:
        parts.append("-D%s=\"%s\"" % (cmakedef, cmake_defs[cmakedef]))
    configcmd = " ".join(parts) + " %s" % nomalizeSlash(rootdir)
    print(configcmd)
    exitcode = os.system(configcmd)
    if 0 != exitcode:
        raise Exception(
            "Error while trying to configure using '%s'" % configcmd)
    else:
        print('successfully configured')
    print('[BuildDir %s]' % builddir)
def main():
    """Entry point: resolve dependency packages, configure with cmake, then
    optionally build and test.

    Bug fixes: the test step used to echo `buildcmd` instead of the test
    command it actually ran, and the "no packages found for arch" message
    was missing its closing quote.
    """
    try:
        # Default architecture from the interpreter's pointer width.
        arch = None
        if sys.maxsize == (2**63 - 1):
            arch = 'x64'
        elif sys.maxsize == (2 ** 31 - 1):
            arch = 'x86'
        else:
            raise Exception("unsupported architecture '%s'" % arch)
        platform = sys.platform
        argparser = argparse.ArgumentParser()
        argparser.add_argument('--build-type', default='debug', action='store',
                               help='build type could be either Debug or Release')
        argparser.add_argument('--arch', default=arch, action='store',
                               help='architecture could be x64 or x86')
        argparser.add_argument('--platform', default=platform, action='store',
                               help='platform could be win32 or linux')
        argparser.add_argument('--build', action='store_true',
                               help='initiate the build for present configuration')
        argparser.add_argument('--clean', action='store_true',
                               help='clean the build before do configure')
        argparser.add_argument('--test', action='store_true',
                               help='test the build')
        args = argparser.parse_args()
        build_type = args.build_type.lower()
        arch = args.arch.lower()
        platform = args.platform.lower()
        buildflag = args.build
        cleanflag = args.clean
        testflag = args.test
        print("[%s-%s-%s]" % (arch, platform, build_type))
        rootdir = os.path.dirname(os.path.abspath(__file__))
        pkgroot = os.path.join(rootdir, 'packages')
        if not os.path.exists(pkgroot):
            os.makedirs(pkgroot)
        # Resolve the package list for platform/arch/build-type, merging in
        # the 'generic' packages that apply to every build type.
        packages = libindex.get(platform)
        if packages is None:
            raise Exception("no packages found for platform '%s'" % platform)
        packages = packages.get(arch)
        if packages is None:
            # bug fix: message previously lacked the closing quote
            raise Exception("no packages found for arch '%s'" % arch)
        genericpkgs = packages.get('generic')
        packages = packages.get(build_type)
        if packages is None:
            raise Exception(
                "no packages found for build type '%s'" % build_type)
        if len(packages) == 0:
            logging.warning('no packages found.')
        if genericpkgs is not None:
            if len(genericpkgs) > 0:
                for gpkg in genericpkgs:
                    gpkg['generic'] = True
                for pkg in packages:
                    pkg['generic'] = False
                    genericpkgs.append(pkg)
                packages = genericpkgs
        cmake_defs = {}
        for package in packages:
            packageurl = package['url']
            pkgdir = mkpkgdir(pkgroot, package, arch, build_type, platform)
            pkgrelpath = mkpkgrelpath(pkgdir, pkgroot)
            pkghome = os.path.join(pkgroot, pkgrelpath)
            pkghome = nomalizeSlash(pkghome)
            # Download/extract/install only when the cache entry is stale.
            if not chkpkgexists(pkgroot, packageurl):
                pkgfile, filename, downloadok = downloadpkg(packageurl)
                if not downloadok:
                    raise Exception('download failed for %s' % packageurl)
                extractpkg(pkgfile, pkgdir, filename)
                preparepkg(pkgdir, pkgrelpath)
                installpkg(pkgroot, pkgrelpath)
                encachepkg(pkgroot, packageurl, pkghome)
            else:
                print('[exists %s-%s]' % (package['name'], package['version']))
            defs = package.get('defs')
            print('checking definitions')
            if defs is not None:
                print('definitions found')
                for defkey in defs:
                    defval = defs[defkey]
                    defval = defval.replace('$$HOME$$', pkghome)
                    cmake_defs[defkey] = defval
                    print('[def %s = %s]' % (defkey, defval))
            else:
                print('no definitions found')
            print('checking executables')
            execs = package.get('execs')
            if execs is not None:
                print('executables found')
                for path in execs:
                    path = path.replace('$$HOME$$', pkghome)
                    print('[chmod +x %s]' % path)
                    os.chmod(path, stat.S_IRWXU)
            else:
                print('no executables found')
        builddir = mkbuilddir(rootdir, arch, build_type, platform)
        print('[BuildDir %s]' % builddir)
        if cleanflag:
            if os.path.exists(builddir):
                shutil.rmtree(builddir)
        # A build dir without a CMakeCache.txt is half-configured; start over.
        cmkcache = os.path.join(builddir, 'CMakeCache.txt')
        if (not os.path.exists(cmkcache)) and os.path.exists(builddir):
            print('[removedir %s]' % builddir)
            shutil.rmtree(builddir)
        print('running configuration')
        runconfig(build_type, builddir, cmake_defs, rootdir)
        if testflag:
            buildflag = True  # testing implies building first
        if buildflag:
            buildcmd = 'cmake --build %s' % builddir
            print(buildcmd)
            exitcode = os.system(buildcmd)
            if 0 != exitcode:
                raise Exception('build failed')
        if testflag:
            testcmd = 'cmake --build %s --target test' % builddir
            print(testcmd)  # bug fix: previously echoed buildcmd here
            exitcode = os.system(testcmd)
            if 0 != exitcode:
                raise Exception('test failed')
    except Exception as e:
        logging.error(e)
        sys.exit(-1)
if __name__ == "__main__":
main()
|
990,330 | 6c06f383c4093540d7820dba99ac523eb00128ad | # Enthusiastic_Python_Basic #P_05_2_4
# Demonstrate emptying a list in place via slice deletion.
st = [1, 2, 3, 4, 5]
del st[0:5]
print(st)
990,331 | b0d032aa2646964bd59e081fcea30bb01ec5a2d4 | import turtle
print("Happy Onam")
# Shared drawing turtle used by all helper functions below.
tim = turtle.Turtle()
# turtle.speed() only distinguishes values 0.5-10; per the turtle docs an
# out-of-range value like 5000 is mapped to 0 ("fastest").
tim.speed(5000)
def petal(length=200, x1=0, x2=0, x3=0):
    """Draw a filled nine-lobed petal ring in colour (x1, x2, x3)."""
    tim.color(x1, x2, x3)
    tim.begin_fill()
    for _ in range(9):
        tim.forward(length)
        tim.circle(length / 4, 208)
        tim.forward(length)
        tim.right(163)  # was right(180) then right(-17): same net rotation
    tim.end_fill()
def circ(rad=100, x1=0, x2=0, x3=0):
    """Draw a filled circle of radius `rad` centred on the current position."""
    # Slip down one radius with the pen up so the circle ends up centred here.
    tim.penup()
    tim.left(90)
    tim.back(rad)
    tim.pendown()
    tim.left(-90)
    tim.color(x1, x2, x3)
    tim.begin_fill()
    tim.circle(rad)
    tim.end_fill()
    # Climb back to the starting point without leaving a trace.
    tim.penup()
    tim.left(90)
    tim.forward(rad)
    tim.pendown()
    tim.left(-90)
def sq(x=50, x1=0, x2=0, x3=0):
    """Draw a filled square with side `x` in colour (x1, x2, x3)."""
    tim.color(x1, x2, x3)
    tim.begin_fill()
    for _ in range(4):
        tim.forward(x)
        tim.right(90)
    tim.end_fill()
def sqdes(siz=20, x1=0, x2=0, x3=0, delta=20):
    """Draw a rosette of squares, rotating `delta` degrees between squares.

    Bug fix: ``range(360/delta)`` passes a float under Python 3 and raised
    TypeError on every call; the count is now truncated to an int.
    """
    tim.color(x1, x2, x3)
    for _ in range(int(360 / delta)):
        sq(siz * 0.707, x1, x2, x3)
        tim.right(delta)
# Palette note: orange 255,166,0
# NOTE(review): tim.color() is called with 0-255 RGB components throughout;
# turtle's default colormode is 1.0, so this relies on colormode(255) being
# set somewhere not visible here -- confirm.
# Outer rings of the pookalam (flower carpet).
circ(300, 17, 110, 0) #green
circ(280, 251,255,41) #yellow
# Ring of eight crimson circles placed around the centre.
tim.right(90)
tim.forward(201)
tim.right(-90)
tim.right(22.5)
for i in range(8):
    circ(78.95,227,38,54) #crimson
    tim.right(-45)
    tim.forward(154)
    tim.right(67.5)
tim.backward(201)
#sqdes(280,255, 253, 184,30)
# NOTE(review): pensize() is given the string '5' rather than a number --
# verify turtle accepts this on the targeted version.
tim.pensize('5')
circ(225,255,255,230) #cream
circ(206,17,110,0) #green
#red 255,75,20'''
# Inner petal layers and centre circles.
tim.right(-90)
tim.right(11.25)
petal(160,255,166,0) #orange
petal(140,251,255,41) #yellow
petal(110,255,255,230) #jasmine
circ(75,17,110,0) #green
circ(60,255,255,230) #jasmine
circ(45,166,77,255) #violet
circ(20,251,255,41) #yellow
990,332 | 77b5e6a3f74a8fdcd740ecd4cad6b7d1272769d3 | #!/usr/local/bin/env python3
from beautifulscraper import BeautifulScraper
# Smoke test: fetch beautifulscraper's own GitHub page and read the repo
# metadata element.
scraper = BeautifulScraper()
body = scraper.go("https://github.com/adregner/beautifulscraper")
# NOTE(review): the selected text is neither printed nor stored -- this line
# only has an effect in an interactive session; confirm intent.
body.select(".repository-meta-content")[0].text
|
990,333 | e3a685f0f8b640e926ddea95434c0f36a911633b | data = [
1941
, 1887
, 1851
, 1874
, 1612
, 1960
, 1971
, 1983
, 1406
, 1966
, 1554
, 1892
, 1898
, 1926
, 1081
, 1992
, 1073
, 1603
, 177
, 1747
, 1063
, 1969
, 1659
, 1303
, 1759
, 1853
, 1107
, 1818
, 1672
, 1352
, 2002
, 1838
, 1985
, 1860
, 1141
, 1903
, 1334
, 1489
, 1178
, 1823
, 1499
, 1951
, 1225
, 1503
, 1417
, 1724
, 1165
, 1339
, 1816
, 1504
, 1588
, 1997
, 1946
, 1324
, 1771
, 1982
, 1272
, 1367
, 1439
, 1252
, 1902
, 1940
, 1333
, 1750
, 1512
, 1538
, 1168
, 2001
, 1797
, 1233
, 972
, 1306
, 1835
, 1825
, 1822
, 1880
, 1732
, 1785
, 1727
, 1275
, 1355
, 1793
, 1485
, 1297
, 1932
, 1519
, 1587
, 1382
, 1914
, 1745
, 1087
, 1996
, 1746
, 1962
, 1573
, 2008
, 1868
, 1278
, 1386
, 1238
, 1242
, 1170
, 1476
, 1161
, 1754
, 1807
, 1514
, 1189
, 1916
, 1884
, 1535
, 1217
, 1911
, 1861
, 1493
, 1409
, 1783
, 1222
, 1955
, 1673
, 1502
, 607
, 2010
, 1846
, 1819
, 1500
, 1799
, 1475
, 1146
, 1608
, 1806
, 1660
, 1618
, 1904
, 978
, 1762
, 1925
, 1185
, 1154
, 1239
, 1843
, 1986
, 533
, 1509
, 1913
, 287
, 1707
, 1115
, 1699
, 1859
, 1077
, 1915
, 1412
, 1360
, 1646
, 1973
, 1627
, 1755
, 1748
, 1769
, 1886
, 1422
, 1686
, 950
, 100
, 1372
, 1068
, 1370
, 1428
, 1870
, 1108
, 190
, 1891
, 1794
, 1228
, 1128
, 1365
, 1740
, 1888
, 1460
, 1758
, 1906
, 1917
, 1989
, 1251
, 1866
, 1560
, 1921
, 1777
, 1102
, 1850
, 1498
, 683
, 1840
, 1800
, 1112
, 1908
, 1442
, 1082
, 1071
]
# Two-sum over the expense report: for each value remember which partner
# would complete 2020; stop at the first value whose partner already asked.
pending = {}
for current in data:
    if current in pending:
        print("The result of Day #1.1 is: " + str(current * pending[current]))
        break
    pending[2020 - current] = current
print("</Fin>")
990,334 | e34b4641b2bde0b8af9c6d5d15e5eacebba857a9 | from Sistema_Locadora.Connection_DataBase.Connection import Connection
from Sistema_Locadora.Operations_CRUD.Date_Hour_Now import DateHourNow
class DvdGame(Connection):
    """CRUD operations for the DVD_JOGO table of the rental system.

    Bug fix: every handler used ``except():`` -- an *empty tuple* of
    exception types, which catches nothing -- so all the error messages
    below were dead code and database errors escaped unhandled.  They now
    catch ``Exception``.

    NOTE(review): SQL statements are built with string formatting, which is
    vulnerable to SQL injection; migrating to parameterised queries needs
    the driver's paramstyle -- confirm and fix separately.
    """

    # Module-level timestamp captured once and reused by every statement.
    global date_hour
    now = DateHourNow()
    date_hour = now.get_date_hour_now()

    def check_qtde_dvd_games_avaible(self, id_dvd_jogo):
        """Return the stock quantity for `id_dvd_jogo`, or 0 when absent."""
        try:
            select_qty_dvd_games = "SELECT QTDE FROM DVD_JOGO WHERE ID_DVD_JOGO = {0};".format(id_dvd_jogo)
            self.cursor.execute(select_qty_dvd_games)
            qty_dvds_games = self.cursor.fetchone()
            if qty_dvds_games is not None:
                return qty_dvds_games[0]
            return 0
        except Exception:
            print("_" * 80)
            print("Erro ao tentar recuperar a quantidade de Dvds ou Jogos cadastrados da tabela DVD_JOGOS.")

    def return_id(self, nome_dvd_jogo):
        """Return the ID_DVD_JOGO for a name (case-insensitive), or None."""
        try:
            select_id_dvd_game = "SELECT ID_DVD_JOGO FROM DVD_JOGO WHERE UPPER(NOME) = '{0}';"\
                .format(nome_dvd_jogo.upper())
            self.cursor.execute(select_id_dvd_game)
            id_dvd_game = self.cursor.fetchone()
            if id_dvd_game is not None:
                return id_dvd_game[0]
            return None
        except Exception:
            self.close_connection()
            print("_" * 80)
            print("Erro ao tentar recuperar o registro do id_dvd_game.")

    def insert(self, datas=[]):
        """Insert a new row; `datas` = [name, genre, age, type, price, made, qty, user_id].

        NOTE(review): the mutable default argument is kept for signature
        compatibility; it is never mutated here.
        """
        try:
            id_dvd_game = self.return_id(datas[0])
            if id_dvd_game is None:
                sql = ("INSERT INTO DVD_JOGO(NOME,GENERO,CLASSIFICACAO_IDADE,TIPO,PRECO,DATA_FABRICACAO,QTDE"
                       ",DATA_CRIACAO,DATA_ATUALIZACAO, ID_USUARIO)"
                       f"VALUES('{datas[0]}','{datas[1]}',{datas[2]},'{datas[3]}',{datas[4]},"
                       f"'{datas[5]}',{datas[6]},'{date_hour}','{date_hour}',{datas[7]});")
                self.cursor.execute(sql)
                self.connection.commit()
                print("_" * 80)
                print("O Registro do (Dvd ou Jogo) foi cadastrado com sucesso.\nNome: {0}".format(datas[0].title()))
            else:
                print("_" * 80)
                print("Este DVD ou jogo cujo o nome é: {0} já se encontra cadastrado.".format(datas[0].title()))
        except Exception:
            print("_" * 80)
            print("Erro ao tentar inserir um novo Dvd ou jogo. ")
        finally:
            self.close_connection()

    def insert_multiple(self, datas=[]):
        """Insert via the INSERT_CARGA_CSV_P stored procedure (CSV bulk load path)."""
        try:
            id_dvd_game = self.return_id(datas[0])
            if id_dvd_game is None:
                self.cursor.callproc("INSERT_CARGA_CSV_P", [datas[0], datas[1], datas[2], datas[3], datas[4], datas[5],
                                                            datas[6]])
                print("_" * 80)
                print("O Registro do (Dvd ou Jogo) foi cadastrado com sucesso.\nNome: {0}".format(datas[0].title()))
            else:
                print("_" * 80)
                print("Este DVD ou jogo cujo o nome é: {0} já se encontra cadastrado.".format(datas[0].title()))
        except Exception:
            print("_" * 80)
            print("Erro ao tentar inserir um novo Dvd ou jogo. ")
        self.close_connection()

    def _update_field(self, name_dvd_game, assignment, user_id, success_msg, missing_msg):
        """Shared UPDATE skeleton: apply one SET `assignment` for the named game."""
        try:
            id_dvd_game = self.return_id(name_dvd_game)
            if id_dvd_game is not None:
                sql = ("UPDATE DVD_JOGO SET {0},DATA_ATUALIZACAO = '{1}', ID_USUARIO = {2}"
                       " WHERE ID_DVD_JOGO = {3};".format(assignment, date_hour, user_id, id_dvd_game))
                self.cursor.execute(sql)
                self.connection.commit()
                print("_" * 80)
                print(success_msg)
            else:
                print("_" * 80)
                print(missing_msg)
        except Exception:
            print("_" * 80)
            print("Erro ao tentar realizar o update do Dvd Ou Jogo.")
        finally:
            self.close_connection()

    def update_name(self, name_old, new_name, user_id):
        """Rename the game currently called `name_old` to `new_name`."""
        self._update_field(name_old, "NOME = '{0}'".format(new_name), user_id,
                           "O Nome do Dvd ou Jogo foi alterado com sucesso.",
                           "Não existe este DVD ou Jogo cadastrado no sistema.")

    def update_genare(self, name_dvd_game, genare, user_id):
        """Change the genre of the named game."""
        self._update_field(name_dvd_game, "GENERO = '{0}'".format(genare), user_id,
                           "O Gênero do DVD ou Jogo foi alterado com sucesso.",
                           "Não existe registro do DVD ou Jogo cadastrado no sistema.")

    def update_classification_age(self, name_dvd_game, classification_age, user_id):
        """Change the age rating of the named game."""
        self._update_field(name_dvd_game, "CLASSIFICACAO_IDADE = {0}".format(classification_age), user_id,
                           "A Classificação de Idade foi alterada com sucesso.",
                           "Não existe registro do DVD ou Jogo cadastrado no sistema.")

    def update_type_dvd_game(self, name_dvd_game, type_dvd_game, user_id):
        """Change the type of the named game."""
        self._update_field(name_dvd_game, "TIPO = '{0}'".format(type_dvd_game), user_id,
                           "O Tipo do DVD ou Jogo foi alterado com sucesso.",
                           "Não existe registro do DVD ou Jogo cadastrado no sistema.")

    def update_price(self, name_dvd_game, price, user_id):
        """Change the price of the named game."""
        self._update_field(name_dvd_game, "PRECO = {0}".format(price), user_id,
                           "O Preço do DVD ou Jogo foi alterado com sucesso.",
                           "Não existe registro do DVD ou Jogo cadastrado no sistema.")

    def update_fabrication_date(self, name_dvd_game, fabrication_date, user_id):
        """Change the manufacturing date of the named game."""
        self._update_field(name_dvd_game, "DATA_FABRICACAO = '{0}'".format(fabrication_date), user_id,
                           "A Data de Fabricação do DVD ou Jogo foi alterado com sucesso.",
                           "Não existe registro do DVD ou Jogo cadastrado no sistema.")

    def update_quantity(self, name_dvd_game, quantity, user_id):
        """Change the stock quantity of the named game."""
        self._update_field(name_dvd_game, "QTDE = {0}".format(quantity), user_id,
                           "A Quantidade de DVD ou Jogo foi alterado com sucesso.",
                           "Não existe registro do DVD ou Jogo cadastrado no sistema.")

    def delete(self, nome_dvd_jogo):
        """Delete the record whose name matches `nome_dvd_jogo`."""
        try:
            id_dvd_game = self.return_id(nome_dvd_jogo)
            if id_dvd_game is not None:
                sql = ("DELETE FROM DVD_JOGO WHERE ID_DVD_JOGO = {0};".format(id_dvd_game))
                self.cursor.execute(sql)
                self.connection.commit()
                print("_" * 80)
                print("O registro do Dvd Ou Jogo:{0} foi deletado com sucesso.".format(nome_dvd_jogo.title()))
            else:
                print("_" * 80)
                print("Não existe registro pra este Dvd Ou Jogo.")
        except Exception:
            print("_" * 80)
            print("Erro ao tentar deletar este registro:{0}".format(nome_dvd_jogo))
        finally:
            self.close_connection()

    def _print_record(self, registers):
        """Print the human-readable field listing for one DVD_JOGOS_V row."""
        print("Código do (Dvd ou Jogo): {0}".format(registers[0]))
        print("Nome: {0} ".format(registers[1].title()))
        print("Gênero: {0} ".format(registers[2].title()))
        print("Classificação de Idade: {0} ".format(registers[3]))
        print("Tipo: {0} ".format(registers[4].title()))
        print("Preço: {0} ".format(registers[5]))
        print("Data de Fabricação: {0} ".format(registers[6]))
        print("Data de Cadastro: {0} ".format(registers[7]))
        print("Quantidade: {0} ".format(registers[8]))
        print("Data de Atualização: {0}".format(registers[10]))
        print("Código Do Usuário que Realizou a Última Atualização: {0}".format(registers[11]))

    def select_all(self):
        """List every row of DVD_JOGOS_V ordered by name, plus a count summary."""
        try:
            sql = "SELECT * FROM DVD_JOGOS_V ORDER BY NOME;"
            self.cursor.execute(sql)
            datas = self.cursor.fetchall()
            print("\t \t \t Dvd's e Jogos")
            print("_" * 80)
            for registers in datas:
                self._print_record(registers)
                print("_" * 80)
            if len(datas) > 1:
                print("Quantidade de registros: {0} ".format(len(datas)))
            elif len(datas) == 1:
                print("Você possuí somente: {0} registro cadastrado. ".format(len(datas)))
            else:
                print("Não existe nenhum dvd ou jogo cadastrado.")
        except Exception:
            print("_" * 80)
            print("Erro ao tentar retornar todos os registros.")
        finally:
            self.close_connection()

    def search_dvd_game(self, name_dvd_game):
        """Print every row whose name contains `name_dvd_game` (case-insensitive)."""
        try:
            sql = f"SELECT * FROM DVD_JOGOS_V WHERE UPPER(NOME) LIKE '%{name_dvd_game.upper()}%';"
            self.cursor.execute(sql)
            records = self.cursor.fetchall()
            print("\t \t \t Dvd e Jogos")
            print("_" * 80)
            if records is not None:
                for datas in records:
                    print("_" * 80)
                    self._print_record(datas)
            else:
                print("Não existe este Dvd ou Jogo cadastrado no sistema.")
        except Exception:
            print("_" * 80)
            print("Erro ao tentar retornar todos os registros.")
        finally:
            self.close_connection()
"""
data=[]
data.append('Fifa 2018')
data.append('Esporte')
data.append(10)
data.append('Esporte')
data.append(250.00)
data.append('2017.06.02')
data.append(2)
"""
# dvd_game = DvdGame()
# dvd_game.return_id("Fifa 2018")
# dvd_game.insert(data)
# dvd_game.select_all()
# dvd_game.search_dvd_game('Fifa 2018')
# dvd_game.update(data)
# dvd_game.delete("Fifa 2018")
|
990,335 | a42fdf7cd6e4369e2e23dbf5260c4fc886b5aed8 | from create_project import *
|
990,336 | 2ac2421332e9fef0a94c1be742a83d2294b96c30 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: rtwik
"""
from setuptools import find_packages
from setuptools import setup
# Distribution metadata for the writing-level classifier package;
# install with `pip install .`.
setup(
    name='writing_level_classifier',
    version='0.1.dev0',
    packages=find_packages(),
    # Dependencies are pinned to exact versions for reproducibility.
    install_requires=[
        "Keras == 2.1.5",
        "tensorflow == 1.6.0",
        "pandas == 0.22.0",
        "nltk == 3.2.5",
        "scikit-learn == 0.19.1",
        "h5py == 2.7.1",
        "pyenchant == 1.6.10"
    ],
)
|
990,337 | 8efece4478950f085da119598b82f1961e232dd0 | class CharClass:
""" Character Base Class """
def SetForce(self):
self.cForce['health'] = self.cStats['end'] * 3
self.cForce['mana'] = self.cStats['int'] * 4
self.cForce['dam'] = self.cStats['str'] * 2
def __init__(self, Name = "NONE", Race = "Human", Class = "Warrior", Stats = {'str': 1, 'int': 1, 'end': 1}):
self.cName = Name
self.cRace = Race
self.cClass = Class
self.cStats = Stats
self.cForce = {'health': 1, 'mana': 1, 'dam': 1}
self.SetForce()
def ChangeStats(self,str,int,end):
self.cStats['str'] = str
self.cStats['int'] = int
self.cStats['end'] = end
self.SetForce()
def TestStuff(self, Stuff):
print Stuff
|
990,338 | 2b962fb3c0c33a5e21d4c5a53bf6118f494024a9 | # package definitions
# Bitbucket-hosted dependencies consumed by the owning tool.
BITBUCKET = [
    # (python_package_name, bitbucket_owner_name/bitbucket_organization, bitbucket_repo_name)
    # for example, consider an entry for https://bitbucket.org/atlassian/python-bitbucket
    # ('pybitbucket', 'atlassian', 'python-bitbucket')
]
# --- local git repositories
# format :: (python_package_name: str, absolute_file_path: str)
GIT_LOCAL = [
]
|
990,339 | a62b8254569ce2cb96138020b6b50a2caf98b0b9 | from sqlalchemy import create_engine
# NOTE(review): database credentials are hard-coded in this URL -- move them
# to environment variables or a secrets store before sharing this script.
e = create_engine("mysql+pymysql://vcchau:parksareawesome@parksareawesome.chr9q1gt6nxw.us-east-1.rds.amazonaws.com:3306/parksareawesomedb")
# Destructive one-off maintenance: drops the events table permanently.
e.execute("""
drop table events;
""")
990,340 | f9c6166557440e0ddbc304a46b29ac16cc2638f1 | import datetime
import math
import numpy as np
import pytest
from .country_report import CountryReport
from .formula import AtgFormula
def test_two_traces():
    """Exercise AtgFormula.get_trace_generator on a small synthetic country.

    For each of two formulas the test checks the generated trace's peak
    value, peak date, start date, and the generator's minimum display
    horizon.
    """
    # Synthetic cumulative case counts, one value per day from start_date.
    cumulative_active = np.array([0, 0, 1, 2, 5, 18, 45])
    start_date = datetime.date(2020, 4, 17)
    dates = [start_date + datetime.timedelta(days=d) for d in range(len(cumulative_active))]
    report = CountryReport(
        short_name="UK",
        long_name="United Kingdom",
        dates=dates,
        daily_positive=None,
        daily_dead=None,
        daily_recovered=None,
        daily_active=None,
        cumulative_active=cumulative_active,
        population=None,
    )
    display_until = datetime.date(2020, 4, 30)
    f1 = AtgFormula(tg=2, a=47, exponent=1.5, min_case_count=2)
    # The trace starts (t=0) at 2020-04-19. The maximum of this trace is at t = TG * exponent = 3.
    max_t1, start_date1 = 3, datetime.date(2020, 4, 19)
    length1 = math.ceil(2 * (1.5 + math.sqrt(1.5)))
    f2 = AtgFormula(tg=3, a=12, exponent=2, min_case_count=1)
    # The trace starts (t=0) at 2020-04-18. The maximum of this trace is at t = TG * exponent = 6.
    max_t2, start_date2, length2 = 6, datetime.date(2020, 4, 18), math.ceil(3 * (2 + math.sqrt(2)))
    trace_generator1 = f1.get_trace_generator(report)
    trace1 = trace_generator1.generate_trace(display_until)
    # Expected peak of a * (t/tg)^exponent * exp(-t/tg) evaluated at max_t1.
    assert trace1.max_value == pytest.approx((47 / 2) * (max_t1 / 2) ** 1.5 * math.exp(-max_t1 / 2))
    assert trace1.max_value_date == start_date1 + datetime.timedelta(days=max_t1)
    assert trace1.xs[0] == start_date1
    assert trace_generator1.display_at_least_until == start_date1 + datetime.timedelta(days=length1)
    trace_generator2 = f2.get_trace_generator(report)
    trace2 = trace_generator2.generate_trace(display_until)
    assert trace2.max_value == pytest.approx((12 / 3) * (max_t2 / 3) ** 2 * math.exp(-max_t2 / 3))
    assert trace2.max_value_date == start_date2 + datetime.timedelta(days=max_t2)
    assert trace2.xs[0] == start_date2
    assert trace_generator2.display_at_least_until == start_date2 + datetime.timedelta(days=length2)
|
990,341 | ee4f1f3410c48e8b5dbce55805743be0c6b9ee0d | ''' Crie um programa que leia duas notas de um aluno e calcule sua média, mostrando uma mensagem no final, de acordo com a média atingida:
- Média abaixo de 5.0: REPROVADO
- Média entre 5.0 e 6.9: RECUPERAÇÃO
- Média 7.0 ou superior: APROVADO'''
n1 = float(input('Digite a Primeira Nota: '))
n2 = float(input('Digite a Segunda Nota: '))
soma = (n1+n2)/2  # arithmetic mean of the two grades
# Bug fix: the old chain tested `soma <= 6.9` and then `soma >= 7`, so an
# average such as 6.95 matched neither branch and printed nothing.
if soma < 5:
    print('\033[31m REPROVADO \033[m')
elif soma < 7:
    print('\033[33m RECUPERAÇÃO \033[m')
else:
    print('\033[34m APROVADO \033[m')
990,342 | 55bca2c24303401024774d64bbebd1b6b239d6eb | import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict
import db_mac
from _collections import OrderedDict
# Build a per-aircraft-type detection report: mean similarity ("precision")
# and detection rate, then render both as a dual-axis bar chart.
# NOTE(review): SQL strings are assembled by concatenation -- acceptable for
# a trusted analysis script, but not against untrusted input.
airlines_score = dict()
# Top 15 inbound aircraft types in the morning window.
sql1 = "select aircraft, count(*) from od2 where bound like 'in' and timestamp::time > '07:59:00'::time and timestamp::time <= '16:40:00'::time and aircraft is not null group by 1 order by 2 desc limit 15"
result1 = db_mac.execute(sql1)
aircrafts = set()
for row1 in result1:
    # Collapse close variants (neo/MAX family codes) onto the base type.
    if row1[0] == "A20N":
        aircrafts.add("A320")
    elif row1[0] == "A21N":
        aircrafts.add("A321")
    elif row1[0] == "B738" or row1[0] == "B739":
        aircrafts.add("B737")
    else:
        aircrafts.add(row1[0])
print(aircrafts)
for a in aircrafts:
    sql2 = """select precision from od2 where object is not null and aircraft like '"""+ str(a) +"""' and timestamp::time > '07:59:00'::time and timestamp::time <= '16:40:00'::time """
    result2 = db_mac.execute(sql2)
    values = []
    for row2 in result2:
        # NOTE(review): this emptiness check is loop-invariant and always
        # true once the loop runs -- looks like leftover guard code.
        if len(result2) > 0:
            # Each row stores a stringified list like "[0.91, 0.88]".
            prec_list = row2[0].replace("[", "").replace("]", "").split(",")
            for i in prec_list:
                values.append(float(i))
    # NOTE(review): np.mean of an empty list yields nan -- confirm every
    # type in `aircrafts` has at least one detection row.
    p = round(np.mean(values) * 100, 2)
    airlines_score[a] = p
sorted_aircraft_score = sorted(airlines_score.items(), key=operator.itemgetter(1))
print(sorted_aircraft_score)
prec_aircraft = defaultdict(float)
for a in sorted_aircraft_score:
    count_all = 0
    all_det = 0
    # Total inbound movements of this type vs. those with a detection.
    sql1 = """select count(*) from od2 where bound like 'in' and aircraft like '""" + str(a[0]) +"""' and timestamp::time > '07:59:00'::time and timestamp::time <= '16:40:00'::time"""
    result1 = db_mac.execute(sql1)
    for row1 in result1:
        print(row1[0])
        count_all = row1[0]
    sql2 = """select count(*) from od2 where bound like 'in' and object is not null and aircraft like '""" + str(a[0]) + """' and timestamp::time > '07:59:00'::time and timestamp::time <= '16:40:00'::time"""
    result2 = db_mac.execute(sql2)
    for row2 in result2:
        print(row2[0])
        all_det = row2[0]
    # NOTE(review): divides by zero if count_all is 0 for a type -- confirm.
    prec_aircraft[a[0]] = round(all_det / count_all * 100, 2)
print(prec_aircraft)
# --- dual-axis bar chart: similarity (left) and detection rate (right) ---
N = len(sorted_aircraft_score)
fig, ax = plt.subplots(figsize=(10, 4))
ind = np.arange(N)  # the x locations for the groups
width = 0.2  # the width of the bars
p1 = ax.bar(ind, [i[1] for i in sorted_aircraft_score], width, color="#2980b9")
ax.set_title('Erkennungsrate Flugzeugtypen und Ähnlichkeitswahrscheinlichkeit')
ax.set_xticks(ind + width / 2)
ax.set_xticklabels([i[0] for i in sorted_aircraft_score], rotation=90)
ax.set_ylim(88,100)
ax.set_ylabel('Ähnlichkeitswahrscheinlichkeit in %', color="#2980b9")
#ax.autoscale_view()
ax2 = ax.twinx()
p2 = ax2.bar(ind + width, prec_aircraft.values(), width, color="#e67e22")
ax2.set_ylabel('Erkennung der Fluggesellschaft in %', color="#e67e22")
ax2.set_xticks(ind + width / 2)
#ax2.set_xticklabels([i[0] for i in prec_airline], rotation=90)
ax2.set_ylim(0,100)
plt.tight_layout()
plt.show()
|
990,343 | 9d673ca06275bfafc40637d7f9308d424b5e5245 | mydict={
"name":"sakssham mudgal",
"class": "btech",
"roll no":[1900330100188,2019]
}
#can use single quotes
# Values can be read with single-quoted keys...
print(mydict['name'])
print(mydict['class'])
print(mydict['roll no'])
# ...or with double-quoted keys -- both spell the same str key.
print(mydict["name"])
print(mydict["class"])
print(mydict["roll no"])
# nested dictionary: a dict value may itself be a dict
myfirstdict={
    "name":"Kelvin Peterson",
    "class": "MBA",
    "roll no":[1900330100188,2019],
    "subjects": { # nested dict; keys inside it must still be unique
        "first":"OS",
    }
}
print(myfirstdict["name"])
print(myfirstdict["class"])
print(myfirstdict["subjects"])
print(myfirstdict["subjects"]['first'])
# dictionaries are mutable, i.e. entries can be changed in place
myfirstdict["class"]="MCA"
print(myfirstdict)
|
990,344 | ef6cb3b8757ec2f0fc31710a276a20b8c9325f75 | ba=input()
if ba.isalpha():
za=ba.lower()
if za=="a" or za=="e" or za=="i" or za=="o" or za=="u":
print("Vowel")
else:
print("Consonant")
else:
print("invalid")
|
990,345 | c062c6bbe1fdd3f3c03470f36a416a36a17fdb83 | from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from invitation.views import request_invite, approve_invite
# URL routes for the invitation app (legacy django.conf.urls.patterns API).
urlpatterns = patterns('',
    # Static "invitation request received" confirmation page.
    url(r'^complete/$', TemplateView.as_view(template_name='invitation/invitation_complete.html'), name='invitation_complete'),
    # Landing page reached via an emailed invitation key.
    url(r'^invited/(?P<invitation_key>\w+)/$', approve_invite, name='invitation_invited'),
    # Form for requesting a new invitation.
    url(r'^request/$', request_invite, name='request_invite')
)
|
990,346 | f4e821a128cd680f6dc33526024e9888c42c2fd7 | from simulations.recreate_collisions import create_cars_from_collision_json
from json import JSONDecoder
import os
# Folder holding the simulator's collision logs (Python 2 script).
log_directory = os.path.dirname(os.path.abspath(__file__)) + "/../logs/"
reading_file = open(log_directory + "collisions1.log")
# The log is one JSON object per line ending in ",\n"; wrap the lines in []
# and drop the final ",\n" to parse the whole file as one JSON array.
file_string = "["
for line in reading_file:
    file_string += line
file_string = file_string[:len(file_string)-2] + "]"
json_collisions = JSONDecoder().decode(file_string)
# Lane-sequence substrings treated as known/expected collision geometries.
collision_positions = [
    "1132", "1134", "2241", "2243", "3312", "3314", "4421", "4423"
]
positive = 0
for json_collision in json_collisions:
    ble = False  # becomes True when the lane order matches a known pattern
    car_dict = create_cars_from_collision_json(json_collision)
    # Concatenate each car's lane id into one searchable string.
    # NOTE(review): dict iteration order decides the pattern match --
    # presumably insertion order is meaningful here; confirm.
    lanes_order = ""
    for car in list(car_dict):
        lanes_order += str(car_dict[car].get_lane())
    for option in collision_positions:
        if option in lanes_order:
            ble = True
            positive += 1
            break
    # Dump collisions that match no known pattern for manual inspection.
    if not ble:
        print json_collision
        print lanes_order
print positive, len(json_collisions)
|
990,347 | c7a365d6aa93e1393b24fc05eb2527ddbbf64784 | # 1 创建学生表:有学生 id,姓名,密码,年龄
create table student(
id int primary key auto_increment,
name varchar(50) not null,
password varchar(25) default '123',
age varchar(10)
);
# 2 创建学校表:有学校id,学校名称,地址
create table school(
id int primary key auto_increment,
name varchar(50) not null,
address varchar(100) not null
);
# 3 创建课程表:有课程id,课程名称,课程价格,课程周期,所属校区(其实就是学校id)
create table course(
id int primary key auto_increment,
name varchar(50) not null,
price int not null,
cycle varchar(50) not null,
school_id varchar(50) not null
);
# 4 创建选课表:有id,学生id,课程id
create table curricula_variable(
id int primary key auto_increment,
student_id int not null,
course_id int not null
);
# 添加学生:张三,20岁,密码123
# 李四,18岁,密码111
insert into student(name, password, age) values('张三', '123', '20'), ('李四', '111','18');
# 创建学校:oldboyBeijing 地址:北京昌平
# oldboyShanghai 地址:上海浦东
insert into school(name, address) values('oldboyBeijing', '北京昌平'), ('oldboyShanghai', '上海浦东');
# 创建课程:Python全栈开发一期,价格2w, 周期5个月,属于上海校区
# Linux运维一期 价格200,周期2个月,属于上海校区
# Python全栈开发20期 ,价格2w,周期5个月,属于北京校区
insert into course(name, price, cycle, school_id) values('Python全栈开发一期', 20000, 5, 2), ('Linux运维一期', 200, 2, 2), ('Python全栈开发20期', 20000, 5, 1);
# 张三同学选了Python全栈开发一期的课程
# 李四同学选了Linux运维一期的课程
# (其实就是在选课表里添加数据)
insert into curricula_variable(student_id, course_id) values(1, 1), (2, 2);
# 查询:查询北京校区开了什么课程
select * from course where school_id=1;
# 查询上海校区开了什么课程
select * from course where school_id=2;
# 查询年龄大于19岁的人
select * from student where age>19;
# 查询课程周期大于4个月的课程
select * from course where cycle>4;
|
990,348 | 0c3d7c323283a1d761e3912ea940d72d5dbc7e4c | import time
import sys
class Problem():
def __init__(self):
self.orientation = (0, 1) # (0,1) = haut (1,0) = droite (-1,0) = gauche (0,-1) = bas
self.position = (50, 50) # position actuelle de la fourmi sur la grille
self.compteur_de_noirs = 0 # compte le nombres de cases noires sur la grille
self.compteur_des_etapes = 0 # nombre d'étapes déjà effectuées
self.grille = [[True for i in range(100)]for j in range(100)] # grille sur laquelle évolue la fourmi True =
# blanc et False = noir
self.couleur_derniere_position = True # couleur de la position précédente après son changement de couleur
self.liste_dernier_cycle = [] # dernier cycle de taille 104 rencontré, utilisée pour comparer avec le cycle en
# cours pour vérifier si le motif qui se répète est atteint
self.compteur_meme_cycle = 0 # nombre de fois successives où le dernier cycle était identique à celui en cours
@staticmethod
def tourner_sens_aiguilles(orientation_actuelle): # fait tourner le vecteur orientation de 90° dans le sens
# des aiguilles d'une montre
return -orientation_actuelle[1], orientation_actuelle[0]
@staticmethod
def tourner_sens_contraire_aiguilles(orientation_actuelle): # fait tourner le vecteur orientation de 90° dans
# le sens contraire des aiguilles d'une montre
return orientation_actuelle[1], -orientation_actuelle[0]
'''On sait que la fourmi après plus de environ 10 000 étapes suit un cycle régulier de logueur 104.
Dans un premier temps je simule les 10 000 premières étapes, puis je simule par blocs de 104 jusqu\'a obtenir 3 fois
de suite le meme cycle. Plus de détails ici : https://en.wikipedia.org/wiki/Langton%27s_ant'''
def simuler_une_etape(self): # simule une étape de la fourmi en considérant la grille, la position de la fourmi et
# sa direction
couleur_position = self.grille[self.position[0]][self.position[1]]
if couleur_position:
nouvelle_direction = self.tourner_sens_aiguilles(self.orientation)
else:
nouvelle_direction = self.tourner_sens_contraire_aiguilles(self.orientation)
nouvelle_position = tuple(map(sum, zip(self.position, nouvelle_direction))) # additionne vectoriellement le
# vecteur position et le vecteur direction
self.grille[self.position[0]][self.position[1]] = not couleur_position # on inverse la couleur dans la case
# initiale
if couleur_position:
self.compteur_de_noirs += 1
else:
self.compteur_de_noirs -= 1
self.position = nouvelle_position
self.orientation = nouvelle_direction
self.couleur_derniere_position = not couleur_position
def simuler_10000(self): # simule les 10 024 premières étapes, je choisis 10 024 car (10**18-10024) = 0 mod 104
for i in range(10024):
self.simuler_une_etape()
self.compteur_des_etapes += 1
def simuler_104(self): # simule 104 étapes en stockant les résultats de chaque étapes pour pour pouvoir déterminer
# a posteriori si un cycle qui se répète est atteint
cycle_en_cours = []
for i in range(104):
self.simuler_une_etape()
self.compteur_des_etapes += 1
cycle_en_cours.append(self.couleur_derniere_position)
if self.liste_dernier_cycle == cycle_en_cours:
self.compteur_meme_cycle += 1
else:
self.compteur_meme_cycle = 0
self.liste_dernier_cycle = cycle_en_cours
def obtenir_nombre_de_noirs_par_cycle(self): # a partir de la liste du cycle donne le nombre de cases noires créées
nombre_de_noirs_crees = 0
for i in self.liste_dernier_cycle:
if i: # si la case est devenue blanche on a perdu une case noire
nombre_de_noirs_crees -= 1
else: # si la case est devenue noire on a gagné une case blanche
nombre_de_noirs_crees += 1
return nombre_de_noirs_crees
def get(self):
self.simuler_10000()
while self.compteur_meme_cycle < 3:
self.simuler_104()
etapes_restantes = 10**18 - self.compteur_des_etapes
cycles_restants = etapes_restantes // 104 # etapes restantes est divisibles par 104 car 10**18 - 10 024 est
# divisible par 104
nombre_de_noirs_par_cycle = self.obtenir_nombre_de_noirs_par_cycle()
self.compteur_de_noirs = self.compteur_de_noirs + (cycles_restants * nombre_de_noirs_par_cycle)
def solve(self):
self.get()
print(self.compteur_de_noirs)
def main():
start = time.perf_counter()
u = Problem()
u.solve()
print('temps d execution', time.perf_counter() - start, 'sec')
if __name__ == '__main__':
sys.exit(main())
|
990,349 | 8535a43af5567ad7a8fd991b90b5f491149aa6be | # Queue is a FIFO structure.
from collections import deque
class Queue(object):
def __init__(self):
self.items = deque()
@classmethod
def new(cls):
queue = cls()
return queue
def insert(self, data):
self.items.appendleft(data)
def pop(self):
return self.items.pop()
def peek(self):
return self.items[0]
def is_empty(self):
try:
is_not_empty = self.items[0]
return False
except IndexError:
return True
def show(self):
if self.is_empty():
return "EMPTY QUEUE"
for elem in self.items:
print(str(elem))
print("---------")
q = Queue.new()
q.show()
q.insert(3)
q.show()
q.insert(2)
q.show()
q.insert(1)
q.show()
q.pop()
q.show()
print(str(q.is_empty()))
q.pop()
q.pop()
print(str(q.is_empty())) |
990,350 | dabb3a9dbf31cbda5586b4c0074211fc80cc6c62 | from messages import *
from kickstarter import *
from twitter import *
from rss import *
from valenbisi import *
|
990,351 | a77b0d720d9d1a4e1ffc360ed167fbd995863d36 | #!/usr/bin/env python
__import__('sys').path.append(__import__('os').path.join(__import__('os').path.dirname(__file__), '..'))
__import__('testfwk').setup(__file__)
# - prolog marker
import os, logging
from testfwk import create_config, run_test, str_dict_testsuite, testfwk_set_path, try_catch
from grid_control.backends.aspect_status import CheckInfo, CheckJobs, CheckStatus
from grid_control.backends.backend_tools import ChunkedExecutor, ProcessCreatorAppendArguments
from grid_control.job_db import Job
testfwk_set_path('../bin')
def test_check(plugin, src='/dev/null', src_err='/dev/null', src_ret=0, wms_id_list=None, overwrite_exec=None, do_print=True, config_dict=None, **kwargs):
config = create_config(config_dict=config_dict or {})
check_executor = CheckJobs.create_instance(plugin, config, **kwargs)
check_executor.setup(logging.getLogger())
if overwrite_exec:
check_executor._proc_factory._cmd = overwrite_exec
os.environ['GC_TEST_FILE'] = src
os.environ['GC_TEST_ERR'] = src_err
os.environ['GC_TEST_RET'] = str(src_ret)
if wms_id_list:
executor = ChunkedExecutor(config, 'status', check_executor)
executor.setup(logging.getLogger())
else:
executor = check_executor
for wms_id, jobStatus, jobInfo in executor.execute(wms_id_list or []):
for key in list(jobInfo):
if isinstance(key, int):
jobInfo['<%s>' % CheckInfo.enum2str(key)] = jobInfo.pop(key)
if do_print:
print('%s %s %s' % (wms_id, Job.enum2str(jobStatus), str_dict_testsuite(jobInfo)))
print('STATUS=%s' % CheckStatus.enum2str(executor.get_status()))
class Test_Base:
"""
>>> try_catch(lambda: test_check('CheckJobs'), 'AbstractError', 'is an abstract function')
caught
>>> try_catch(lambda: test_check('CheckJobsWithProcess', proc_factory=ProcessCreatorAppendArguments(create_config(), 'ls')), 'AbstractError', 'is an abstract function')
caught
"""
class Test_ARC:
"""
>>> test_check('ARCCheckJobs', wms_id_list=['gsiftp://grid-arc0.desy.de:2811/jobs/U3pLDmfrXYonXlDnJp9YSwCoABFKDmABFKDmWpJKDmABFKDmoDon2m'])
STATUS=OK
>>> test_check('ARCCheckJobs', 'test.ARC.status1')
gsiftp://grid-arc0.desy.de:2811/jobs/U3pLDmfrXYonXlDnJp9YSwCoABFKDmABFKDmWpJKDmABFKDmoDon2m QUEUED {'<RAW_STATUS>': 'queuing', 'entry valid for': '20 minutes', 'entry valid from': '2016-06-18 18:20:24', 'id on service': 'U3pLDmfrXYonXlDnJp9YSwCoABFKDmABFKDmWpJKDmABFKDmoDon2m', 'job management url': 'gsiftp://grid-arc0.desy.de:2811/jobs (org.nordugrid.gridftpjob)', 'job status url': 'ldap://grid-arc0.desy.de:2135/Mds-Vo-Name=local,o=grid??sub?(nordugrid-job-globalid=gsiftp:\\\\2f\\\\2fgrid-arc0.desy.de:2811\\\\2fjobs\\\\2fU3pLDmfrXYonXlDnJp9YSwCoABFKDmABFKDmWpJKDmABFKDmoDon2m) (org.nordugrid.ldapng)', 'name': 'ARC', 'other messages': 'SubmittedVia=org.nordugrid.gridftpjob', 'owner': '/C=DE/O=GermanGrid/OU=uni-hamburg/CN=Fred Markus Stober', 'queue': 'grid', 'requested slots': '1', 'service information url': 'ldap://grid-arc0.desy.de:2135/Mds-Vo-Name=local,o=grid??sub?(objectClass=*) (org.nordugrid.ldapng)', 'session directory url': 'gsiftp://grid-arc0.desy.de:2811/jobs/U3pLDmfrXYonXlDnJp9YSwCoABFKDmABFKDmWpJKDmABFKDmoDon2m', 'stagein directory url': 'gsiftp://grid-arc0.desy.de:2811/jobs/U3pLDmfrXYonXlDnJp9YSwCoABFKDmABFKDmWpJKDmABFKDmoDon2m', 'stageout directory url': 'gsiftp://grid-arc0.desy.de:2811/jobs/U3pLDmfrXYonXlDnJp9YSwCoABFKDmABFKDmWpJKDmABFKDmoDon2m', 'state': 'Queuing (INLRMS:Q)', 'stderr': 'err', 'stdin': '/dev/null', 'stdout': 'out', 'submitted': '2016-06-18 18:01:23', 'submitted from': '131.169.168.65:56263', 'waiting position': '1'}
gsiftp://grid-arc0.desy.de:2811/jobs/xBBMDmPwXYonXlDnJp9YSwCoABFKDmABFKDmlqLKDmABFKDmycOlCn QUEUED {'<RAW_STATUS>': 'queuing', 'entry valid for': '20 minutes', 'entry valid from': '2016-06-18 18:20:24', 'id on service': 'xBBMDmPwXYonXlDnJp9YSwCoABFKDmABFKDmlqLKDmABFKDmycOlCn', 'job management url': 'gsiftp://grid-arc0.desy.de:2811/jobs (org.nordugrid.gridftpjob)', 'job status url': 'ldap://grid-arc0.desy.de:2135/Mds-Vo-Name=local,o=grid??sub?(nordugrid-job-globalid=gsiftp:\\\\2f\\\\2fgrid-arc0.desy.de:2811\\\\2fjobs\\\\2fxBBMDmPwXYonXlDnJp9YSwCoABFKDmABFKDmlqLKDmABFKDmycOlCn) (org.nordugrid.ldapng)', 'name': 'ARC', 'other messages': 'SubmittedVia=org.nordugrid.gridftpjob', 'owner': '/C=DE/O=GermanGrid/OU=uni-hamburg/CN=Fred Markus Stober', 'queue': 'grid', 'requested slots': '1', 'service information url': 'ldap://grid-arc0.desy.de:2135/Mds-Vo-Name=local,o=grid??sub?(objectClass=*) (org.nordugrid.ldapng)', 'session directory url': 'gsiftp://grid-arc0.desy.de:2811/jobs/xBBMDmPwXYonXlDnJp9YSwCoABFKDmABFKDmlqLKDmABFKDmycOlCn', 'stagein directory url': 'gsiftp://grid-arc0.desy.de:2811/jobs/xBBMDmPwXYonXlDnJp9YSwCoABFKDmABFKDmlqLKDmABFKDmycOlCn', 'stageout directory url': 'gsiftp://grid-arc0.desy.de:2811/jobs/xBBMDmPwXYonXlDnJp9YSwCoABFKDmABFKDmlqLKDmABFKDmycOlCn', 'state': 'Queuing (INLRMS:Q)', 'stderr': 'err', 'stdin': '/dev/null', 'stdout': 'out', 'submitted': '2016-06-18 18:06:17', 'submitted from': '131.169.168.65:56391', 'waiting position': '2'}
STATUS=OK
>>> test_check('ARCCheckJobs', 'test.ARC.status2')
gsiftp://grid-arc0.desy.de:2811/jobs/U3pLDmfrXYonXlDnJp9YSwCoABFKDmABFKDmWpJKDmABFKDmoDon2m DONE {'<RAW_STATUS>': 'finished', 'end time': '2016-06-18 19:14:05', 'entry valid for': '20 minutes', 'entry valid from': '2016-06-18 20:30:10', 'exit code': '0', 'id on service': 'U3pLDmfrXYonXlDnJp9YSwCoABFKDmABFKDmWpJKDmABFKDmoDon2m', 'job management url': 'gsiftp://grid-arc0.desy.de:2811/jobs (org.nordugrid.gridftpjob)', 'job status url': 'ldap://grid-arc0.desy.de:2135/Mds-Vo-Name=local,o=grid??sub?(nordugrid-job-globalid=gsiftp:\\\\2f\\\\2fgrid-arc0.desy.de:2811\\\\2fjobs\\\\2fU3pLDmfrXYonXlDnJp9YSwCoABFKDmABFKDmWpJKDmABFKDmoDon2m) (org.nordugrid.ldapng)', 'name': 'ARC', 'other messages': 'SubmittedVia=org.nordugrid.gridftpjob', 'owner': '/C=DE/O=GermanGrid/OU=uni-hamburg/CN=Fred Markus Stober', 'queue': 'grid', 'requested slots': '1', 'results must be retrieved before': '2016-06-25 18:21:58', 'service information url': 'ldap://grid-arc0.desy.de:2135/Mds-Vo-Name=local,o=grid??sub?(objectClass=*) (org.nordugrid.ldapng)', 'session directory url': 'gsiftp://grid-arc0.desy.de:2811/jobs/U3pLDmfrXYonXlDnJp9YSwCoABFKDmABFKDmWpJKDmABFKDmoDon2m', 'stagein directory url': 'gsiftp://grid-arc0.desy.de:2811/jobs/U3pLDmfrXYonXlDnJp9YSwCoABFKDmABFKDmWpJKDmABFKDmoDon2m', 'stageout directory url': 'gsiftp://grid-arc0.desy.de:2811/jobs/U3pLDmfrXYonXlDnJp9YSwCoABFKDmABFKDmWpJKDmABFKDmoDon2m', 'state': 'Finished (FINISHED)', 'stderr': 'err', 'stdin': '/dev/null', 'stdout': 'out', 'submitted': '2016-06-18 18:01:23', 'submitted from': '131.169.168.65:56263'}
gsiftp://grid-arc0.desy.de:2811/jobs/xBBMDmPwXYonXlDnJp9YSwCoABFKDmABFKDmlqLKDmABFKDmycOlCn DONE {'<RAW_STATUS>': 'finished', 'end time': '2016-06-18 19:15:27', 'entry valid for': '20 minutes', 'entry valid from': '2016-06-18 20:30:10', 'exit code': '0', 'id on service': 'xBBMDmPwXYonXlDnJp9YSwCoABFKDmABFKDmlqLKDmABFKDmycOlCn', 'job management url': 'gsiftp://grid-arc0.desy.de:2811/jobs (org.nordugrid.gridftpjob)', 'job status url': 'ldap://grid-arc0.desy.de:2135/Mds-Vo-Name=local,o=grid??sub?(nordugrid-job-globalid=gsiftp:\\\\2f\\\\2fgrid-arc0.desy.de:2811\\\\2fjobs\\\\2fxBBMDmPwXYonXlDnJp9YSwCoABFKDmABFKDmlqLKDmABFKDmycOlCn) (org.nordugrid.ldapng)', 'name': 'ARC', 'other messages': 'SubmittedVia=org.nordugrid.gridftpjob', 'owner': '/C=DE/O=GermanGrid/OU=uni-hamburg/CN=Fred Markus Stober', 'queue': 'grid', 'requested slots': '1', 'results must be retrieved before': '2016-06-25 18:33:07', 'service information url': 'ldap://grid-arc0.desy.de:2135/Mds-Vo-Name=local,o=grid??sub?(objectClass=*) (org.nordugrid.ldapng)', 'session directory url': 'gsiftp://grid-arc0.desy.de:2811/jobs/xBBMDmPwXYonXlDnJp9YSwCoABFKDmABFKDmlqLKDmABFKDmycOlCn', 'stagein directory url': 'gsiftp://grid-arc0.desy.de:2811/jobs/xBBMDmPwXYonXlDnJp9YSwCoABFKDmABFKDmlqLKDmABFKDmycOlCn', 'stageout directory url': 'gsiftp://grid-arc0.desy.de:2811/jobs/xBBMDmPwXYonXlDnJp9YSwCoABFKDmABFKDmlqLKDmABFKDmycOlCn', 'state': 'Finished (FINISHED)', 'stderr': 'err', 'stdin': '/dev/null', 'stdout': 'out', 'submitted': '2016-06-18 18:06:17', 'submitted from': '131.169.168.65:56391'}
STATUS=OK
"""
class Test_Condor:
"""
>>> test_check('CondorCheckJobs', wms_id_list=['78905.0'])
STATUS=OK
>>> test_check('CondorCheckJobs', 'test.Condor.status_long1')
78905.0 RUNNING {'<RAW_STATUS>': 2, '<WN>': 'slot1@moab-vm-702139.bwforcluster', 'CompletionDate': '0', 'GlobalJobId': 'ekpcms6.physik.uni-karlsruhe.de#78905.0#1466285011', 'JobCurrentStartDate': '1466285012', 'JobStartDate': '1466285012', 'QDate': '1466285011'}
78905.1 RUNNING {'<RAW_STATUS>': 2, '<WN>': 'slot2@moab-vm-702139.bwforcluster', 'CompletionDate': '0', 'GlobalJobId': 'ekpcms6.physik.uni-karlsruhe.de#78905.1#1466285011', 'JobCurrentStartDate': '1466285012', 'JobStartDate': '1466285012', 'QDate': '1466285011'}
STATUS=OK
>>> test_check('CondorCheckJobs', src_err='test.Condor.status_long2', src_ret=123)
Process '<testsuite dir>/bin/condor_q' '-long' finished with exit code 123
STATUS=ERROR
"""
class Test_CREAM:
"""
>>> test_check('CREAMCheckJobs', wms_id_list=['https://grid-cr0.desy.de:8443/CREAM052718106'])
STATUS=OK
>>> test_check('CREAMCheckJobs', 'test.CREAM.status')
https://grid-cr0.desy.de:8443/CREAM052718106 ABORTED {'<RAW_STATUS>': 'ABORTED', 'description': 'submission to BLAH failed [retry count=3]', 'failurereason': 'BLAH error: submission command failed (exit code = 1) (stdout:) (stderr:qsub: Unauthorized Request MSG=group ACL is not satisfied: user cmsger030@grid-cr0.desy.de, queue desy-) N/A (jobId = CREAM052718106)'}
https://grid-cr0.desy.de:8443/CREAM910144555 ABORTED {'<RAW_STATUS>': 'ABORTED', 'description': 'submission to BLAH failed [retry count=3]', 'failurereason': 'BLAH error: submission command failed (exit code = 1) (stdout:) (stderr:qsub: Unauthorized Request MSG=group ACL is not satisfied: user cmsger030@grid-cr0.desy.de, queue desy-) N/A (jobId = CREAM910144555)'}
STATUS=OK
>>> test_check('CREAMCheckJobs', 'test.CREAM.status', src_ret=1)
https://grid-cr0.desy.de:8443/CREAM052718106 ABORTED {'<RAW_STATUS>': 'ABORTED', 'description': 'submission to BLAH failed [retry count=3]', 'failurereason': 'BLAH error: submission command failed (exit code = 1) (stdout:) (stderr:qsub: Unauthorized Request MSG=group ACL is not satisfied: user cmsger030@grid-cr0.desy.de, queue desy-) N/A (jobId = CREAM052718106)'}
https://grid-cr0.desy.de:8443/CREAM910144555 ABORTED {'<RAW_STATUS>': 'ABORTED', 'description': 'submission to BLAH failed [retry count=3]', 'failurereason': 'BLAH error: submission command failed (exit code = 1) (stdout:) (stderr:qsub: Unauthorized Request MSG=group ACL is not satisfied: user cmsger030@grid-cr0.desy.de, queue desy-) N/A (jobId = CREAM910144555)'}
Process '<testsuite dir>/bin/glite-ce-job-status' '--level' '0' '--logfile' '/dev/stderr' finished with exit code 1
STATUS=OK
"""
class Test_GliteWMS:
"""
>>> test_check('GridCheckJobs', wms_id_list=['https://lb-3-fzk.gridka.de:9000/GgeNd6MD5REG2KsbN5dmEg'], check_exec='glite-wms-job-status')
STATUS=OK
>>> test_check('GridCheckJobs', 'test.GliteWMS.status1', check_exec='glite-wms-job-status')
https://lb-3-fzk.gridka.de:9000/GgeNd6MD5REG2KsbN5dmEg QUEUED {'<QUEUE>': 'jobmanager-pbspro-cmsXS', '<RAW_STATUS>': 'scheduled', '<SITE>': 'ce-2-fzk.gridka.de:2119', 'reason': 'Job successfully submitted to Globus', 'timestamp': 1289766838}
STATUS=OK
>>> test_check('GridCheckJobs', 'test.GliteWMS.status2', check_exec='glite-wms-job-status')
https://grid-lb0.desy.de:9000/gI6QdIJdvkCj3V2nqRQInw DONE {'<QUEUE>': 'cream-pbs-cms', '<RAW_STATUS>': 'done', '<SITE>': 'cream02.athena.hellasgrid.gr:8443', 'exit code': '0', 'reason': 'Job Terminated Successfully', 'timestamp': 1289772322}
https://lb-3-fzk.gridka.de:9000/h261HCD0QRxIn1gr-8Q8VA RUNNING {'<QUEUE>': 'jobmanager-lcglsf-grid_2nh_cms', '<RAW_STATUS>': 'running', '<SITE>': 'ce126.cern.ch:2119', 'reason': 'Job successfully submitted to Globus', 'timestamp': 1289772334}
STATUS=OK
>>> test_check('GridCheckJobs', 'test.GliteWMS.status3', check_exec='glite-wms-job-status')
https://lb-1-fzk.gridka.de:9000/Cr-LicZeP8kiaLG77_JXyQ DONE {'<QUEUE>': 'jobmanager-lcgpbs-cms', '<RAW_STATUS>': 'done', '<SITE>': 't2-ce-02.to.infn.it:2119', 'exit code': '0', 'reason': 'Job terminated successfully', 'timestamp': 1289772799}
https://lb-3-fzk.gridka.de:9000/h261HCD0QRxIn1gr-8Q8VA DONE {'<QUEUE>': 'jobmanager-lcglsf-grid_2nh_cms', '<RAW_STATUS>': 'done', '<SITE>': 'ce126.cern.ch:2119', 'exit code': '0', 'reason': 'Job terminated successfully', 'timestamp': 1289772334}
STATUS=OK
"""
class Test_GridEngine:
"""
>>> test_check('GridEngineCheckJobs', 'test.GridEngine.status1')
6323 RUNNING {'<QUEUE>': 'short', '<RAW_STATUS>': 'r', '<WN>': 'ekpplus008.ekpplus.cluster', 'JAT_prio': '0.55077', 'JAT_start_time': '2010-11-04T20:28:38', 'JB_name': 'GC11e8a12f.12', 'JB_owner': 'stober', 'queue_name': 'short@ekpplus008.ekpplus.cluster', 'slots': '1'}
6324 RUNNING {'<QUEUE>': 'short', '<RAW_STATUS>': 't', '<WN>': 'ekpplus001.ekpplus.cluster', 'JAT_prio': '0.55071', 'JAT_start_time': '2010-11-04T20:28:38', 'JB_name': 'GC11e8a12f.13', 'JB_owner': 'stober', 'queue_name': 'short@ekpplus001.ekpplus.cluster', 'slots': '1'}
6350 UNKNOWN {'<RAW_STATUS>': 'Eqw', 'JAT_prio': '0.55025', 'JB_name': 'GC11e8a12f.38', 'JB_owner': 'stober', 'JB_submission_time': '2010-11-04T20:28:34', 'slots': '1'}
6352 QUEUED {'<RAW_STATUS>': 'qw', 'JAT_prio': '0.00000', 'JB_name': 'GC11e8a12f.40', 'JB_owner': 'stober', 'JB_submission_time': '2010-11-04T20:28:38', 'slots': '1'}
STATUS=OK
>>> test_check('GridEngineCheckJobs', 'test.GridEngine.status1', config_dict={'backend': {'user': ''}})
6323 RUNNING {'<QUEUE>': 'short', '<RAW_STATUS>': 'r', '<WN>': 'ekpplus008.ekpplus.cluster', 'JAT_prio': '0.55077', 'JAT_start_time': '2010-11-04T20:28:38', 'JB_name': 'GC11e8a12f.12', 'JB_owner': 'stober', 'queue_name': 'short@ekpplus008.ekpplus.cluster', 'slots': '1'}
6324 RUNNING {'<QUEUE>': 'short', '<RAW_STATUS>': 't', '<WN>': 'ekpplus001.ekpplus.cluster', 'JAT_prio': '0.55071', 'JAT_start_time': '2010-11-04T20:28:38', 'JB_name': 'GC11e8a12f.13', 'JB_owner': 'stober', 'queue_name': 'short@ekpplus001.ekpplus.cluster', 'slots': '1'}
6350 UNKNOWN {'<RAW_STATUS>': 'Eqw', 'JAT_prio': '0.55025', 'JB_name': 'GC11e8a12f.38', 'JB_owner': 'stober', 'JB_submission_time': '2010-11-04T20:28:34', 'slots': '1'}
6352 QUEUED {'<RAW_STATUS>': 'qw', 'JAT_prio': '0.00000', 'JB_name': 'GC11e8a12f.40', 'JB_owner': 'stober', 'JB_submission_time': '2010-11-04T20:28:38', 'slots': '1'}
STATUS=OK
>>> test_check('GridEngineCheckJobs', 'test.GridEngine.status2')
STATUS=OK
>>> try_catch(lambda: test_check('GridEngineCheckJobs', 'test.GridEngine.status3'), 'BackendError', 'parse qstat XML output')
caught
>>> try_catch(lambda: test_check('GridEngineCheckJobs', 'test.GridEngine.status4'), 'BackendError', 'Error reading job info')
caught
>>> test_check('GridEngineCheckJobs', 'test.GridEngine.status5')
6352 READY {'<RAW_STATUS>': 'xxx', 'JAT_prio': '0.00000', 'JB_name': 'GC11e8a12f.40', 'JB_owner': 'stober', 'JB_submission_time': '2010-11-04T20:28:38', 'slots': '1'}
STATUS=OK
"""
class Test_Host:
"""
>>> test_check('HostCheckJobs', wms_id_list=['1036587'])
STATUS=OK
>>> test_check('HostCheckJobs', 'test.Host.status', overwrite_exec='../bin/ps')
12862 RUNNING {'<QUEUE>': 'localqueue', '<RAW_STATUS>': 'SN', '<WN>': 'localhost', 'command': '/bin/bash /usr/users/stober/grid-control/share/local.sh', 'cpu': '0.0', 'mem': '0.0', 'rss': '1244', 'start': '20:33', 'time': '0:00', 'tty': 'pts/62', 'user': 'stober', 'vsz': '63792'}
12868 RUNNING {'<QUEUE>': 'localqueue', '<RAW_STATUS>': 'SN', '<WN>': 'localhost', 'command': '/bin/bash /usr/users/stober/grid-control/share/local.sh', 'cpu': '0.0', 'mem': '0.0', 'rss': '1244', 'start': '20:33', 'time': '0:00', 'tty': 'pts/62', 'user': 'stober', 'vsz': '63792'}
12878 UNKNOWN {'<QUEUE>': 'localqueue', '<RAW_STATUS>': 'Z', '<WN>': 'localhost', 'command': '/bin/bash /usr/users/stober/grid-control/share/local.sh', 'cpu': '0.0', 'mem': '0.0', 'rss': '1244', 'start': '20:33', 'time': '0:00', 'tty': 'pts/62', 'user': 'stober', 'vsz': '63792'}
STATUS=OK
"""
class Test_JMS:
"""
>>> test_check('JMSCheckJobs', wms_id_list=['1036587'])
STATUS=OK
>>> test_check('JMSCheckJobs', 'test.JMS.status')
456808 RUNNING {'<QUEUE>': 'b', '<RAW_STATUS>': 'r', '<WN>': '1*003', 'cpu_time': '60', 'group': 'bd00', 'job_name': 'GCdc509df1.0', 'kill_time': '4/20:43', 'memory': '1000', 'nodes': '1/1/1', 'partition': 't', 'queue_time': '4/19:43', 'start_time': '4/19:43', 'user': 'bd105', 'wall_time': '60'}
456809 RUNNING {'<QUEUE>': 'b', '<RAW_STATUS>': 'r', '<WN>': '1*030', 'cpu_time': '60', 'group': 'bd00', 'job_name': 'GCdc509df1.1', 'kill_time': '4/20:43', 'memory': '1000', 'nodes': '1/1/1', 'partition': 't', 'queue_time': '4/19:43', 'start_time': '4/19:43', 'user': 'bd105', 'wall_time': '60'}
456810 WAITING {'<QUEUE>': 'b', '<RAW_STATUS>': 'w', 'cpu_time': '60', 'group': 'bd00', 'job_name': 'GCdc509df1.2', 'memory': '1000', 'nodes': '1/1/1', 'partition': 't', 'queue_time': '4/19:43', 'user': 'bd105', 'wall_time': '60'}
456818 WAITING {'<QUEUE>': 'b', '<RAW_STATUS>': 'w', 'cpu_time': '60', 'group': 'bd00', 'job_name': 'GCdc509df1.10', 'memory': '1000', 'nodes': '1/1/1', 'partition': 't', 'queue_time': '4/19:43', 'user': 'bd105', 'wall_time': '60'}
456819 WAITING {'<QUEUE>': 'b', '<RAW_STATUS>': 'w', 'cpu_time': '60', 'group': 'bd00', 'job_name': 'GCdc509df1.11', 'memory': '1000', 'nodes': '1/1/1', 'partition': 't', 'queue_time': '4/19:43', 'user': 'bd105', 'wall_time': '60'}
STATUS=OK
"""
class Test_LSF:
"""
>>> test_check('LSFCheckJobs', wms_id_list=['1036587'])
STATUS=OK
>>> test_check('LSFCheckJobs', 'test.LSF.status1')
103506916 QUEUED {'<QUEUE>': '8nm', '<RAW_STATUS>': 'PEND', '<WN>': '-', 'from': 'lxplus235', 'job_name': 'GC81e0208d.0', 'submit_time': 'Nov 4 20:07', 'user': 'stober'}
103506921 QUEUED {'<QUEUE>': '8nm', '<RAW_STATUS>': 'PEND', '<WN>': '-', 'from': 'lxplus235', 'job_name': 'GC81e0208d.3', 'submit_time': 'Nov 4 20:07', 'user': 'stober'}
103506923 QUEUED {'<QUEUE>': '8nm', '<RAW_STATUS>': 'PEND', '<WN>': '-', 'from': 'lxplus235', 'job_name': 'GC81e0208d.4', 'submit_time': 'Nov 4 20:07', 'user': 'stober'}
103606918 RUNNING {'<QUEUE>': '8nm', '<RAW_STATUS>': 'RUN', '<WN>': 'lxbsu0606', 'from': 'lxplus235', 'job_name': 'GC81e0208d.7', 'submit_time': 'Nov 4 20:07', 'user': 'stober'}
103606919 RUNNING {'<QUEUE>': '8nm', '<RAW_STATUS>': 'RUN', '<WN>': 'lxbsu0647', 'from': 'lxplus235', 'job_name': 'GC81e0208d.8', 'submit_time': 'Nov 4 20:07', 'user': 'stober'}
103606921 RUNNING {'<QUEUE>': '8nm', '<RAW_STATUS>': 'RUN', '<WN>': 'lxbsq1446', 'from': 'lxplus235', 'job_name': 'GC81e0208d.9', 'submit_time': 'Nov 4 20:07', 'user': 'stober'}
103606923 DONE {'<QUEUE>': '8nm', '<RAW_STATUS>': 'DONE', '<WN>': 'lxbsq0620', 'from': 'lxplus235', 'job_name': 'GC81e0208d.5', 'submit_time': 'Nov 4 20:07', 'user': 'stober'}
103507052 QUEUED {'<QUEUE>': '8nm', '<RAW_STATUS>': 'PEND', '<WN>': '-', 'from': 'lxplus235', 'job_name': 'GC81e0208d.10', 'submit_time': 'Nov 4 20:09', 'user': 'stober'}
103507054 QUEUED {'<QUEUE>': '8nm', '<RAW_STATUS>': 'PEND', '<WN>': '-', 'from': 'lxplus235', 'job_name': 'GC81e0208d.11', 'submit_time': 'Nov 4 20:09', 'user': 'stober'}
103507056 QUEUED {'<QUEUE>': '8nm', '<RAW_STATUS>': 'PEND', '<WN>': '-', 'from': 'lxplus235', 'job_name': 'GC81e0208d.13', 'submit_time': 'Nov 4 20:09', 'user': 'stober'}
STATUS=OK
>>> try_catch(lambda: test_check('LSFCheckJobs', 'test.LSF.status2'), 'BackendError', 'Unable to parse job info')
caught
"""
class Test_PBS:
"""
>>> test_check('PBSCheckJobs', wms_id_list=['1036587'])
STATUS=OK
>>> test_check('PBSCheckJobs', 'test.PBS.status')
1036587 QUEUED {'<QUEUE>': 'VM_SLC5', '<RAW_STATUS>': 'Q', 'checkpoint': 'u', 'ctime': 'Thu Nov 4 19:40:24 2010', 'error_path': 'ic1n991:/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0000.qbNZnY/gc.stderr', 'etime': 'Thu Nov 4 19:40:24 2010', 'fault_tolerant': 'False', 'hold_types': 'n', 'job_name': 'GCdc509df1.0', 'job_owner': 'bd105@ic1n991.localdomain', 'join_path': 'n', 'keep_files': 'n', 'mail_points': 'a', 'mtime': 'Thu Nov 4 19:40:24 2010', 'output_path': 'ic1n991:/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0000.qbNZnY/gc.stdout', 'priority': 0, 'qtime': 'Thu Nov 4 19:40:24 2010', 'rerunable': 'True', 'resource_list.nice': 0, 'server': 'ic-pbs.localdomain', 'submit_args': '-N GCdc509df1.0 -q VM_SLC5 -v GC_SANDBOX=/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0000.qbNZnY -o /pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0000.qbNZnY/gc.stdout -e /pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0000.qbNZnY/gc.stderr /pfs/data/home/kit/bd00/bd105/grid-control/share/local.sh', 'variable_list': 
'PBS_O_HOME=/home/ws/bd105,PBS_O_LANG=en_US.UTF-8,PBS_O_LOGNAME=bd105,PBS_O_PATH=/software/kit/bd00/pbs/torque/bin:/software/kit/bd00/tools/bin:/home/ws/bd105/bin:/software/kit/bd00/pbs/torque/bin:/software/kit/bd00/tools/bin:/home/ws/bd105/bin:/software/kit/bd00/pbs/torque/bin:/software/kit/bd00/tools/bin:/home/ws/bd105/bin:/software/all/bin:/opt/intel/Compiler/11.1/073/bin/intel64:/opt/intel/Compiler/11.1/073/bin:/jms/bin:/home/ws/bd105/bin:/usr/local/bin:/usr/bin:/bin:/usr/bin/X11:/usr/X11R6/bin:/usr/games:/opt/kde3/bin:/usr/lib64/jvm/jre/bin:/usr/lib/mit/bin:/usr/lib/mit/sbin:.:/opt/openmpi/1.4.3/bin:/software/ssck/bin,PBS_O_MAIL=/var/mail/bd105,PBS_O_SHELL=/bin/bash,PBS_O_HOST=ic1n991.localdomain,PBS_SERVER=ic-pbs.localdomain,PBS_O_WORKDIR=/pfs/data/home/kit/bd00/bd105/grid-control/docs,GC_SANDBOX=/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0000.qbNZnY,PBS_O_QUEUE=VM_SLC5'}
1036571 RUNNING {'<QUEUE>': 'VM_SLC5', '<RAW_STATUS>': 'R', '<WN>': 'ic1n027.ic-pbs.localdomain', 'checkpoint': 'u', 'ctime': 'Thu Nov 4 19:40:07 2010', 'error_path': 'ic1n991:/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0001.__tx2d/gc.stderr', 'etime': 'Thu Nov 4 19:40:07 2010', 'fault_tolerant': 'False', 'hold_types': 'n', 'job_name': 'GCdc509df1.1', 'job_owner': 'bd105@ic1n991.localdomain', 'join_path': 'n', 'keep_files': 'n', 'mail_points': 'a', 'mtime': 'Thu Nov 4 19:40:10 2010', 'output_path': 'ic1n991:/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0001.__tx2d/gc.stdout', 'priority': 0, 'qtime': 'Thu Nov 4 19:40:07 2010', 'rerunable': 'True', 'resource_list.nice': 0, 'server': 'ic-pbs.localdomain', 'session_id': 9107, 'start_count': 1, 'start_time': 'Thu Nov 4 19:40:07 2010', 'submit_args': '-N GCdc509df1.1 -q VM_SLC5 -v GC_SANDBOX=/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0001.__tx2d -o /pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0001.__tx2d/gc.stdout -e /pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0001.__tx2d/gc.stderr /pfs/data/home/kit/bd00/bd105/grid-control/share/local.sh', 'variable_list': 
'PBS_O_HOME=/home/ws/bd105,PBS_O_LANG=en_US.UTF-8,PBS_O_LOGNAME=bd105,PBS_O_PATH=/software/kit/bd00/pbs/torque/bin:/software/kit/bd00/tools/bin:/home/ws/bd105/bin:/software/kit/bd00/pbs/torque/bin:/software/kit/bd00/tools/bin:/home/ws/bd105/bin:/software/kit/bd00/pbs/torque/bin:/software/kit/bd00/tools/bin:/home/ws/bd105/bin:/software/all/bin:/opt/intel/Compiler/11.1/073/bin/intel64:/opt/intel/Compiler/11.1/073/bin:/jms/bin:/home/ws/bd105/bin:/usr/local/bin:/usr/bin:/bin:/usr/bin/X11:/usr/X11R6/bin:/usr/games:/opt/kde3/bin:/usr/lib64/jvm/jre/bin:/usr/lib/mit/bin:/usr/lib/mit/sbin:.:/opt/openmpi/1.4.3/bin:/software/ssck/bin,PBS_O_MAIL=/var/mail/bd105,PBS_O_SHELL=/bin/bash,PBS_O_HOST=ic1n991.localdomain,PBS_SERVER=ic-pbs.localdomain,PBS_O_WORKDIR=/pfs/data/home/kit/bd00/bd105/grid-control/docs,GC_SANDBOX=/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0001.__tx2d,PBS_O_QUEUE=VM_SLC5'}
1036589 QUEUED {'<QUEUE>': 'VM_SLC5', '<RAW_STATUS>': 'Q', 'checkpoint': 'u', 'ctime': 'Thu Nov 4 19:40:24 2010', 'error_path': 'ic1n991:/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0017.8gGyXF/gc.stderr', 'etime': 'Thu Nov 4 19:40:24 2010', 'fault_tolerant': 'False', 'hold_types': 'n', 'job_name': 'GCdc509df1.17', 'job_owner': 'bd105@ic1n991.localdomain', 'join_path': 'n', 'keep_files': 'n', 'mail_points': 'a', 'mtime': 'Thu Nov 4 19:40:24 2010', 'output_path': 'ic1n991:/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0017.8gGyXF/gc.stdout', 'priority': 0, 'qtime': 'Thu Nov 4 19:40:24 2010', 'rerunable': 'True', 'resource_list.nice': 0, 'server': 'ic-pbs.localdomain', 'submit_args': '-N GCdc509df1.17 -q VM_SLC5 -v GC_SANDBOX=/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0017.8gGyXF -o /pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0017.8gGyXF/gc.stdout -e /pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0017.8gGyXF/gc.stderr /pfs/data/home/kit/bd00/bd105/grid-control/share/local.sh', 'variable_list': 
'PBS_O_HOME=/home/ws/bd105,PBS_O_LANG=en_US.UTF-8,PBS_O_LOGNAME=bd105,PBS_O_PATH=/software/kit/bd00/pbs/torque/bin:/software/kit/bd00/tools/bin:/home/ws/bd105/bin:/software/kit/bd00/pbs/torque/bin:/software/kit/bd00/tools/bin:/home/ws/bd105/bin:/software/kit/bd00/pbs/torque/bin:/software/kit/bd00/tools/bin:/home/ws/bd105/bin:/software/all/bin:/opt/intel/Compiler/11.1/073/bin/intel64:/opt/intel/Compiler/11.1/073/bin:/jms/bin:/home/ws/bd105/bin:/usr/local/bin:/usr/bin:/bin:/usr/bin/X11:/usr/X11R6/bin:/usr/games:/opt/kde3/bin:/usr/lib64/jvm/jre/bin:/usr/lib/mit/bin:/usr/lib/mit/sbin:.:/opt/openmpi/1.4.3/bin:/software/ssck/bin,PBS_O_MAIL=/var/mail/bd105,PBS_O_SHELL=/bin/bash,PBS_O_HOST=ic1n991.localdomain,PBS_SERVER=ic-pbs.localdomain,PBS_O_WORKDIR=/pfs/data/home/kit/bd00/bd105/grid-control/docs,GC_SANDBOX=/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0017.8gGyXF,PBS_O_QUEUE=VM_SLC5'}
1036590 QUEUED {'<QUEUE>': 'VM_SLC5', '<RAW_STATUS>': 'Q', 'checkpoint': 'u', 'ctime': 'Thu Nov 4 19:40:24 2010', 'error_path': 'ic1n991:/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0018.fJtP_K/gc.stderr', 'etime': 'Thu Nov 4 19:40:24 2010', 'fault_tolerant': 'False', 'hold_types': 'n', 'job_name': 'GCdc509df1.18', 'job_owner': 'bd105@ic1n991.localdomain', 'join_path': 'n', 'keep_files': 'n', 'mail_points': 'a', 'mtime': 'Thu Nov 4 19:40:24 2010', 'output_path': 'ic1n991:/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0018.fJtP_K/gc.stdout', 'priority': 0, 'qtime': 'Thu Nov 4 19:40:24 2010', 'rerunable': 'True', 'resource_list.nice': 0, 'server': 'ic-pbs.localdomain', 'submit_args': '-N GCdc509df1.18 -q VM_SLC5 -v GC_SANDBOX=/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0018.fJtP_K -o /pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0018.fJtP_K/gc.stdout -e /pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0018.fJtP_K/gc.stderr /pfs/data/home/kit/bd00/bd105/grid-control/share/local.sh', 'variable_list': 
'PBS_O_HOME=/home/ws/bd105,PBS_O_LANG=en_US.UTF-8,PBS_O_LOGNAME=bd105,PBS_O_PATH=/software/kit/bd00/pbs/torque/bin:/software/kit/bd00/tools/bin:/home/ws/bd105/bin:/software/kit/bd00/pbs/torque/bin:/software/kit/bd00/tools/bin:/home/ws/bd105/bin:/software/kit/bd00/pbs/torque/bin:/software/kit/bd00/tools/bin:/home/ws/bd105/bin:/software/all/bin:/opt/intel/Compiler/11.1/073/bin/intel64:/opt/intel/Compiler/11.1/073/bin:/jms/bin:/home/ws/bd105/bin:/usr/local/bin:/usr/bin:/bin:/usr/bin/X11:/usr/X11R6/bin:/usr/games:/opt/kde3/bin:/usr/lib64/jvm/jre/bin:/usr/lib/mit/bin:/usr/lib/mit/sbin:.:/opt/openmpi/1.4.3/bin:/software/ssck/bin,PBS_O_MAIL=/var/mail/bd105,PBS_O_SHELL=/bin/bash,PBS_O_HOST=ic1n991.localdomain,PBS_SERVER=ic-pbs.localdomain,PBS_O_WORKDIR=/pfs/data/home/kit/bd00/bd105/grid-control/docs,GC_SANDBOX=/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0018.fJtP_K,PBS_O_QUEUE=VM_SLC5'}
1036536 RUNNING {'<QUEUE>': 'short', '<RAW_STATUS>': 'R', '<WN>': 'ic1n006.ic-pbs.localdomain', 'checkpoint': 'u', 'ctime': 'Thu Nov 4 19:38:27 2010', 'error_path': 'ic1n991:/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0010.gXQPSk/gc.stderr', 'etime': 'Thu Nov 4 19:38:27 2010', 'fault_tolerant': 'False', 'hold_types': 'n', 'job_name': 'GCdc509df1.10', 'job_owner': 'bd105@ic1n991.localdomain', 'join_path': 'n', 'keep_files': 'n', 'mail_points': 'a', 'mtime': 'Thu Nov 4 19:38:28 2010', 'output_path': 'ic1n991:/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0010.gXQPSk/gc.stdout', 'priority': 0, 'qtime': 'Thu Nov 4 19:38:27 2010', 'rerunable': 'True', 'resource_list.cput': '01:00:00', 'resource_list.nice': 0, 'resource_list.walltime': '03:05:00', 'server': 'ic-pbs.localdomain', 'session_id': 21361, 'start_count': 1, 'start_time': 'Thu Nov 4 19:38:28 2010', 'submit_args': '-N GCdc509df1.10 -q short -v GC_SANDBOX=/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0010.gXQPSk -o /pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0010.gXQPSk/gc.stdout -e /pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0010.gXQPSk/gc.stderr /pfs/data/home/kit/bd00/bd105/grid-control/share/local.sh', 'variable_list': 
'PBS_O_HOME=/home/ws/bd105,PBS_O_LANG=en_US.UTF-8,PBS_O_LOGNAME=bd105,PBS_O_PATH=/software/kit/bd00/pbs/torque/bin:/software/kit/bd00/tools/bin:/home/ws/bd105/bin:/software/kit/bd00/pbs/torque/bin:/software/kit/bd00/tools/bin:/home/ws/bd105/bin:/software/kit/bd00/pbs/torque/bin:/software/kit/bd00/tools/bin:/home/ws/bd105/bin:/software/all/bin:/opt/intel/Compiler/11.1/073/bin/intel64:/opt/intel/Compiler/11.1/073/bin:/jms/bin:/home/ws/bd105/bin:/usr/local/bin:/usr/bin:/bin:/usr/bin/X11:/usr/X11R6/bin:/usr/games:/opt/kde3/bin:/usr/lib64/jvm/jre/bin:/usr/lib/mit/bin:/usr/lib/mit/sbin:.:/opt/openmpi/1.4.3/bin:/software/ssck/bin,PBS_O_MAIL=/var/mail/bd105,PBS_O_SHELL=/bin/bash,PBS_O_HOST=ic1n991.localdomain,PBS_SERVER=ic-pbs.localdomain,PBS_O_WORKDIR=/pfs/data/home/kit/bd00/bd105/grid-control/docs,GC_SANDBOX=/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0010.gXQPSk,PBS_O_QUEUE=short', 'walltime.remaining': 1108}
1036537 RUNNING {'<QUEUE>': 'short', '<RAW_STATUS>': 'R', '<WN>': 'ic1n006.ic-pbs.localdomain', 'checkpoint': 'u', 'ctime': 'Thu Nov 4 19:38:28 2010', 'error_path': 'ic1n991:/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0011.NBfyUT/gc.stderr', 'etime': 'Thu Nov 4 19:38:28 2010', 'fault_tolerant': 'False', 'hold_types': 'n', 'job_name': 'GCdc509df1.11', 'job_owner': 'bd105@ic1n991.localdomain', 'join_path': 'n', 'keep_files': 'n', 'mail_points': 'a', 'mtime': 'Thu Nov 4 19:38:28 2010', 'output_path': 'ic1n991:/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0011.NBfyUT/gc.stdout', 'priority': 0, 'qtime': 'Thu Nov 4 19:38:28 2010', 'rerunable': 'True', 'resource_list.cput': '01:00:00', 'resource_list.nice': 0, 'resource_list.walltime': '03:05:00', 'server': 'ic-pbs.localdomain', 'session_id': 21363, 'start_count': 1, 'start_time': 'Thu Nov 4 19:38:28 2010', 'submit_args': '-N GCdc509df1.11 -q short -v GC_SANDBOX=/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0011.NBfyUT -o /pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0011.NBfyUT/gc.stdout -e /pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0011.NBfyUT/gc.stderr /pfs/data/home/kit/bd00/bd105/grid-control/share/local.sh', 'variable_list': 
'PBS_O_HOME=/home/ws/bd105,PBS_O_LANG=en_US.UTF-8,PBS_O_LOGNAME=bd105,PBS_O_PATH=/software/kit/bd00/pbs/torque/bin:/software/kit/bd00/tools/bin:/home/ws/bd105/bin:/software/kit/bd00/pbs/torque/bin:/software/kit/bd00/tools/bin:/home/ws/bd105/bin:/software/kit/bd00/pbs/torque/bin:/software/kit/bd00/tools/bin:/home/ws/bd105/bin:/software/all/bin:/opt/intel/Compiler/11.1/073/bin/intel64:/opt/intel/Compiler/11.1/073/bin:/jms/bin:/home/ws/bd105/bin:/usr/local/bin:/usr/bin:/bin:/usr/bin/X11:/usr/X11R6/bin:/usr/games:/opt/kde3/bin:/usr/lib64/jvm/jre/bin:/usr/lib/mit/bin:/usr/lib/mit/sbin:.:/opt/openmpi/1.4.3/bin:/software/ssck/bin,PBS_O_MAIL=/var/mail/bd105,PBS_O_SHELL=/bin/bash,PBS_O_HOST=ic1n991.localdomain,PBS_SERVER=ic-pbs.localdomain,PBS_O_WORKDIR=/pfs/data/home/kit/bd00/bd105/grid-control/docs,GC_SANDBOX=/pfs/data/home/kit/bd00/bd105/grid-control/docs/work.USERMOD-parameter/sandbox/GCdc509df15ea2.0011.NBfyUT,PBS_O_QUEUE=short', 'walltime.remaining': 1108}
STATUS=OK
"""
# Doctest container for the SLURM backend test; the (currently empty)
# docstring would hold the expected interaction transcript.
class Test_SLURM:
	"""
	"""
# Presumably executes the Test_* doctest classes defined in this module;
# run_test comes from earlier in the file (outside this view) — TODO confirm.
run_test()
|
990,352 | da9191e54b1177c4138170da01204691101bfc09 | import httplib, urllib, base64, json
import time
# NOTE(review): this script uses httplib / urllib.urlencode and is therefore
# Python 2 only; port to http.client / urllib.parse for Python 3.
headers = {
    # Request headers.
    'Content-Type': 'application/json',
    # NOTE: Replace the "Ocp-Apim-Subscription-Key" value with a valid subscription key.
    # NOTE(review): a key is hard-coded here — it should be rotated and loaded
    # from the environment or a secrets store, never committed to source.
    'Ocp-Apim-Subscription-Key': '69766cdb74e748cd9266eb53fae6316f',
}
# Replace 'examplegroupid' with an ID you haven't used for creating a group before.
# The valid characters for the ID include numbers, English letters in lower case, '-' and '_'.
# The maximum length of the ID is 64.
personGroupId = 'patients'
# This initial value is overwritten below by the id returned from CREATE PERSON.
personId = '60bace47-d012-4f1e-9e01-e852e3311791'
#################################################################################
# DELETE GROUP -- remove any previous person group with this id so the run is clean
##################################################################################
# The userData field is optional. The size limit for it is 16KB.
body = "{ 'name':'group1', 'userData':'user-provided data attached to the person group' }"
conn = httplib.HTTPSConnection('westcentralus.api.cognitive.microsoft.com')
conn.request("DELETE", "/face/v1.0/persongroups/%s" % personGroupId, body, headers)
response = conn.getresponse()
data = response.read()
print(data)
conn.close()
#################################################################################
# CREATE GROUP -- recreate the (now empty) person group
##################################################################################
conn = httplib.HTTPSConnection('westcentralus.api.cognitive.microsoft.com')
conn.request("PUT", "/face/v1.0/persongroups/%s" % personGroupId, body, headers)
response = conn.getresponse()
print(response.reason)
conn.close()
#################################################################################
# CREATE PERSON IN GROUP -- the response supplies the personId used below
##################################################################################
body = "{ 'name':'Russell', 'userData':'%s'}" % "test"
conn = httplib.HTTPSConnection('westcentralus.api.cognitive.microsoft.com')
conn.request("POST", "/face/v1.0/persongroups/%s/persons?" % personGroupId, body, headers)
response = conn.getresponse()
data = json.loads(response.read())
print(data)
personId = str(data['personId'])
conn.close()
#################################################################################
# GET PERSON FROM GROUP -- sanity check that the person was created
##################################################################################
conn = httplib.HTTPSConnection('westcentralus.api.cognitive.microsoft.com')
conn.request("GET", "/face/v1.0/persongroups/%s/persons/%s?" % (personGroupId, personId), body, headers)
response = conn.getresponse()
data = response.read()
print(data)
conn.close()
#################################################################################
# ADD PICTURE TO PERSON -- upload a face image as raw bytes
##################################################################################
headers['Content-Type'] = 'application/octet-stream'
filename = '/home/raab/russell1.jpg'
f = open(filename, "rb")
body = f.read()
f.close()
conn = httplib.HTTPSConnection('westcentralus.api.cognitive.microsoft.com')
string = "/face/v1.0/persongroups/%s/persons/%s/persistedFaces?" % (personGroupId, personId)
conn.request("POST", string, body, headers)
# NOTE(review): getresponse("") passes a stray positional argument (Python 2
# httplib's `buffering` parameter) — the empty string is harmless but looks
# unintentional; confirm and drop the argument.
response = conn.getresponse("")
data = json.loads(response.read())
print(data)
conn.close()
#################################################################################
# GET PICTURE FROM PERSON -- verify the persisted face just added
##################################################################################
persistedFaceId = data['persistedFaceId']
body = "{ 'name':'Russell', 'userData':'%s'}" % "test"
conn = httplib.HTTPSConnection('westcentralus.api.cognitive.microsoft.com')
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
conn.request("GET", "/face/v1.0/persongroups/%s/persons/%s/persistedFaces/%s?" % (personGroupId, personId, persistedFaceId), body, headers)
response = conn.getresponse()
data = response.read()
print(data)
conn.close()
#################################################################################
# TRAIN GROUP ON IMAGES -- kick off model training for the person group
##################################################################################
params = urllib.urlencode({
})
headers = {
    # Request headers.
    'Content-Type': 'application/json',
    # NOTE: Replace the "Ocp-Apim-Subscription-Key" value with a valid subscription key.
    'Ocp-Apim-Subscription-Key': '69766cdb74e748cd9266eb53fae6316f',
}
conn = httplib.HTTPSConnection('westcentralus.api.cognitive.microsoft.com')
conn.request("POST", "/face/v1.0/persongroups/%s/train?" % (personGroupId), params, headers)
response = conn.getresponse()
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
data = response.read()
print(data)
conn.close()
#################################################################################
# ANALYSE NEW QUERY IMAGE -- detect a face and request its attributes
##################################################################################
params = urllib.urlencode({
    'returnFaceId': 'true',
    'returnFaceLandmarks': 'false',
    'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise',
})
headers['Content-Type'] = 'application/octet-stream'
filename = '/home/raab/russell1.jpg'
f = open(filename, "rb")
body = f.read()
f.close()
conn = httplib.HTTPSConnection('westcentralus.api.cognitive.microsoft.com')
conn.request("POST", "/face/v1.0/detect?%s" % params, body, headers)
response = conn.getresponse()
data = json.loads(response.read())
print(data)
print(data[0])
faceID = data[0]['faceId']
conn.close()
#################################################################################
# COMPARE QUERY IMAGE TO GROUP -- identify the detected face within the group
##################################################################################
headers = {
    # Request headers
    'Content-Type': 'application/json',
    'Ocp-Apim-Subscription-Key': '69766cdb74e748cd9266eb53fae6316f',
}
params = urllib.urlencode({
})
# NOTE(review): str() of a dict produces single-quoted pseudo-JSON, not strict
# JSON — the API appears to tolerate it here, but json.dumps would be correct.
body = str({
    "personGroupId":str(personGroupId),
    "faceIds":[str(faceID)],
    "maxNumOfCandidatesReturned":1,
    "confidenceThreshold": 0.5
})
print("Here")
# Give the training started above a moment to finish before identifying.
time.sleep(3)
conn = httplib.HTTPSConnection('westcentralus.api.cognitive.microsoft.com')
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print("/face/v1.0/identify?%s" % params, body, headers)
conn.request("POST", "/face/v1.0/identify?%s" % params, body, headers)
response = conn.getresponse()
data = response.read()
print(data)
conn.close() |
990,353 | 17843ec288ba1e31db06f48e2ab9aa9a5c748083 | import numpy as np, cv2
# Spatial convolution helper (name kept for compatibility with callers,
# even though it shadows the builtin `filter`).
def filter(image, mask):
    """Convolve `mask` over `image` and return the float32 response map.

    Border pixels that the mask cannot fully cover are left at zero.
    """
    height, width = image.shape[:2]
    out = np.zeros((height, width), np.float32)
    half_w, half_h = mask.shape[1] // 2, mask.shape[0] // 2
    for row in range(half_h, height - half_h):
        top, bottom = row - half_h, row + half_h + 1
        for col in range(half_w, width - half_w):
            left, right = col - half_w, col + half_w + 1
            # Window under the mask, promoted to float32 before multiplying.
            window = image[top:bottom, left:right].astype("float32")
            product = cv2.multiply(window, mask)
            out[row, col] = cv2.sumElems(product)[0]
    return out
def differential(image, data1, data2):
    """Apply two directional 3x3 kernels and combine their responses.

    Returns (magnitude, response1, response2), each scaled to uint8
    via cv2.convertScaleAbs.
    """
    # Build the two kernels from the flat coefficient lists.
    kernel_a = np.array(data1, np.float32).reshape(3, 3)
    kernel_b = np.array(data2, np.float32).reshape(3, 3)
    # Convolve with the user-defined filter above.
    grad_a = filter(image, kernel_a)
    grad_b = filter(image, kernel_b)
    # Euclidean magnitude of the two directional responses.
    magnitude = cv2.magnitude(grad_a, grad_b)
    return (cv2.convertScaleAbs(magnitude),
            cv2.convertScaleAbs(grad_a),
            cv2.convertScaleAbs(grad_b))
# Morphological erosion
def erode(img, mask):
    """Binary erosion: a pixel survives (255) only when the structuring
    element fits entirely inside the foreground around it."""
    if mask is None: mask = np.ones((3, 3), np.uint8)
    result = np.zeros(img.shape, np.uint8)
    # Number of set mask pixels that must all be matched for a "fit".
    required = cv2.countNonZero(mask)
    half_x, half_y = int(mask.shape[1] / 2), int(mask.shape[0] / 2)
    for row in range(half_y, img.shape[0] - half_y):
        for col in range(half_x, img.shape[1] - half_x):
            # Neighborhood under the structuring element.
            window = img[row - half_y:row + half_y + 1,
                         col - half_x:col + half_x + 1]
            overlap = cv2.bitwise_and(window, mask)
            hits = cv2.countNonZero(overlap)
            result[row, col] = 255 if hits == required else 0
    return result
def dilate(img, mask):
    """Binary dilation: a pixel becomes 255 when the structuring element
    overlaps any foreground pixel in its neighborhood."""
    if mask is None:
        mask = np.ones((3, 3), np.uint8)
    result = np.zeros(img.shape, np.uint8)
    half_x, half_y = mask.shape[1] // 2, mask.shape[0] // 2
    for row in range(half_y, img.shape[0] - half_y):
        for col in range(half_x, img.shape[1] - half_x):
            # Neighborhood under the structuring element.
            window = img[row - half_y:row + half_y + 1,
                         col - half_x:col + half_x + 1]
            overlap = cv2.bitwise_and(window, mask)
            # Any overlap at all turns the output pixel on.
            result[row, col] = 255 if cv2.countNonZero(overlap) else 0
    return result
990,354 | e0dabc6ad9b501311df22bf6de048ce5e20adda5 | from .conf import config
# Django URL configuration for the microsoft_auth app.
app_name = "microsoft_auth"
# Default to no routes; the callback route is registered only when the
# feature flag below is enabled in settings.
urlpatterns = []
if config.MICROSOFT_AUTH_LOGIN_ENABLED:  # pragma: no branch
    # Imported lazily so Django URL machinery is only touched when enabled.
    from django.conf.urls import url
    from . import views
    urlpatterns = [
        url(
            r"^auth-callback/$",
            views.AuthenticateCallbackView.as_view(),
            name="auth-callback",
        )
    ]
|
990,355 | 3b1f2ce98baf9d82a7d341b715aa38fc8f84aefa |
#-----------------------------
def test () :
return 'Zoe'
print (test()) # Zoe
#-----------------------------
a = 1
b = 2
def test ( a , b ) :
c = a + b
return c
print (test(a , b)) # 3
#-----------------------------
a = 'apple'
b = 'banana'
def test ( a , b ) :
return a
print (test(a , b)) # apple
#-----------------------------
def fortest ( a ) :
for b in range( 1 , 6 ) :
c = a * b
return c
d = fortest ( 5 )
print( d ) # 25 ,return 只會回傳一個值
#-----------------------------
def sad ( x , y ):
if x == y:
return x , y
else:
return 'so sad'
print (sad ( 2 , 5 )) #so sad
print (sad ( 4 , 4 )) #(4, 4)
#-----------------------------
def test ( a , b ):
c = a * b
return c
for d in range ( 1 , 4 ):
e = test ( 1+d , 10 )
print (e) # 1+1*10=20、1+2*10=30、1+3*10=40
#-----------------------------
|
990,356 | 126caab087097aebe661f9673bd02fd4449d7222 | """Quick Scan a folder or a bucket"""
# _______ ___ ___ ___ _______ ___ ___ _______ _______ _______ ______ _______ _______ ___
# | _ | Y | | _ | Y ) | _ | _ | _ | _ \ | _ | _ | |
# |. | |. | |. |. 1___|. 1 / | 1___|. 1___|. 1 |. | | |. 1 |. 1 |. |
# |. | |. | |. |. |___|. _ \ |____ |. |___|. _ |. | | |. _ |. ____|. |
# |: 1 |: 1 |: |: 1 |: | \ |: 1 |: 1 |: | |: | | |: | |: | |: |
# |::.. |::.. . |::.|::.. . |::.| . ) |::.. . |::.. . |::.|:. |::.| | |::.|:. |::.| |::.|
# `----|:.`-------`---`-------`--- ---' `-------`-------`--- ---`--- ---' `--- ---`---' `---'
# `--' Josh's Quick Scan experiment
#
# Scans either a folder or a S3 bucket using the CrowdStrike Falcon X Quick Scan and Sandbox APIs.
#
# Created // 04.12.21: jshcodes@CrowdStrike - In order to proceed, please insert 16oz of coffee.
#
# ===== NOTES REGARDING THIS SOLUTION ============================================================
#
# This is a proof of concept. Extensive performance testing has not been performed at this time.
#
# A VOLUME is a collection of files that are uploaded and then scanned as a singular batch.
#
# LOCAL DIRECTORY scanning: the folder is inventoried and then files are uploaded to the API in a
# linear fashion. This method is only impacted by data transfer speeds from the source file system
# location to CrowdStrike's cloud. Supports pattern matching to filter objects scanned using the
# "--pattern" or "-p" command line parameter.
#
# S3 BUCKET scanning: the bucket contents are inventoried, and then the contents are downloaded
# to local memory and uploaded to the Sandbox API in a linear fashion. This method does NOT store
# the files on the local file system. Due to the nature of this solution, the method is heavily
# impacted by data transfer speeds. Recommended deployment pattern involves running in AWS within
# a container, an EC2 instance or as a serverless lambda. Scans the entire bucket whether you like
# it or not. You must specify a target that includes the string "s3://" in order to scan a bucket.
#
# The log file rotates because cool kids don't leave messes on other people's file systems.
#
# This solution is dependent upon Amazon's boto3 library, and CrowdStrike FalconPy >= v0.8.7.
# python3 -m pip install boto3 crowdstrike-falconpy
#
# Example config.json file:
#
# {
# "falcon_client_id": "API ID GOES HERE",
# "falcon_client_secret": "API SECRET GOES HERE"
# }
#
#
# This solution has been tested on Python 3.7 / 3.9 running under Amazon Linux 2 and MacOS 10.15.
#
# ================================================================================================
# pylint: disable=E0401, R0903
#
import io
import os
import json
import time
import argparse
import logging
from logging.handlers import RotatingFileHandler
from pathlib import Path
# AWS Boto library
import boto3
# !!! Requires FalconPy v0.8.7+ !!!
# Authorization, Sample Uploads and QuickScan Service Classes
from falconpy import OAuth2, SampleUploads, QuickScan
class Analysis:
    """Mutable container tracking the state of a single scan run.

    uploaded -- SHA256 ids the Sandbox returned for each uploaded file.
    files    -- [name, path, sha] triples so results map back to files.
    scanning -- polling flag; cleared once the scan reports "done".
    payload  -- zero-argument callable producing the volume submission body.
    """
    def __init__(self):
        # Begin with an empty volume and an active scan state.
        self.scanning = True
        self.files = []
        self.uploaded = []
        # Bound lambda so the payload always reflects the current upload list.
        self.payload = lambda: dict(samples=self.uploaded)
class Configuration:
    """Running configuration derived from parsed command-line arguments.

    Exposes the same attributes as before: config_file, log_level,
    target_pattern, scan_delay, region, target_dir and bucket.
    """
    # Recognized --log-level values (case-insensitive). Anything else,
    # including a missing value, keeps the INFO default. WARNING replaces
    # the deprecated logging.WARN alias (identical numeric value), and the
    # mapping replaces the original nested if/elif chain.
    _LOG_LEVELS = {
        "DEBUG": logging.DEBUG,
        "WARN": logging.WARNING,
        "ERROR": logging.ERROR,
    }

    def __init__(self, args):
        # Path to the JSON file holding the API credentials.
        self.config_file = args.config_file if args.config_file else "../config.json"
        # Requested verbosity; unknown names fall back to INFO.
        self.log_level = logging.INFO
        if args.log_level:
            self.log_level = self._LOG_LEVELS.get(args.log_level.upper(), logging.INFO)
        # Glob pattern used when scanning a local directory.
        self.target_pattern = args.pattern if args.pattern else "**/*.*"
        # Seconds between scan-status polls; non-numeric input is
        # deliberately ignored (best-effort) and keeps the default.
        self.scan_delay = 3
        if args.check_delay:
            try:
                self.scan_delay = int(args.check_delay)
            except ValueError:
                pass
        # AWS region; processing stops later if a bucket target has no region.
        self.region = args.region if args.region else None
        # Target to scan: an "s3://" prefix selects bucket mode.
        self.bucket = "s3://" in args.target
        if self.bucket:
            self.target_dir = args.target.replace("s3://", "")
        else:
            self.target_dir = args.target
def upload_samples():
    """Walk the local target directory and push each matching file to the Sandbox.

    Uses the module-level Config, Samples, Analyzer and logger globals set
    up in the __main__ block.
    """
    logger.info("Assembling %s volume for submission", Config.target_dir)
    # Visit every file under the target that matches the configured pattern.
    for candidate in Path(Config.target_dir).glob(Config.target_pattern):
        full_path = str(candidate)
        base_name = os.path.basename(full_path)
        # Read the whole file; its bytes form the upload payload.
        with open(full_path, 'rb') as sample_file:
            contents = sample_file.read()
        # Submit the sample; the API answers with its SHA256 identifier.
        upload_result = Samples.upload_sample(file_name=base_name, sample=contents)
        digest = upload_result["body"]["resources"][0]["sha256"]
        # Record the digest for the volume payload and for cleanup/reporting.
        Analyzer.uploaded.append(digest)
        Analyzer.files.append([base_name, full_path, digest])
        logger.debug("Uploaded %s to %s", base_name, digest)
def upload_bucket_samples():
    """Inventory an S3 bucket and upload every object to the Sandbox API.

    Reads the module-level Config, Samples, Analyzer and logger globals.
    Each object is downloaded fully into memory (never to disk) before
    upload, so very large objects inflate peak memory — review note.
    Raises SystemExit when no region was supplied for a bucket target.
    """
    if not Config.region:
        logger.error("You must specify a region in order to scan a bucket target")
        raise SystemExit(
            "Target region not specified. Use -r or --region to specify the target region."
        )
    # Connect to S3 in our target region
    s_3 = boto3.resource("s3", region_name=Config.region)
    # Connect to our target bucket
    bucket = s_3.Bucket(Config.target_dir)
    # Retrieve a list of all objects in the bucket
    summaries = bucket.objects.all()
    # Inform the user as this may take a minute
    logger.info("Assembling volume from target bucket (%s) for submission", Config.target_dir)
    # Loop through our list of files, downloading each to memory then upload them to the Sandbox
    for item in summaries:
        # Grab the file name from the path
        filename = os.path.basename(item.key)
        # Download the object body straight into the in-memory payload
        # used for the Sandbox upload (no temp file on disk).
        response = Samples.upload_sample(file_name=filename,
                                         file_data=io.BytesIO(
                                             bucket.Object(key=item.key).get()["Body"].read()
                                         )
                                         )
        # Retrieve our uploaded file SHA256 identifier
        sha = response["body"]["resources"][0]["sha256"]
        # Add this SHA256 to the upload payload element
        Analyzer.uploaded.append(sha)
        # Track the upload so we recognize the file when we're done
        Analyzer.files.append([filename, item.key, sha])
        # Inform the user of our progress
        logger.debug("Uploaded %s to %s", filename, sha)
def scan_uploaded_samples() -> dict:
    """Poll the QuickScan API until the submitted scan reports "done".

    Returns the "samples" element of the finished scan (a list of
    per-sample result dicts; the historical ``-> dict`` annotation is
    kept unchanged for interface compatibility).

    Fixes over the original:
    - ``results`` is initialized before the loop, so the function cannot
      raise UnboundLocalError when the "done" branch is never reached
      (e.g. Analyzer.scanning already False on entry).
    - The not-yet-populated (IndexError) path now also sleeps instead of
      busy-polling the API as fast as it will answer.
    """
    results = []
    while Analyzer.scanning:
        # Ask the API for the current state of our submitted scan.
        scan_results = Scanner.get_scans(ids=scan_id)
        try:
            if scan_results["body"]["resources"][0]["status"] == "done":
                # Scan is complete: keep the per-sample results, stop polling.
                results = scan_results["body"]["resources"][0]["samples"]
                Analyzer.scanning = False
            else:
                # Not done yet; wait before the next poll.
                time.sleep(Config.scan_delay)
        except IndexError:
            # Resources aren't populated yet; wait and poll again.
            time.sleep(Config.scan_delay)
    return results
def report_results(results: dict):
    """Match each scan result to its uploaded file and log the verdict."""
    for verdict_entry in results:
        # Analyzer.files holds [filename, filepath, sha256] triples.
        for tracked in Analyzer.files:
            if verdict_entry["sha256"] != tracked[2]:
                continue
            verdict = verdict_entry["verdict"]
            if "no specific threat" in verdict:
                # Clean file: informational log only.
                logger.info("Verdict for %s: %s", tracked[1], verdict)
            else:
                # Suspicious file: warn (a mitigation hook would go here).
                logger.warning("Verdict for %s: %s", tracked[1], verdict)
def clean_up_artifacts():
    """Delete every uploaded sample from the Sandbox once scanning is done."""
    logger.info("Removing artifacts from Sandbox")
    for sha in Analyzer.uploaded:
        delete_result = Samples.delete_sample(ids=sha)
        # 200/201 indicate success; anything above means the delete failed.
        if delete_result["status_code"] > 201:
            logger.warning("Failed to delete %s", sha)
        else:
            logger.debug("Deleted %s", sha)
    logger.info("Artifact cleanup complete")
def parse_command_line():
"""Parse any inbound command line arguments and set defaults."""
parser = argparse.ArgumentParser("Falcon Quick Scan")
parser.add_argument("-f", "--config",
dest="config_file",
help="Path to the configuration file",
required=False
)
parser.add_argument("-l", "--log-level",
dest="log_level",
help="Default log level (DEBUG, WARN, INFO, ERROR)",
required=False
)
parser.add_argument("-d", "--check-delay",
dest="check_delay",
help="Delay between checks for scan results",
required=False
)
parser.add_argument("-p", "--pattern",
dest="pattern",
help="Target file patterns to scan (defaults to *.*)",
required=False
)
parser.add_argument("-r", "--region",
dest="region",
help="Region the target bucket resides in",
required=False
)
parser.add_argument("-t", "--target",
dest="target",
help="Target folder or bucket to scan. Bucket must have 's3://' prefix.",
required=True
)
return parser.parse_args()
def load_api_config():
    """Read API credentials from the JSON config file and build an auth object."""
    with open(Config.config_file, 'r', encoding="utf-8") as handle:
        credentials = json.load(handle)
    # FalconPy's OAuth2 object manages the token lifecycle for us.
    return OAuth2(client_id=credentials["falcon_client_id"],
                  client_secret=credentials["falcon_client_secret"])
def enable_logging():
    """Configure console plus rotating-file logging and return the logger."""
    record_format = "%(asctime)s %(name)s %(levelname)s %(message)s"
    # Basic (console) logging at the configured verbosity.
    logging.basicConfig(level=Config.log_level, format=record_format)
    # 20 MB log file, keeping up to five rotated backups -- no mess left behind.
    file_handler = RotatingFileHandler("falcon_quick_scan.log",
                                       maxBytes=20971520,
                                       backupCount=5)
    file_handler.setLevel(Config.log_level)
    file_handler.setFormatter(logging.Formatter(record_format))
    # Attach the file handler to our named logger and hand it back.
    log = logging.getLogger("Quick Scan")
    log.addHandler(file_handler)
    return log
if __name__ == '__main__':
    # NOTE: Config, logger, Samples, Scanner, Analyzer and scan_id are module
    # globals that the helper functions above read directly.
    # Parse the inbound command line parameters and setup our running Config object
    Config = Configuration(parse_command_line())
    # Activate logging
    logger = enable_logging()
    # Grab our authentication object
    auth = load_api_config()
    # Connect to the Samples Sandbox API
    Samples = SampleUploads(auth_object=auth)
    # Connect to the Quick Scan API
    Scanner = QuickScan(auth_object=auth)
    # Create our analysis object
    Analyzer = Analysis()
    # Log that startup is done
    logger.info("Process startup complete, preparing to run scan")
    # Upload our samples to the Sandbox
    if Config.bucket:
        # S3 bucket
        upload_bucket_samples()
    else:
        # Local folder
        upload_samples()
    # Submit our volume for analysis and grab the id of our scan submission
    scan_id = Scanner.scan_samples(body=Analyzer.payload())["body"]["resources"][0]
    # Inform the user of our progress
    logger.info("Scan %s submitted for analysis", scan_id)
    # Retrieve our scan results from the API and report them
    report_results(scan_uploaded_samples())
    # Clean up our uploaded files from out of the API
    clean_up_artifacts()
    # We're done, let everyone know
    logger.info("Scan completed")
# __ ____ ___ __ __ ___ _____ ______ ____ ____ __ _ ___
# / ]| \ / \ | T__T T| \ / ___/| T| \ l j| l/ ] / _]
# / / | D )Y Y| | | || \( \_ | || D ) | T | ' / / [_
# / / | / | O || | | || D Y\__ Tl_j l_j| / | | | \ Y _]
# / \_ | \ | |l ` ' !| |/ \ | | | | \ | | | Y| [_
# \ || . Yl ! \ / | |\ | | | | . Y j l | . || T
# \____jl__j\_j \___/ \_/\_/ l_____j \___j l__j l__j\_j|____jl__j\_jl_____j
#
# ████
# ████░░ ████▒▒████
# ██▒▒▒▒████████▒▒▒▒▒▒████
# ██▒▒▒▒▒▒▒▒████▒▒▒▒▒▒▒▒▒▒██
# ██▒▒▒▒▒▒▒▒▒▒▒▒████▒▒▒▒▒▒▒▒▒▒██
# ██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██▒▒▒▒▒▒▒▒▒▒▒▒██████
# ██▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▒▒▒▒▒▒▒▒▒▒████
# ░░ ████▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▒▒▒▒▒▒▒▒██▒▒
# ████ ██████▒▒▒▒▒▒▒▒▒▒▒▒▒▒██░░██▒▒▒▒██
# ████▒▒████████████▒▒▒▒▒▒▒▒▒▒▓▓██░░██▓▓██ Yup! That's a file...
# ██░░▒▒██████ ░░████████▒▒▒▒▒▒▒▒██░░░░██
# ▒▒██░░░░██▓▓ ██░░██████████▒▒▒▒▒▒▒▒██░░██
# ██▒▒░░░░██▒▒░░ ████░░ ████▒▒▒▒▒▒██▒▒██
# ████░░░░░░░░████▒▒░░██░░░░██░░░░██▒▒▒▒▒▒████
# ██▒▒░░░░░░░░██▒▒░░░░██░░▒▒██████ ████▒▒▒▒██
# ██░░░░░░░░░░▓▓██░░░░██░░ ██ ██▒▒██▓▓▒▒██
# ████████▒▒░░░░░░░░██░░░░░░░░░░██ ████▒▒██▒▒██
# ░░████▒▒░░░░▓▓██▓▓░░░░░░▓▓░░░░░░░░░░ ██ ░░░░██ ██▓▓▓▓
# ▒▒████░░░░░░░░░░████░░░░░░░░▒▒░░░░░░░░░░░░████████▒▒ ████ ██████
# ▒▒██░░░░░░░░░░░░░░░░████░░░░░░██░░░░░░░░░░░░░░ ░░░░██░░ ▒▒ ████▒▒██████
# ████░░░░░░░░░░░░░░░░░░░░██░░░░░░██░░░░░░████████░░░░░░░░██ ██░░████████ ██
# ████████░░░░░░░░░░░░░░░░░░░░░░░░██░░░░██ ░░██████████░░░░░░░░░░██ ██▒▒████░░░░ ██
# ██░░████░░░░░░░░░░░░░░░░░░░░░░██████░░░░░░██░░ ████████░░░░░░░░░░██ ██████░░ ░░██ ██
# ██░░░░██░░░░░░░░░░░░░░░░░░░░░░░░░░░░██▓▓░░░░██░░░░░░████████░░░░░░ ██ ░░██░░██░░ ██ ██
# ████████ ██░░░░██░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░████░░██▒▒██████▒▒████████░░██ ▒▒██░░░░░░ ██ ██
# ████░░░░░░████▒▒░░████░░░░░░░░░░░░░░░░▒▒░░░░░░░░░░░░▓▓██████ ██ ░░ ██ ░░██ ░░████░░░░██
# ░░██░░░░░░░░░░██░░░░████░░░░░░░░░░░░░░██░░░░░░░░░░░░░░████ ██ ██░░ ▒▒▒▒░░░░░░██
# ██████▒▒░░░░████░░██░░░░░░██░░░░░░░░██░░░░░░░░░░░░░░░░██ ██ ██▒▒ ░░████░░░░██
# ████▒▒░░░░░░██████░░░░██░░░░░░████████░░░░░░░░░░░░░░░░██ ██ ████░░░░██████████
# ████░░░░░░░░░░░░██░░░░██████░░░░░░░░░░████░░░░░░░░░░░░░░░░████ ██ ██░░░░██ ▒▒████
# ██░░░░░░░░░░░░░░░░██░░░░░░████░░░░░░░░██▒▒░░░░░░░░░░░░░░▒▒████ ▒▒████████████░░████
# ▒▒██░░░░░░░░░░░░░░██▓▓░░░░░░░░████████████░░░░░░░░░░░░░░░░██████ ▓▓▒▒██████ ▒▒██████
# ████░░░░░░░░░░░░░░██▒▒░░░░░░████░░████▒▒██░░░░░░░░░░░░▓▓██░░██████▒▒██ ▒▒██████████
# ████░░░░░░░░░░████████▒▒██░░░░▒▒██ ██▒▒██░░░░░░░░░░██░░ ██░░░░░░██████████████
# ▒▒██░░░░░░░░░░░░░░██████░░░░██▒▒▒▒▒▒░░██░░░░░░░░██░░ ██░░░░░░▒▒████████
# ▒▒████░░░░░░░░░░░░░░████████ ▒▒▒▒ ████░░░░██ ░░██░░░░▒▒████████
# ████████▒▒░░░░░░░░░░░░▒▒████████████████████ ██▒▒░░░░░░████████
# ██████████████████▒▒░░░░░░░░██ ████████░░░░░░░░░░░░░░██████
# ████████████████████████▒▒██ ▒▒ ██▒▒▓▓░░░░░░░░ ░░░░████
# ████████████████████████████ ██░░░░░░░░██▒▒ ██
# ████████ ████████████████▒▒ ██ ░░░░░░░░██████
# ██████████████████ ██████████████ ████░░░░██░░░░██
# ██████████████████ ████████████ ██████░░░░██████
# ██████████████████ ████████████ ████████████
# ▓▓██████████████████▒▒ ░░████████████ ▓▓
# ████████████████▒▒ ████████████▓▓
# ░░░░████████████▒▒░░░░░░ ▒▒██████████████
# ░░░░░░░░░░██████░░░░░░░░░░░░░░██████████████
# ░░░░░░░░░░████████████░░░░░░░░██████████████
# ░░░░░░░░░░████████████░░░░░░░░████████████████████
# ░░░░░░░░░░░░▒▒░░░░░░░░░░░░░░░░████████████████████████
|
990,357 | 94a95ddd23a5f3e10f55b7c5bdbf18bd22155ad4 | #!/usr/bin/python
#import time
import os, sys, fnmatch
import shutil
from subprocess import Popen, PIPE
def invoke_klee():
    """Run KLEE on every .bc file under ./bcfiles, replay the generated test
    cases, and write input/output pairs into ./IOfiles/<name>.txt.

    Relies on the module-level getInputData/getOutputData helpers and on the
    `klee`, `ktest-tool` and `make` commands being available on PATH.
    """
    # invoke bytecode from "BC" folder
    bcfiles = []
    for path, dir, files in os.walk(os.path.abspath(os.getcwd()+"/bcfiles")):
        for filename in fnmatch.filter(files, "*.bc"):
            bcfiles.append(os.path.join(path, filename))
    print(bcfiles)
    # Recreate IOfiles from scratch so results from prior runs don't linger.
    if not os.path.exists("IOfiles"):
        os.mkdir("IOfiles")
    else:
        shutil.rmtree("IOfiles")
        os.mkdir("IOfiles")
    print("set LD_LIBRARY_PATH")
    # NOTE(review): this export runs in its own child shell, so it does NOT
    # affect the later Popen calls — confirm whether the environment needs to
    # be passed via env= instead. Also, 'klee_nuild' looks like a typo for
    # 'klee_build' — verify the path.
    command1 = "export LD_LIBRARY_PATH=/home/klee/klee_nuild/klee/lib"
    process1 = Popen(command1, shell=True, stdout=PIPE)
    outs, errs = process1.communicate()
    print(outs)
    print(errs)
    for file in bcfiles:
        print(">>>>>>>>>>>>>>>>>>>>>>>>>> invoke klee on " + file)
        # use the full command instead of the shell alias "klee"
        command = "klee --libc=uclibc --posix-runtime " + file
        # command = "klee " + file
        process = Popen(command, shell=True, stdout=PIPE)
        outs, errs = process.communicate()
        print(outs)
        print(errs)
        testcases = []
        filepath, filename = os.path.split(file)
        # print(filepath)
        # KLEE leaves a 'klee-last' symlink pointing at the newest output dir.
        symfilepath = filepath + '/klee-last'
        # find symlink target of klee-last
        targetDir = os.readlink(symfilepath)
        for path, dir, files in os.walk(os.path.abspath(targetDir)):
            for filename in fnmatch.filter(files, "*.ktest"):
                testcases.append(os.path.join(path, filename))
        # print(testcases)
        filepath, filename = os.path.split(file)
        # NOTE(review): the [0:-1] slice drops the last character of the stem —
        # presumably a trailing suffix digit; confirm this is intentional.
        filename = str.split(filename, '.')[0][0:-1]
        print(">> extract input-output pairs to filename: " + filename)
        # read test case input
        input=[]
        output=[]
        for i in range(len(testcases)):
            # Dump the .ktest file as text, then parse the symbolic inputs.
            command1 = "ktest-tool " + testcases[i]
            process1 = Popen(command1, shell=True, stdout=PIPE, stderr=PIPE)
            outs1, errs1 = process1.communicate()
            # print(errs1)
            outs1=outs1.decode()
            # temp.txt is reused (overwritten) for every test case.
            name = "temp.txt"
            fd = open(name, 'w')
            fd.write(outs1)
            fd.close()
            inputArgs = getInputData(name)
            # print("input arguments:")
            # print(inputArgs)
            input.append(inputArgs)
            # replay test case to get output
            command2 = "make -s replay MAIN="
            command2 += filename + ";"
            #command2+= " -lkleeRuntest;"
            command2 += "KTEST_FILE="+testcases[i]+" ./a.out;"
            command2 += "echo $?;"
            print(command2)
            process2 = Popen(command2, shell=True, stdout=PIPE, stderr=PIPE)
            outs, errs = process2.communicate()
            print(outs)
            print(errs)
            outs = outs.decode()
            # print("raw:"+outs)
            testOut = getOutputData(outs)
            # print("after:::"+testOut)
            output.append(testOut)
        print(">> input-output pairs num %d"%len(testcases))
        outFilename = "IOfiles/" + filename + ".txt"
        print(">> input-output paris are saved in "+ outFilename)
        # File layout: first line is the pair count, then input lines followed
        # by "output:<value>" for each test case.
        fd = open(outFilename, 'w')
        fd.write(str(len(testcases)) + '\n')
        for i in range(len(testcases)):
            # fd.write("test %d\n"%i)
            # fd.write("input:")
            for _list in input[i]:
                for _str in _list:
                    fd.write(_str)
            fd.write("\n")
            fd.write("output:")
            fd.write(output[i])
            fd.write("\n")
        fd.close()
        # command3 = "KTEST_FILE=klee/last/test000001.ktest ./a.out"
        print("************************** klee execution on " + file + " done")
    # return
def getInputData(name):
    """Parse the symbolic input bytes from a ktest-tool text dump.

    Args:
        name (str): Path to a file holding the output of ``ktest-tool``.

    Returns:
        list[str]: One hex-digit string per symbolic object, with the
        ``b'...'`` wrapper and the ``\\x`` escape markers stripped.  The
        first object is skipped -- it is assumed to hold metadata (e.g. the
        model version) rather than actual test input; TODO confirm against
        the ktest dump format.
    """
    objects = []
    # Context manager guarantees the descriptor is closed (the original
    # implementation opened the file and never closed it).
    with open(name, 'r') as fd:
        for line in fd:
            if "data:" in line:
                # Keep everything after the last "data: " marker.  If the
                # marker appears without a trailing space the whole line is
                # kept, matching the original behaviour.
                objects.append(line.split("data: ")[-1])
    # Strip the leading "b'" and the trailing "'\n", then drop the "\x"
    # escapes so only plain hex digits remain.
    newStr = [value[2:-2].replace("\\x", "") for value in objects]
    return newStr[1:]
def getOutputData(printout):
    """Extract the replayed program's output from its captured stdout.

    Only the first line of *printout* is inspected; the text after the last
    ``output:`` marker on that line is returned.  When no marker is present
    the whole first line is returned unchanged.
    """
    first_line, _, _ = printout.partition('\n')
    marker = "output:"
    pos = first_line.rfind(marker)
    if pos == -1:
        return first_line
    return first_line[pos + len(marker):]
if __name__ == "__main__":
    # Entry point: symbolically execute every .bc file and dump I/O pairs.
    invoke_klee()
    #print "return code:" + rc
|
990,358 | d682f2d8c6553a7607a6b64f572aeab869a24add | import command_system
import vkapi
import db
import keyboards as kb
import settings
def next(vk_id, body):
    """Admin command: invite last year's non-voters to vote again.

    Args:
        vk_id: VK id of the sender; only ids in ``settings.admin_ids`` may
            use this command.
        body (str): Message text, expected as ``/пригласить <year>``.

    Returns:
        tuple: ``(reply_text, attachment)``.  ``reply_text`` reports how many
        users were notified; ``(None, None)`` when the command does not apply
        or nobody was notified.
    """
    # NOTE: the name shadows the builtin ``next``; kept because the command
    # framework registers this exact function (see ``command.process`` below).
    if vk_id not in settings.admin_ids:
        return None, None
    if not body.startswith('/пригласить'):
        return None, None
    parts = body.split()
    if len(parts) < 2:
        # A bare '/пригласить' without a year argument used to raise
        # IndexError here; treat it as "command does not apply".
        return None, None
    year = parts[1]
    msg = 'Добрый вечер!\n\n'
    msg += 'В прошлом году вы голосовали здесь на выборах в Студсовет.\n'
    msg += 'В этом году мы приглашаем снова проголосовать за представителей вашего курса.\n\n'
    msg += 'Для вас процедура авторизации в боте упрощена -- вам не придётся вводить ФИО и номер студенческого!\n\n'
    msg += 'Если вы хотите проголосовать, отправьте 1.\n\n'
    msg += 'Если вы хотите отправить сообщение в Студенческий совет ВМК, отправьте 2.'
    users = db.get_prev_novote_users(year)
    if users:
        vkapi.send_message_to_users(users, msg, keyboard=kb.get_board('0', '-1'))
        return 'Оповещено пользователей: ' + str(len(users)), None
    # Preserve the (reply, attachment) tuple contract: the original fell off
    # the end here and returned a bare None, breaking callers that unpack.
    return None, None
# Register the handler above as an admin command in the bot's command system.
command= command_system.AdmCommand()
command.description = 'Отправить приглашение'
command.process = next
'''import command_system
import vkapi
import db
import keyboards as kb
import settings
def next(vk_id, body):
if vk_id not in settings.admin_ids:
return None
keys = ['/оповестить!']
if body not in keys:
return None,
msg = 'Добрый день!\n\n'
msg += 'Вы не закончили процесс голосования.\n'
msg += 'Меньше суток осталось до завершения выборов. Вы всё ещё можете проголосовать здесь.\n\n'
msg += 'Если у вас возникли какие-то затруднения, вы можете написать @anton.hand (Антону) и мы решим проблему.\n\n'
users = db.get_users_by_state('2')
if users:
vkapi.send_message_to_users(users, msg, keyboard = kb.get_board('2', '-1'))
return 'Оповещено пользователей: ' + str(len(users)), None
command= command_system.AdmCommand()
command.description = 'Отправить приглашение'
command.process = next''' |
990,359 | cf6ec39c1b090382ebdd6b01472c36e535108dff | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 19 20:00:25 2019
@author: Mohammad Shahin
"""
def interlaced(st1, st2):
    """Print ``len(st2)`` and the character-interlaced merge of two strings.

    Characters of *st1* and *st2* alternate for the length of the shorter
    string; the remaining tail of the longer string is appended unchanged.
    Example: ``interlaced("aaaa", "bb")`` prints ``2`` then ``ababaa``.
    """
    print(len(st2))
    # Tail of the longer string; empty when lengths are equal.  The original
    # left ``x`` unassigned in the equal-length case and raised NameError.
    tail = ''
    if len(st1) > len(st2):
        tail = st1[len(st2):]
    elif len(st2) > len(st1):
        tail = st2[len(st1):]
    res = ''
    for i in range(min(len(st1), len(st2))):
        res += st1[i] + st2[i]
    print(res + tail)


interlaced("aaaa", "bb")
|
990,360 | 2c3973431f55f27f19154ec3addbfb0e5104d66e | from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
import requests
import re
from bs4 import BeautifulSoup
import pandas as pd
import xlrd
import openpyxl
pd.set_option('display.max_columns', None)
import numpy as np
from datetime import date, datetime
import time
import os
# driver = webdriver.Chrome(executable_path='/Users/himanshuaggarwal/PycharmProjects/webscraping/chromedriver')
# The scraper drives a real browser session (Firefox via geckodriver).
driver = webdriver.Firefox(executable_path='/Users/himanshuaggarwal/PycharmProjects/webscraping/geckodriver')

# Remove result files left over from a previous run so stale data is not
# merged into this run's output.
# Fix: the old pattern 'Results for Web Devs' never matched anything because
# the files are written later as 'Results for Web Developers.xlsx'.
# NOTE(review): the 'Product Designer' and 'Software Engineer' outputs are
# still not cleaned up here -- confirm whether that is intentional.
files = os.listdir()
for file in files:
    if ('Results for UX' in file or 'Results for Data' in file
            or 'Results for Web Developers' in file):
        os.remove(file)
def get_data(link):
    """Scrape one Indeed search-results page into a DataFrame.

    Dismisses the Indeed pop-over (if present) via the shared Selenium
    ``driver``, then fetches *link* with ``requests`` and parses the organic
    job cards with BeautifulSoup.

    Args:
        link (str): URL of an Indeed search-results page.

    Returns:
        pandas.DataFrame: Columns Title, Company, Summary, Location,
        DayPosted, JobPostingLink; one row per parsed job card.

    NOTE(review): if the link extraction below raises, the bare
    ``except: pass`` leaves ``job_posting_link`` shorter than the other
    lists, and the DataFrame construction at the end will raise a
    ValueError -- confirm and consider appending 'NA' instead.
    """
    # This is to close a pop up window if it appears on the screen
    time.sleep(6)
    try:
        if driver.find_elements_by_class_name('popover-foreground') is not None:
            if len(driver.find_elements_by_class_name('popover-foreground')) > 0:
                driver.find_elements_by_id('popover-link-x')[0].click()
    except:
        pass
    finally:
        # The scrape itself always runs, whether or not a pop-over was found.
        html = requests.get(link).content
        soup = BeautifulSoup(html, 'html.parser')
        jobpostings = soup.find_all('div', {'data-tn-component': 'organicJob'})
        title = []
        company = []
        location = []
        summary = []
        salary_snippet = []
        day_posted = []
        job_posting_link = []
        for posting in jobpostings:
            # NOTE(review): this raises TypeError when the anchor is missing
            # (find() returns None) -- presumably every organic card has one;
            # confirm against the page markup.
            temp = posting.find('a', {'class': 'jobtitle turnstileLink'})['href']
            # Only keep cards that have a title and a 'jk' job-key in the URL.
            if posting.find('div', {'class': 'title'}) is not None and len(re.findall('jk.*', temp)) > 0:
                title.append(posting.find('div', {'class': 'title'}).text.strip())
                if posting.find('div', {'class': 'sjcl'}) is not None:
                    # First non-empty line is the company, last is the location.
                    temp = posting.find('div', {'class': 'sjcl'}).text.strip().split('\n')
                    sjcl = []
                    for item in temp:
                        if item != '':
                            sjcl.append(item)
                    company.append(sjcl[0])
                    location.append(sjcl[-1])
                else:
                    company.append('NA')
                    location.append('NA')
                if posting.find('a', {'class':'jobtitle turnstileLink'}) is not None:
                    temp = posting.find('a', {'class':'jobtitle turnstileLink'})['href']
                    try:
                        # Rebuild a canonical view-job URL from the job key.
                        temp = 'https://www.indeed.com/viewjob?' + re.findall('jk.*', temp)[0]
                        job_posting_link.append(temp)
                    except:
                        pass
                else:
                    job_posting_link.append('NA')
                if posting.find('div', {'class': 'summary'}) is not None:
                    summary.append(posting.find('div', {'class': 'summary'}).text.strip())
                else:
                    summary.append('NA')
                # salary_snippet is collected but never added to the frame.
                if posting.find('div', {'class': 'salarysnippet'}) is not None:
                    salary_snippet.append(posting.find('div', {'class': 'salarysnippet'}))
                else:
                    day_posted.append('NA') if False else salary_snippet.append('NA')
                if posting.find('div', {'class': 'jobsearch-SerpJobCard-footerActions'}) is not None:
                    day_posted.append(
                        posting.find('div', {'class': 'jobsearch-SerpJobCard-footerActions'}).text.strip().split(' -')[
                            0].strip())
                else:
                    day_posted.append('NA')
        temp_df = pd.DataFrame()
        temp_df['Title'] = title
        temp_df['Company'] = company
        temp_df['Summary'] = summary
        temp_df['Location'] = location
        temp_df['DayPosted'] = day_posted
        temp_df['JobPostingLink'] = job_posting_link
        return temp_df
# For each search term: fill in the Indeed search form, apply advanced
# filters (50-mile radius, last 7 days, 50 results/page), page through the
# results collecting rows via get_data(), and save one Excel file per term.
jobs = ['Data Analysis', 'Data Analyst', 'Developer', 'Software Engineer', 'UX', 'Product Designer', 'UX/UI']
for job in jobs:
    driver.get('https://www.indeed.com')
    # Deleting anything that might be there in the cell and input new values for Job Type
    driver.find_element_by_id('text-input-what').send_keys(Keys.COMMAND + "a")
    driver.find_element_by_id('text-input-what').send_keys(Keys.DELETE)
    driver.find_elements_by_id('text-input-what')[0].send_keys(job)
    time.sleep(6)
    # location = input('enter the location filter')
    # flag = True
    # while flag:
    #     print('You have entered', location)
    #     print('If this is correct press 1')
    #     choice = int(input('Enter Choice'))
    #     if choice == 1:
    #         flag = False
    #     else:
    #         print('Your Choice was', choice)
    #         location = input('enter the location filter')
    # Deleting anything that might be there in the cell and input new values for Location
    driver.find_element_by_id('text-input-where').send_keys(Keys.COMMAND + "a")
    driver.find_element_by_id('text-input-where').send_keys(Keys.DELETE)
    driver.find_elements_by_id('text-input-where')[0].send_keys('Miami, FL')
    time.sleep(3)
    # Since the clicks was not working. We used the return key in the same cell
    # driver.find_elements_by_class_name('icl-WhatWhere-buttonWrapper')[0].click()
    driver.find_elements_by_id('text-input-where')[0].send_keys(Keys.RETURN)
    time.sleep(3)
    driver.find_elements_by_partial_link_text('Advanced Job Search')[0].click()
    select = Select(driver.find_element_by_id('radius'))
    # select by visible text
    # select.select_by_visible_text('within 50 miles of')
    # select by value
    select.select_by_value('50')
    select = Select(driver.find_element_by_id('fromage'))
    select.select_by_value('7')
    select = Select(driver.find_element_by_id('limit'))
    select.select_by_value('50')
    time.sleep(2)
    driver.find_element_by_id('fj').click()
    data = pd.DataFrame(columns=['Title', 'Company', 'Summary', 'Location', 'DayPosted', 'JobPostingLink'])
    time.sleep(3)
    # Dismiss the Indeed pop-over once before paging begins; note this
    # try block has no except clause, so a click failure propagates.
    try:
        if driver.find_elements_by_class_name('popover-foreground') is not None:
            if len(driver.find_elements_by_class_name('popover-foreground')) > 0:
                driver.find_elements_by_id('popover-link-x')[0].click()
                print('PopOver Eliminated')
                time.sleep(4)
    finally:
        print('Starting Scraping')
    # Page through results until the 'next page' control disappears or we
    # have collected more than 150 rows.
    FLAG = True
    counter = 1
    while FLAG:
        try:
            if driver.find_elements_by_class_name('popover-foreground') is not None:
                if len(driver.find_elements_by_class_name('popover-foreground')) > 0:
                    driver.find_elements_by_id('popover-link-x')[0].click()
                    print('PopOver Eliminated')
                    time.sleep(4)
        except:
            pass
        if driver.current_url is not None:
            print(driver.current_url)
            print(counter)
            counter += 1
            # Go to the next page. We are using find_element instead of elements so that we only go to the next page
            url = driver.current_url
            scraped_page_df = get_data(url)
            data = pd.concat([data, scraped_page_df], axis=0)
            try:
                time.sleep(3)
                driver.find_element_by_class_name('pn').click()
            except:
                print('Next page was not found or click did not word')
                FLAG = False
        else:
            FLAG = False
            print('Exit - url not found')
        if data.shape[0] > 150:
            FLAG = False
            print('Exit - Got 100 rows')
    # Tag the collected rows with the search term and timestamps.
    data['Cohort'] = job
    data['Date'] = date.today()
    data['Date & Time'] = datetime.now()
    # Keep only the leading 'Posted N days ago'-style prefix.
    data['DayPosted'] = list(map(lambda x: x[0:11], data['DayPosted']))
    data = data.reset_index()
    print(data)
    # Saving to a csv file
    if 'ux' in job.lower():
        if '/' in job:
            data.to_excel('Results for UXUI.xlsx', sheet_name='Sheet')
        else:
            data.to_excel('Results for UX.xlsx', sheet_name=job)
    elif 'designer' in job.lower():
        data.to_excel('Results for Product Designer.xlsx', sheet_name=job)
    elif 'analysis' in job.lower():
        data.to_excel('Results for Data Analysis.xlsx', sheet_name=job)
    elif 'analyst' in job.lower():
        data.to_excel('Results for Data Analyst.xlsx', sheet_name=job)
    elif 'developer' in job.lower():
        data.to_excel('Results for Web Developers.xlsx', sheet_name=job)
    elif 'software' in job.lower():
        data.to_excel('Results for Software Engineer.xlsx', sheet_name=job)
# Merge the two Data files, drop duplicates and the columns introduced by
# reset_index/read_excel (everything before the 'index' column), and save.
df1 = pd.read_excel('Results for Data Analysis.xlsx')
df2 = pd.read_excel('Results for Data Analyst.xlsx')
df = pd.concat([df1, df2], axis=0)
df = df.drop_duplicates().reset_index()
cols = list(df.columns)
index = cols.index('index')
for i in range(index):
    df = df.drop(cols[i], axis=1)
df = df.drop('Date', axis=1)
df.to_excel('DATA_final.xlsx', sheet_name='DataCohortSheet')
# Same merge for the two UX files.
df1 = pd.read_excel('Results for UXUI.xlsx')
df2 = pd.read_excel('Results for UX.xlsx')
df = pd.concat([df1, df2], axis=0)
df = df.drop_duplicates().reset_index()
cols = list(df.columns)
index = cols.index('index')
for i in range(index):
    df = df.drop(cols[i], axis=1)
df = df.drop('Date', axis=1)
df.to_excel('UX_final.xlsx', sheet_name='UXCohortSheet')
# Remove the per-term intermediate files now that the merged outputs exist.
files = os.listdir()
for file in files:
    if 'Results for UX' in file or 'Results for Data' in file:
        os.remove(file)
990,361 | 9821623c705844f9159abd7122d77485faa34041 | from flask import render_template, flash, redirect, url_for
from app import app
from app.forms import ContactMeForm
from flask_pymongo import PyMongo
mongo = PyMongo(app)
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page with a hard-coded demo user and posts."""
    demo_user = {'username': 'Imran'}
    demo_posts = [
        {'author': {'username': 'John'},
         'body': 'Beautiful day in Portland!'},
        {'author': {'username': 'Susan'},
         'body': 'The Avengers movie was so cool!'},
    ]
    return render_template(
        'index.html', title='Web App Home', user=demo_user, posts=demo_posts)
@app.route('/home')
def home():
    """Render the index template for /home.

    NOTE(review): unlike index(), no user/posts context is passed here --
    confirm the template tolerates the missing variables.
    """
    return render_template('index.html')
@app.route('/about')
def about():
    """Render the static About page."""
    return render_template('about.html')
@app.route('/contactme', methods=['GET', 'POST', 'DELETE', 'PATCH'])
def contactme():
    """Show the contact form and persist valid submissions to MongoDB.

    GET renders the form; a valid POST stores the submission in the
    ``contact`` collection and redirects to the index page.
    NOTE(review): 'DELETE' and 'PATCH' are accepted by the route but the
    handler has no logic for them -- confirm they can be removed.
    """
    form = ContactMeForm()
    if form.validate_on_submit():
        contact_collection = mongo.db.contact
        flash('Thanks for sending a message, I will reply as soon as possible.')
        print('This worked I think')
        #flash('Thanks for sending your message firstname={} lastname={}, I appreciate you reaching out'.format(form.firstname.data, form.lastname.data))
        contact_collection.insert_one({'firstname' :form.firstname.data, 'lastname':form.lastname.data, 'email':form.email.data, 'address':form.address.data, 'city':form.city.data,'country':form.country.data,'reason':form.reasons.data, 'message':form.messagebody.data})
        return redirect('/index')
    return render_template('contactme.html', title='Contact Me', form=form)
|
990,362 | 103dcda6f719691d657ec0986095a5dd54aa5038 | import math
import time
# ANSI SGR color escape codes cycled through by the animation below.
# NOTE(review): '\033[99m' is not a standard SGR color (bright foreground
# colors are 90-97); most terminals render it as the default color --
# confirm whether '\033[93m' (yellow) was intended.
colors = ['\033[95m', '\033[94m', '\033[92m', '\033[91m', '\033[99m']
# Repeat the full animation three times (cycleCount itself is unused).
for cycleCount in range(1,4):
    frequencies = [10, 25, 30, 98]
    times = [.001, .01, .0001, .005]
    for timeOffset in times:
        for frequency in frequencies:
            for color in colors:
                # One full cosine period: cycle/frequency sweeps 0..2*pi.
                for cycle in range(0, int(math.pi*2*frequency)):
                    # Indent an 'A' along a cosine curve (amplitude 20
                    # columns, centered at column 30) to draw a vertical
                    # wave down the terminal.
                    print(color + ' ' * int(math.cos(cycle/frequency)*20+30) + "A")
                    time.sleep(timeOffset)
|
990,363 | 3874189b9662d11b43d49eb1bae5a1379a5d47ef | from pycqed.measurement.calibration.automatic_calibration_routines.base import\
update_nested_dictionary
from .base_step import Step, IntermediateStep
from pycqed.measurement.calibration import single_qubit_gates as qbcal
from pycqed.measurement.quantum_experiment import QuantumExperiment
from pycqed.utilities.general import temporary_value
from pycqed.utilities.reload_settings import reload_settings
import pycqed.analysis.analysis_toolbox as a_tools
from typing import List, Any, Tuple, Dict, Type, Optional
from warnings import warn
import numpy as np
import copy
import logging
import pprint
import inspect
ROUTINES = 'Routines'
log = logging.getLogger(ROUTINES)
log.setLevel('INFO')
try:
from pycqed.utilities import devicedb
except ModuleNotFoundError:
log.info("The module 'device-db-client' was not successfully imported. "
"The device database features will not be available.")
_device_db_client_module_missing = True
else:
_device_db_client_module_missing = False
class RoutineTemplate(list):
    """Class to describe templates for (calibration) routines.

    The class is essentially a list of lists that contain a class, a label, and
    the corresponding settings of a step in a routine. Steps may be
    measurements, calibration routines, or intermediate steps.
    """

    def __init__(self,
                 steps,
                 global_settings=None,
                 routine=None,
                 ):
        """Initialize the routine template.

        Args:
            steps (list of :obj:`Step`): List of steps that define the routine.
                Each step consists of a list of three elements. Namely, the step
                class, the step label (a string), and the step settings (a
                dictionary). It can optionally also have a fourth element to
                give the experiment temporary values.

                For example:
                    steps = [
                        [StepClass1, step_label_1, step_settings_1],
                        [StepClass2, step_label_2, step_settings_2,
                            step_tmp_vals_2],
                    ]
            global_settings (dict, optional): Dictionary containing global
                settings for each step of the routine (e.g., "dev", "update",
                "delegate_plotting"). Defaults to None.
            routine (:obj:`AutomaticCalibrationRoutine`, optional): Routine that
                the RoutineTemplate defines. Defaults to None.
        """
        super().__init__(steps)

        if routine is not None:
            self.routine = routine

        if global_settings is not None:
            self.global_settings = global_settings
        else:
            self.global_settings = {}

    def get_step_class_at_index(self, index):
        """Returns the step class for a specific step in the routine template.

        Args:
            index (int): Index of the step for which the settings are to be
                returned.

        Returns:
            class: The class of the step at position 'index' in the routine
                template.
        """
        return self[index][0]

    def get_step_label_at_index(self, index):
        """Returns the step label for a specific step in the routine template.

        Args:
            index (int): Index of the step for which the step label is to be
                returned.

        Returns:
            str: The label of the step at position 'index' in the routine
                template.
        """
        return self[index][1]

    def get_step_settings_at_index(self, index):
        """Returns the settings for a specific step in the routine template.

        Args:
            index (int): Index of the step for which the settings are to be
                returned.

        Returns:
            dict: The settings dictionary of the step at position 'index' in
                the routine template. Global settings are included, with the
                step's own settings taking precedence.
        """
        settings = {}
        settings.update(copy.copy(self.global_settings))
        settings.update(copy.copy(self[index][2]))
        return settings

    def get_step_tmp_vals_at_index(self, index):
        """Returns the temporary values of the step at index.

        Args:
            index (int): Index of the step for which the settings are to be
                returned.

        Returns:
            list: The temporary values for the step at position 'index' in the
                routine template. Each entry is a tuple made of a
                QCoDeS parameter and its temporary value. Empty if the step
                stores no temporary values.
        """
        try:
            return self[index][3]
        except IndexError:
            return []

    def extend_step_tmp_vals_at_index(self, tmp_vals, index):
        """Extends the temporary values of the step at index. If the step does
        not have any temporary values, it sets the temporary values to the
        passed temporary values.

        Args:
            tmp_vals (list): The temporary values for the step at position
                'index' in the routine template. Each entry should be a tuple
                made of a QCoDeS parameter and its temporary value.
            index (int): Index of the step for which the temporary values should
                be used.
        """
        try:
            self[index][3].extend(tmp_vals)
        except IndexError:
            self[index].append(tmp_vals)

    def update_settings_at_index(self, settings, index):
        """Updates the settings of the step at position 'index'.

        Args:
            settings (dict): The new settings that will update the existing
                settings of the step at position 'index' in the routine
                template.
            index (int): Index of the step for which the temporary values should
                be used.
        """
        self[index][2].update(settings)

    def update_all_step_settings(self, settings):
        """Updates all settings of all steps in the routine.

        Args:
            settings (dict): The new settings that will update the existing
                settings of all the steps.
        """
        for i in range(len(self)):
            self.update_settings_at_index(settings, index=i)

    def update_settings(self, settings_list):
        """Updates all settings of the routine. Settings_list must be a list of
        dictionaries of the same length as the routine.

        Args:
            settings_list (list): List of dictionaries, where each entry will
                update the existing settings of the corresponding step of the
                routine. Must be of the same length as the routine.
        """
        for i, x in enumerate(settings_list):
            self.update_settings_at_index(settings=x, index=i)

    def view(self, **kws):
        """DEPRECATED."""
        warn('This method is deprecated, use `Routine.view()` instead',
             DeprecationWarning, stacklevel=2)

    def __str__(self):
        """Returns a string representation of the routine template.

        FIXME: this representation does not include the tmp_vals of the steps.
        """
        s = ""
        for i, x in enumerate(self):
            s += f"Step {i}, {x[0].__name__}, {x[1]}\n"
        return s

    def step_name(self, index):
        """Returns the name of the step at position 'index'.

        Args:
            index (int): Index of the step whose name will be returned.

        Returns:
            str: The label of the step or the name of its class.
        """
        step_label = self.get_step_label_at_index(index)
        if step_label is not None:
            return step_label
        return self.get_step_class_at_index(index).get_lookup_class().__name__

    def add_step(self,
                 step_class,
                 step_label,
                 step_settings,
                 step_tmp_vals=None,
                 index=None):
        """Adds a step to the routine template.

        Args:
            step_class (class): Class of the step
            step_label (str): Label of the step
            step_settings (dict): Settings of the step.
            step_tmp_vals (list, optional): Temporary values for the step. Each
                entry should be a tuple made of a QCoDeS parameter and its
                temporary value. Defaults to None.
            index (int, optional): Index of the routine template at which the
                step should be added. If None, the step will be added at the end
                of the routine. Defaults to None.
        """
        if step_tmp_vals is None:
            step_tmp_vals = []

        if index is None:
            super().append(
                [step_class, step_label, step_settings, step_tmp_vals])
        else:
            super().insert(
                index, [step_class, step_label, step_settings, step_tmp_vals])

    @staticmethod
    def check_step(step):
        """Check that a step is properly built.

        Args:
            step (list): Routine template step.
        """
        assert isinstance(step, list), "Step must be a list"
        assert (len(step) == 3 or len(step) == 4), \
            "Step must be a list of length 3 or 4 (to include temporary values)"
        assert isinstance(step[0], type), (
            "The first element of the step "
            "must be a class (e.g. measurement or a calibration routine)")
        # Fixed message: step[2] is the THIRD element (the original message
        # wrongly called it the second element).
        assert isinstance(step[2],
                          dict), ("The third element of the step "
                                  "must be a dictionary containing settings")

    def __getitem__(self, i):
        """Overloading of List.__getitem__ to ensure type RoutineTemplate is
        preserved.

        Args:
            i: index or slice

        Returns:
            Element or new RoutineTemplate instance
        """
        new_data = super().__getitem__(i)
        if isinstance(i, slice):
            new_data = self.__class__(new_data)
            new_data.global_settings = copy.copy(self.global_settings)
        return new_data
class AutomaticCalibrationRoutine(Step):
"""Base class for general automated calibration routines
NOTE: In the children classes, it is necessary to call final_init at
the end of their constructor. It is not possible to do this in the
parent class because some routines need to do some further initialization
after the base class __init__ and before the routine is actually created
(which happens in final_init).
In the children classes, the initialization follows this hierarchy:
ChildRoutine.__init__
AutomaticCalibrationRoutine.__init__
final_init
create_initial_routine
create_routine_template
Afterwards, the routine is ready to run.
If a routine contains some subroutines, the subroutines will be initialized
at runtime when the parent routine is running the subroutine step.
"""
def __init__(self,
dev,
routine=None,
autorun=True,
**kw,
):
"""Initializes the routine.
Args:
dev (Device): Device to be used for the routine
autorun (bool): If True, the routine will be run immediately after
initialization.
routine (Step): The parent routine of the routine.
Keyword args:
qubits (list): List of qubits to be used in the routine
Configuration parameters (coming from the configuration parameter
dictionary):
update (bool): If True, the routine will overwrite qubit attributes
with the values found in the routine. Note that if the routine
is used as subroutine, this should be set to True.
save_instrument_settings (bool): If True, the routine will save the
instrument settings before and after the routine.
verbose: If True, the routine will print out the progress of the
routine. Default is True.
"""
self.kw = kw
self.autorun = autorun
# Call Step constructor
super().__init__(dev, routine, **kw)
self.parameter_sublookups = ['General']
self.leaf = False
self.step_label = self.step_label or self.name
self.DCSources = self.kw.pop("DCSources", None)
self.routine_steps: List[Step] = []
self.current_step_index = 0
self.routine_template: Optional[RoutineTemplate] = None
self.current_step: Optional[Step] = None
self.current_step_settings: Optional[Dict] = None
self.current_step_tmp_vals: Optional[List[Tuple[Any, Any]]] = None
# MC - trying to get it from either the device or the qubits
for source in [self.dev] + self.qubits:
try:
self.MC = source.instr_mc.get_instr()
break
except KeyError: # instr_mc not a valid instrument (e.g., None)
pass
self.create_initial_parameters()
# Registering start of routine so all data in measurement period can
# be retrieved later to determine the Hamiltonian model
self.preroutine_timestamp = self.MC.get_datetimestamp()
def merge_settings(self, lookups, sublookups):
"""Merges all scopes relevant for a particular child step. The settings
are retrieved and merged recursively to ensure that the priority
specified in lookups and sublookups is respected.
Example of how the settings are merged in chronological order
with the following routine:
Routine [None, "Routine"]
SubRoutine ["subroutine_label", "SubRoutine"]
ExperimentStep ["experiment_label", "Experiment"]
a) Initialization of Routine's steps. This will extract the settings
of SubRoutine from the configuration parameter dictionary.
Call: Routine.merge_settings(lookups=[None, "Routine"],
sublookups=["subroutine_label", "SubRoutine"])
Look for the relevant settings in this order:
1) Routine.settings["SubRoutine"]
2) Routine.settings["subroutine_label"]
3) Routine.settings["Routine"]["SubRoutine"]
4) Routine.settings["Routine"]["subroutine_label"]
At the end, SubRoutine.settings will be updated according to the
hierarchy specified in the lookups.
b) Initialization of SubRoutine's steps (occurs at runtime). This will
extract the settings of ExperimentStep from the configuration parameter
dictionary.
Call: SubRoutine.merge_settings(lookups=["subroutine_label","SubRoutine"],
sublookups=["experiment_label","Experiment"])
Call: Routine.merge_settings(lookups=["subroutine_label","SubRoutine"],
sublookups=["experiment_label","Experiment"])
Look for the relevant settings in this order:
1) Routine.settings["Experiment"]
2) Routine.settings["experiment_label"]
3) Routine.settings["SubRoutine"]["Experiment"]
4) Routine.settings["SubRoutine"]["experiment_label"]
5) Routine.settings["subroutine_label"]["Experiment"]
6) Routine.settings["subroutine_label"]["experiment_label"]
7) SubRoutine.settings["Experiment"]
8) SubRoutine.settings["experiment_label"]
9) SubRoutine.settings["SubRoutine"]["Experiment"]
10) SubRoutine.settings["SubRoutine"]["experiment_label"]
11) SubRoutine.settings["subroutine_label"]["Experiment"]
12) SubRoutine.settings["subroutine_label"]["experiment_label"]
The dictionary of settings that were merged according to the
hierarchy specified in the lookups can be used to update
:obj:`Step.settings`.
Arguments:
lookups (list): A list of all scopes for the parent routine
of the step whose settings need to be merged. The elements
of the list will be interpreted in descending order of priority.
sublookups (list): A list of scopes for the step whose settings need
to be merged. The elements of the list will be interpreted in
descending order of priority.
Returns:
dict: The dictionary containing the merged settings.
"""
if self.routine is not None:
# If the current step has a parent routine, call its merge_settings
# recursively
settings = self.routine.merge_settings(lookups, sublookups)
else:
# If the root routine is calling the function, then initialize
# an empty dictionary for the settings of the child step
settings = {}
for sublookup in reversed(sublookups):
# Looks for the sublookups directly in the settings. If self is the
# root routine, this corresponds to looking in the first layer of
# the configuration parameter dictionary, where the most general
# settings are stored.
# E.g., ['experiment_label', 'Experiment'] will first be looked
# up in the most general settings.
if sublookup in self.settings:
update_nested_dictionary(settings, self.settings[sublookup])
# Look for the entries settings[lookup][sublookup] (if both the lookup
# and the sublookup entries exist) or settings[lookup] (if only the
# lookup entry exist, but not the sublookup one)
for lookup in reversed(lookups):
if lookup in self.settings:
if sublookups is not None:
for sublookup in reversed(sublookups):
if sublookup in self.settings[lookup]:
update_nested_dictionary(
settings, self.settings[lookup][sublookup])
else:
update_nested_dictionary(settings,
self.settings[lookup])
return settings
def extract_step_settings(self,
step_class: Type[Step],
step_label: str,
step_settings=None,
lookups=None,
sublookups=None):
"""Extract the settings of a step from the configuration parameter
dictionary that was loaded and built from the JSON config files. The
entry 'settings' of step_settings is also included in the returned
settings.
Args:
step_class (Step): The class of the step whose settings need to be
extracted.
step_label (str): The label of the step whose settings need to be
extracted.
step_settings (dict, optional): Additional settings of the step
whose settings need to be extracted. The entry
step_settings['settings'] will be included in the returned
settings. The settings contained in step_settings['settings']
will have priority over those found in the configuration
parameter dictionary. If None, an empty dictionary is used.
Defaults to None.
lookups (list, optional): A list of all scopes for the parent
routine of the step whose settings need to be merged. The
elements of the list will be interpreted in descending order of
priority. If None, [routine_label, RoutineClass] will be used.
Defaults to None.
sublookups (list, optional): A list of scopes for the step whose
settings need to be merged. The elements of the list will be
interpreted in descending order of priority. If None,
[step_label, StepClass] will be used. Defaults to None.
Defaults to None.
Returns:
dict: A dictionary containing the settings extracted from the
configuration parameter dictionary.
"""
if not issubclass(step_class, Step):
raise NotImplementedError("Steps have to inherit from class Step.")
if step_settings is None:
step_settings = {}
# No 'General' lookup since at this point we are only interested
# in retrieving the settings of each step of a routine, not the settings
# of the routine itself
if lookups is None:
lookups = [self.step_label, self.get_lookup_class().__name__]
if sublookups is None:
sublookups = [step_label, step_class.get_lookup_class().__name__]
autocalib_settings = self.settings.copy({
step_class.get_lookup_class().__name__:
self.merge_settings(lookups, sublookups)
})
update_nested_dictionary(autocalib_settings,
step_settings.get('settings', {}))
return autocalib_settings
def create_routine_template(self):
"""Creates routine template. Can be overwritten or extended by children
for more complex routines that require adaptive creation. The settings
for each step are extracted from the configuration parameters
dictionary.
"""
# Create RoutineTemplate based on _DEFAULT_ROUTINE_TEMPLATE
self.routine_template = copy.deepcopy(self._DEFAULT_ROUTINE_TEMPLATE)
for step in self.routine_template:
# Retrieve the step settings from the configuration parameter
# dictionary. The settings will be merged according to the correct
# hierarchy (more specific settings will overwrite less specific
# settings)
step_settings = self.extract_step_settings(step[0], step[1],
step[2])
step[2]['settings'] = step_settings
# standard global settings
delegate_plotting = self.get_param_value('delegate_plotting')
self.routine_template.global_settings.update({
"dev": self.dev,
"update": True, # all subroutines should update relevant params
"delegate_plotting": delegate_plotting,
})
# add user specified global settings
update_nested_dictionary(
self.routine_template.global_settings,
self.kw.get("global_settings", {}),
)
    def split_step_for_parallel_groups(self, index):
        """Replace the step at the given index with multiple steps according
        to the parallel groups defined in the configuration parameter
        dictionary.

        The multiple steps will be added starting from the given index and after
        it (the first one at the given index, the second one at index + 1 and so
        on).

        If no parallel groups are found, the step is left unchanged.

        Args:
            index (int): Index of the step to be replaced with the rearranged
                steps.
        """
        # Get the details of the step to be replaced
        step = self.routine_template[index]
        step_class = step[0]
        step_label = step[1]
        step_settings = step[2]
        try:
            # The 4th entry (temporary values) is optional in a template step.
            step_tmp_settings = step[3]
        except IndexError:
            step_tmp_settings = []
        # Look for the keyword 'parallel_groups' in the settings
        lookups = [
            step_label,
            step_class.get_lookup_class().__name__, 'General'
        ]
        parallel_groups = self.get_param_value('parallel_groups',
                                               sublookups=lookups,
                                               leaf=True)
        if parallel_groups is not None:
            new_step_index = index
            # Remove existing step
            self.routine_template.pop(index)
            for parallel_group in parallel_groups:
                # Find the qubits belonging to parallel_group: either the
                # group names the qubit directly, or the qubit is a member
                # of the named group.
                qubits_filtered = [
                    qb for qb in self.qubits if
                    (qb.name == parallel_group or
                     parallel_group in self.get_qubit_groups(qb.name))
                ]
                # Create a new step for qubits_filtered only and add it to the
                # routine template
                if len(qubits_filtered) != 0:
                    # Deep copy so the per-group steps do not share the
                    # (mutable) settings dictionary of the original step.
                    new_settings = copy.deepcopy(step_settings)
                    new_settings['qubits'] = qubits_filtered
                    self.add_step(step_class,
                                  step_label,
                                  new_settings,
                                  step_tmp_settings,
                                  index=new_step_index)
                    new_step_index += 1
def prepare_step(self, i=None):
"""Prepares the next step in the routine. That is, it initializes the
measurement object. The steps of the routine are instantiated here.
Args:
i (int): Index of the step to be prepared. If None, the default is
set to the current_step_index.
"""
if i is None:
i = self.current_step_index
# Setting step class and settings
step_class = self.get_step_class_at_index(i)
step_settings = self.get_step_settings_at_index(i)
step_label = self.get_step_label_at_index(i)
# Setting the temporary values
self.current_step_tmp_vals = self.get_step_tmp_vals_at_index(i)
# Update print
if self.get_param_value('verbose'):
print(f"{self.name}, step {i} "
f"({self.routine_template.step_name(index=i)}), preparing...")
qubits = step_settings.pop('qubits', self.qubits)
dev = step_settings.pop('dev', self.dev)
autocalib_settings = self.settings.copy(
overwrite_dict=step_settings.pop('settings', {}))
# Executing the step with corresponding settings
if issubclass(step_class, qbcal.SingleQubitGateCalibExperiment) or \
issubclass(step_class, QuantumExperiment):
step = step_class(qubits=qubits,
routine=self,
dev=dev,
step_label=step_label,
settings=autocalib_settings,
**step_settings)
elif issubclass(step_class, IntermediateStep):
step = step_class(routine=self,
dev=dev,
step_label=step_label,
qubits=qubits,
autorun=False,
settings=autocalib_settings,
**step_settings)
elif issubclass(step_class, AutomaticCalibrationRoutine):
step = step_class(routine=self,
dev=dev,
step_label=step_label,
qubits=qubits,
autorun=False,
settings=autocalib_settings,
**step_settings)
else:
raise ValueError(f"automatic subroutine is not compatible (yet)"
f"with the current step class {step_class}")
self.current_step = step
self.current_step_settings = step_settings
def execute_step(self):
"""
Executes the current step (routine.current_step) in the routine and
writes the result in the routine_steps list.
"""
if self.get_param_value('verbose'):
j = self.current_step_index
print(f"{self.name}, step {j} "
f"({self.routine_template.step_name(index=j)}), executing...")
self.current_step.run()
self.current_step.post_run() # optional. Update results for example.
self.routine_steps.append(self.current_step)
self.current_step_index += 1
def run(self, start_index=None, stop_index=None):
"""Runs the complete automatic calibration routine. In case the routine
was already completed, the routine is reset and run again. In case the
routine was interrupted, it will run from the last completed step, the
index of which is saved in the current_step_index attribute of the
routine.
Additionally, it is possible to start the routine from a specific step.
Args:
start_index (int): Index of the step to start with.
stop_index (int): Index of the step to stop before. The step at this
index will NOT be executed. Indices start at 0.
For example, if a routine consists of 3 steps, [step0, step1,
step2], then the method will stop before step2 (and thus after
step1), if stop_index is set to 2.
FIXME: There's an issue when starting from a given start index. The
routine_steps is only wiped if the routine ran completely and is reran
from the start. In the future, it might be good to implement a way so
the user can choose if previous results should be wiped or not (that is,
if routine_steps should be wiped or not).
"""
routine_name = self.name
# Saving instrument settings pre-routine
if (self.get_param_value('save_instrument_settings') or
not self.get_param_value("update")):
# saving instrument settings before the routine
self.MC.create_instrument_settings_file(
f"pre-{self.name}_routine-settings")
self.preroutine_timestamp = a_tools.get_last_n_timestamps(1)[0]
# Rerun routine if already finished
if (len(self.routine_template) != 0) and (self.current_step_index >=
len(self.routine_template)):
self.create_initial_routine(load_parameters=False)
self.run()
self.post_run()
return
# Start and stop indices
if start_index is not None:
self.current_step_index = start_index
elif self.current_step_index >= len(self.routine_template):
self.current_step_index = 0
if stop_index is None:
stop_index = np.Inf
# Running the routine
while self.current_step_index < len(self.routine_template):
j = self.current_step_index
step_name = self.routine_template.step_name(index=j)
# Preparing the next step (incl. temporary values)
self.prepare_step()
# Interrupting if we reached the stop condition
if self.current_step_index >= stop_index:
if self.get_param_value('verbose'):
print(f"Partial routine {routine_name} stopped before "
f"executing step {j} ({step_name}).")
return
# Executing the step
with temporary_value(*self.current_step_tmp_vals):
self.execute_step()
if self.get_param_value('verbose'):
print(f"{routine_name}, step {j} ({step_name}), done!", "\n")
if self.get_param_value('verbose'):
print(f"Routine {routine_name} finished!")
    def post_run(self):
        """Finalizes the routine: optionally saves a post-routine snapshot of
        the instrument settings and, when the 'update' parameter is False,
        reloads the instrument settings captured before the routine ran
        (self.preroutine_timestamp).
        """
        routine_name = self.name
        # Saving instrument settings post-routine
        if (self.get_param_value('save_instrument_settings') or
                not self.get_param_value("update")):
            # Saving instrument settings after the routine
            self.MC.create_instrument_settings_file(
                f"post-{routine_name}_routine-settings")
        # Reloading instrument settings if update is False
        if not self.get_param_value("update"):
            if self.get_param_value('verbose'):
                print(f"Reloading instrument settings from before routine "
                      f"(ts {self.preroutine_timestamp})")
            reload_settings(self.preroutine_timestamp,
                            qubits=self.qubits,
                            dev=self.dev,
                            DCSources=self.DCSources,
                            fluxlines_dict=self.kw.get("fluxlines_dict")
                            )
def create_initial_parameters(self):
"""Adds any keyword passed to the routine constructor to the
configuration parameter dictionary. For an AutomaticCalibrationRoutine,
these keyword will be added to the 'General' scope. These settings
would have priority over the settings specified in the keyword
'settings_user'.
"""
update_nested_dictionary(
self.settings,
{self.highest_lookup: {
self.highest_sublookup: self.kw
}})
def create_initial_routine(self, load_parameters=True):
"""Creates (or recreates) initial routine by defining the routine
template, set routine_steps to an empty array, and setting the
current step to 0.
Args:
load_parameters (bool): Whether to reload the initial parameters.
Defaults to True.
NOTE: This method wipes the results of the previous run stored in
routine_steps.
"""
# Loading initial parameters. Note that if load_parameters=False,
# the parameters are not reloaded and thus remain the same. This is
# desired when wanting to rerun a routine
if load_parameters:
self.create_initial_parameters()
self.create_routine_template()
# making sure all subroutines update relevant parameters
self.routine_template.update_all_step_settings({"update": True})
def final_init(self, **kwargs):
"""A function to be called after the initialization of all base classes,
since some functionality in the init of a routine needs the base
classes already initialized.
"""
# Loading hierarchical settings and creating initial routine
self.create_initial_routine(load_parameters=False)
if self.autorun:
# FIXME: if the init does not finish the object does not exist and
# the routine results are not accessible
try:
self.run()
self.post_run()
except:
log.error(
"Autorun failed to fully run, concluded routine steps "
"are stored in the routine_steps attribute.",
exc_info=True,
)
@property
def parameters_qubit(self):
"""
Returns:
dict: The parameters of the qubit, including the read-out frequency,
the anharmonicity and (if present) the latest Hamiltonian model
parameters containing the total Josephson energy, the charging
energy, voltage per phi0, the dac voltage, the asymmetry, the
coupling constant and bare read-out resonator frequency
(overwriting the previous frb value).
FIXME: The selection of parameters extracted from the qb is currently
tailored to the first example use cases. This either needs to be
generalized to extract more parameters here, or we could decide the
concrete routines could override the method to extract their specific
parameters.
"""
qb = self.qubit
settings = {}
hamfit_model = qb.fit_ge_freq_from_dc_offset()
# Extracting settings from the qubit
settings.update({
"fr": hamfit_model.get("fr", qb.ro_freq()),
"anharmonicity": qb.anharmonicity(),
})
# Getting transmon settings from present Hamiltonian model if it exists
settings.update(hamfit_model)
return settings
    def view(self,
             print_global_settings=True,
             print_general_settings=True,
             print_tmp_vals=False,
             print_results=True,
             **kws
             ):
        """Prints a user-friendly representation of the routine template.

        Args:
            print_global_settings (bool): If True, prints the global settings
                of the routine. Defaults to True.
            print_general_settings (bool): If True, prints the 'General' scope
                of the routine settings. Defaults to True.
            print_tmp_vals (bool): If True, prints the temporary values of the
                routine. Defaults to False.
            print_results (bool): If True, prints the results dicts of all the
                steps of the routine.
            **kws: Unused in this method body; presumably accepted for
                forward/signature compatibility — TODO confirm with callers.
        """
        print(self.name)
        if print_global_settings:
            print("Global settings:")
            pprint.pprint(self.global_settings)
            print()
        if print_general_settings:
            print("General settings:")
            pprint.pprint(self.settings[self.name]['General'])
            print()
        # Each template entry x is indexed as: x[0] class, x[1] label,
        # x[2] settings, x[3] (optional) temporary values.
        for i, x in enumerate(self.routine_template):
            print(f"Step {i}, {x[0].__name__} ({x[1]})")
            print("Settings:")
            pprint.pprint(x[2], indent=4)
            if print_tmp_vals:
                try:
                    print("Temporary values:")
                    pprint.pprint(x[3], indent=4)
                except IndexError:
                    # Steps without temporary values are simply skipped.
                    pass
            print()
        if print_results:
            print_step_results(self)
def update_settings_at_index(self, settings: dict, index):
"""Updates the settings of the step at position 'index'. Wrapper of
the method of RoutineTemplate.
Args:
settings (dict): The new settings that will update the existing
settings of the step at position 'index' in the routine
template.
index (int): Index of the step for which the temporary values should
be used.
"""
self.routine_template.update_settings_at_index(settings, index)
def get_step_class_at_index(self, index):
"""Returns the step class for a specific step in the routine template.
Wrapper of the method of RoutineTemplate.
Args:
index (int): Index of the step for which the settings are to be
returned.
Returns:
class: The class of the step at position 'index' in the routine
template.
"""
return self.routine_template.get_step_class_at_index(index)
def get_step_label_at_index(self, index):
"""Returns the step label for a specific step in the routine template.
Args:
index (int): Index of the step for which the step label is to be
returned.
Returns:
str: The label of the step at position 'index' in the routine
template.
"""
return self.routine_template.get_step_label_at_index(index)
def get_step_settings_at_index(self, index):
"""Returns the settings for a specific step in the routine template.
Args:
index (int): Index of the step for which the settings are to be
returned.
Returns:
dict: The settings dictionary of the step at position 'index' in
the routine template.
"""
return self.routine_template.get_step_settings_at_index(index)
def get_step_tmp_vals_at_index(self, index):
"""Returns the temporary values of the step at index.
Args:
index (int): Index of the step for which the settings are to be
returned.
Returns:
list: The temporary values for the step at position 'index' in the
routine template. Each entry is a tuple made of a
QCoDeS parameter and its temporary value.
"""
return self.routine_template.get_step_tmp_vals_at_index(index)
def extend_step_tmp_vals_at_index(self, tmp_vals, index):
"""Extends the temporary values of the step at index. If the step does
not have any temporary values, it sets the temporary values to the
passed temporary values.
Args:
tmp_vals (list): The temporary values for the step at position
'index' in the routine template. Each entry should be a tuple
made of a QCoDeS parameter and its temporary value.
index (int): Index of the step for which the temporary values should
be used.
"""
self.routine_template.extend_step_tmp_vals_at_index(tmp_vals=tmp_vals,
index=index)
def add_step(self,
step_class: Type[Step],
step_label: str,
step_settings: Optional[Dict[str, Any]] = None,
step_tmp_vals=None,
index=None):
"""Adds a step to the routine template. The settings of the step are
extracted from the configuration parameter dictionary.
Args:
step_class (Step): Class of the step
step_label (str): Label of the step
step_settings (dict, optional): Settings of the step. If any settings
are found in step_settings['settings'], they will have priority
over those found in the configuration parameter dictionary.
step_tmp_vals (list, optional): Temporary values for the step. Each
entry is a tuple made of a QCoDeS parameter and its
temporary value. Defaults to None.
index (int, optional): Index of the routine template at which the
step should be added. If None, the step will be added at the end
of the routine. Defaults to None.
"""
updated_step_settings = self.extract_step_settings(
step_class, step_label, step_settings)
step_settings['settings'] = updated_step_settings
self.routine_template.add_step(step_class,
step_label,
step_settings,
step_tmp_vals=step_tmp_vals,
index=index)
def get_empty_device_properties_dict(self, step_type=None):
"""Returns an empty dictionary of the following structure, for use with
`get_device_property_values`
Example:
.. code-block:: python
{
'step_type': step_type,
'property_values': [],
'timestamp': '20220101_161403',
}
Args:
step_type (str, optional): The name of the step. Defaults to the
class name.
Returns:
dict: An empty results dictionary (i.e., no results)
"""
return {
'step_type':
step_type if step_type is not None else str(
type(self).__name__),
'property_values': [],
'timestamp':
self.preroutine_timestamp,
}
def get_device_property_values(self, **kwargs):
"""Returns a dictionary of high-level device property values from
running this routine, and all of its steps.
`qubit_sweet_spots` can be used to prefix `property_type` based on the
sweet-spots of the qubit. An example for `qubit_sweet_spots` is given
below. `None` as a sweet-spot will not add a prefix.
.. code-block:: python
{
'qb1': 'uss',
'qb4': 'lss',
'qb7': None,
}
An example of what is returned is given below.
Example:
.. code-block:: python
{
'step_type':
'AutomaticCalibrationRoutine',
'timestamp':
'20220101_163859', # from self.preroutine_timestamp
'property_values': [
{
'step_type': 'T1Step',
'property_values': [
{
'qubits': ['qb1'],
'component_type': 'qb',
'property_type': 'ge_T1_time',
'value': 1.6257518120474107e-05,
'timestamp': '20220101_163859',
'folder_name': 'Q:\\....',
},
]
},
{
'step_type': 'RamseyStep',
'property_values': [
{
'qubits': ['qb1'],
'component_type': 'qb',
'property_type': 'ge_t2_echo',
'value': 7.1892927355629493e-06,
'timestamp': '20220101_163859',
'folder_name': 'Q:\\....',
},
]
}
]
}
Returns:
dict: dictionary of high-level device property_values determined by
this routine
"""
results = self.get_empty_device_properties_dict()
for _, step in enumerate(self.routine_steps):
step_i_results = step.get_device_property_values(**kwargs)
results['property_values'].append({
"step_type":
str(type(step).__name__)
if step_i_results.get('step_type') is None else
step_i_results.get('step_type'),
"property_values":
step_i_results['property_values'],
})
return results
@property
def global_settings(self):
"""
Returns:
dict: The global settings of the routine
"""
return self.routine_template.global_settings
@property
def name(self):
"""Returns the name of the routine.
"""
# Name depends on whether the object is initialized.
if type(self) is not type:
return type(self).__name__
else:
try:
return self.__name__
except:
return "AutomaticCalibration"
# Initializing necessary attributes, should/can be overridden by children
_DEFAULT_PARAMETERS = {}
_DEFAULT_ROUTINE_TEMPLATE = RoutineTemplate([])
def keyword_subset(keyword_arguments, allowed_keywords):
    """Return the sub-dictionary of keyword_arguments restricted to
    allowed_keywords.

    Args:
        keyword_arguments (dict): Original dictionary from which the allowed
            keywords will be extracted.
        allowed_keywords (list): List of keywords to pick from the original
            dictionary.

    Returns:
        dict: A new dictionary containing only the allowed keywords and the
            corresponding values found in keyword_arguments.
    """
    selected = set(keyword_arguments).intersection(allowed_keywords)
    return {key: keyword_arguments[key] for key in selected}
def keyword_subset_for_function(keyword_arguments, function):
    """Return the subset of keyword_arguments accepted by function.

    Args:
        keyword_arguments (dict): Original dictionary from which the allowed
            keywords will be extracted.
        function (function): Function from which the allowed arguments are
            extracted.

    Returns:
        dict: A new dictionary containing only the keyword arguments that
            match a named parameter of the given function.
    """
    allowed_keywords = inspect.getfullargspec(function)[0]
    # Inlined filtering (same semantics as keyword_subset): keep only the
    # keys that name a positional parameter of the function.
    selected = set(keyword_arguments).intersection(allowed_keywords)
    return {key: keyword_arguments[key] for key in selected}
def print_step_results(step: Step, routine_name: str = ''):
    """Recursively print the results of *step* and of all its sub-steps.

    Args:
        step (Step): Step whose results (and sub-step results) are printed.
        routine_name (str): Label of the enclosing routine, used to prefix
            the output lines.
    """
    # Skip None and empty result dictionaries.
    if step.results:
        print(f'{routine_name} Step {step.step_label} results:')
        pprint.pprint(step.results)
        print()
    # Routines carry sub-steps in 'routine_steps'; plain steps do not.
    for sub_step in getattr(step, 'routine_steps', []):
        print_step_results(sub_step, routine_name=step.step_label)
|
990,364 | 3e42b7d59de2a8da2fed873088a477e0dfbdfdc0 | # coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class CustomerDataAddressInterface(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'customer_id': 'int',
'region': 'CustomerDataRegionInterface',
'region_id': 'int',
'country_id': 'str',
'street': 'list[str]',
'company': 'str',
'telephone': 'str',
'fax': 'str',
'postcode': 'str',
'city': 'str',
'firstname': 'str',
'lastname': 'str',
'middlename': 'str',
'prefix': 'str',
'suffix': 'str',
'vat_id': 'str',
'default_shipping': 'bool',
'default_billing': 'bool',
'extension_attributes': 'CustomerDataAddressExtensionInterface',
'custom_attributes': 'list[FrameworkAttributeInterface]'
}
attribute_map = {
'id': 'id',
'customer_id': 'customer_id',
'region': 'region',
'region_id': 'region_id',
'country_id': 'country_id',
'street': 'street',
'company': 'company',
'telephone': 'telephone',
'fax': 'fax',
'postcode': 'postcode',
'city': 'city',
'firstname': 'firstname',
'lastname': 'lastname',
'middlename': 'middlename',
'prefix': 'prefix',
'suffix': 'suffix',
'vat_id': 'vat_id',
'default_shipping': 'default_shipping',
'default_billing': 'default_billing',
'extension_attributes': 'extension_attributes',
'custom_attributes': 'custom_attributes'
}
def __init__(self, id=None, customer_id=None, region=None, region_id=None, country_id=None, street=None, company=None, telephone=None, fax=None, postcode=None, city=None, firstname=None, lastname=None, middlename=None, prefix=None, suffix=None, vat_id=None, default_shipping=None, default_billing=None, extension_attributes=None, custom_attributes=None):
"""
CustomerDataAddressInterface - a model defined in Swagger
"""
self._id = None
self._customer_id = None
self._region = None
self._region_id = None
self._country_id = None
self._street = None
self._company = None
self._telephone = None
self._fax = None
self._postcode = None
self._city = None
self._firstname = None
self._lastname = None
self._middlename = None
self._prefix = None
self._suffix = None
self._vat_id = None
self._default_shipping = None
self._default_billing = None
self._extension_attributes = None
self._custom_attributes = None
if id is not None:
self.id = id
if customer_id is not None:
self.customer_id = customer_id
if region is not None:
self.region = region
if region_id is not None:
self.region_id = region_id
if country_id is not None:
self.country_id = country_id
if street is not None:
self.street = street
if company is not None:
self.company = company
if telephone is not None:
self.telephone = telephone
if fax is not None:
self.fax = fax
if postcode is not None:
self.postcode = postcode
if city is not None:
self.city = city
if firstname is not None:
self.firstname = firstname
if lastname is not None:
self.lastname = lastname
if middlename is not None:
self.middlename = middlename
if prefix is not None:
self.prefix = prefix
if suffix is not None:
self.suffix = suffix
if vat_id is not None:
self.vat_id = vat_id
if default_shipping is not None:
self.default_shipping = default_shipping
if default_billing is not None:
self.default_billing = default_billing
if extension_attributes is not None:
self.extension_attributes = extension_attributes
if custom_attributes is not None:
self.custom_attributes = custom_attributes
@property
def id(self):
"""
Gets the id of this CustomerDataAddressInterface.
ID
:return: The id of this CustomerDataAddressInterface.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this CustomerDataAddressInterface.
ID
:param id: The id of this CustomerDataAddressInterface.
:type: int
"""
self._id = id
@property
def customer_id(self):
"""
Gets the customer_id of this CustomerDataAddressInterface.
Customer ID
:return: The customer_id of this CustomerDataAddressInterface.
:rtype: int
"""
return self._customer_id
@customer_id.setter
def customer_id(self, customer_id):
"""
Sets the customer_id of this CustomerDataAddressInterface.
Customer ID
:param customer_id: The customer_id of this CustomerDataAddressInterface.
:type: int
"""
self._customer_id = customer_id
@property
def region(self):
"""
Gets the region of this CustomerDataAddressInterface.
:return: The region of this CustomerDataAddressInterface.
:rtype: CustomerDataRegionInterface
"""
return self._region
@region.setter
def region(self, region):
"""
Sets the region of this CustomerDataAddressInterface.
:param region: The region of this CustomerDataAddressInterface.
:type: CustomerDataRegionInterface
"""
self._region = region
@property
def region_id(self):
"""
Gets the region_id of this CustomerDataAddressInterface.
Region ID
:return: The region_id of this CustomerDataAddressInterface.
:rtype: int
"""
return self._region_id
@region_id.setter
def region_id(self, region_id):
"""
Sets the region_id of this CustomerDataAddressInterface.
Region ID
:param region_id: The region_id of this CustomerDataAddressInterface.
:type: int
"""
self._region_id = region_id
@property
def country_id(self):
"""
Gets the country_id of this CustomerDataAddressInterface.
Country code in ISO_3166-2 format
:return: The country_id of this CustomerDataAddressInterface.
:rtype: str
"""
return self._country_id
@country_id.setter
def country_id(self, country_id):
"""
Sets the country_id of this CustomerDataAddressInterface.
Country code in ISO_3166-2 format
:param country_id: The country_id of this CustomerDataAddressInterface.
:type: str
"""
self._country_id = country_id
@property
def street(self):
"""
Gets the street of this CustomerDataAddressInterface.
Street
:return: The street of this CustomerDataAddressInterface.
:rtype: list[str]
"""
return self._street
@street.setter
def street(self, street):
"""
Sets the street of this CustomerDataAddressInterface.
Street
:param street: The street of this CustomerDataAddressInterface.
:type: list[str]
"""
self._street = street
@property
def company(self):
"""
Gets the company of this CustomerDataAddressInterface.
Company
:return: The company of this CustomerDataAddressInterface.
:rtype: str
"""
return self._company
@company.setter
def company(self, company):
"""
Sets the company of this CustomerDataAddressInterface.
Company
:param company: The company of this CustomerDataAddressInterface.
:type: str
"""
self._company = company
@property
def telephone(self):
"""
Gets the telephone of this CustomerDataAddressInterface.
Telephone number
:return: The telephone of this CustomerDataAddressInterface.
:rtype: str
"""
return self._telephone
@telephone.setter
def telephone(self, telephone):
"""
Sets the telephone of this CustomerDataAddressInterface.
Telephone number
:param telephone: The telephone of this CustomerDataAddressInterface.
:type: str
"""
self._telephone = telephone
@property
def fax(self):
"""
Gets the fax of this CustomerDataAddressInterface.
Fax number
:return: The fax of this CustomerDataAddressInterface.
:rtype: str
"""
return self._fax
@fax.setter
def fax(self, fax):
"""
Sets the fax of this CustomerDataAddressInterface.
Fax number
:param fax: The fax of this CustomerDataAddressInterface.
:type: str
"""
self._fax = fax
@property
def postcode(self):
"""
Gets the postcode of this CustomerDataAddressInterface.
Postcode
:return: The postcode of this CustomerDataAddressInterface.
:rtype: str
"""
return self._postcode
@postcode.setter
def postcode(self, postcode):
"""
Sets the postcode of this CustomerDataAddressInterface.
Postcode
:param postcode: The postcode of this CustomerDataAddressInterface.
:type: str
"""
self._postcode = postcode
@property
def city(self):
"""
Gets the city of this CustomerDataAddressInterface.
City name
:return: The city of this CustomerDataAddressInterface.
:rtype: str
"""
return self._city
@city.setter
def city(self, city):
"""
Sets the city of this CustomerDataAddressInterface.
City name
:param city: The city of this CustomerDataAddressInterface.
:type: str
"""
self._city = city
@property
def firstname(self):
"""
Gets the firstname of this CustomerDataAddressInterface.
First name
:return: The firstname of this CustomerDataAddressInterface.
:rtype: str
"""
return self._firstname
@firstname.setter
def firstname(self, firstname):
"""
Sets the firstname of this CustomerDataAddressInterface.
First name
:param firstname: The firstname of this CustomerDataAddressInterface.
:type: str
"""
self._firstname = firstname
@property
def lastname(self):
"""
Gets the lastname of this CustomerDataAddressInterface.
Last name
:return: The lastname of this CustomerDataAddressInterface.
:rtype: str
"""
return self._lastname
@lastname.setter
def lastname(self, lastname):
"""
Sets the lastname of this CustomerDataAddressInterface.
Last name
:param lastname: The lastname of this CustomerDataAddressInterface.
:type: str
"""
self._lastname = lastname
@property
def middlename(self):
"""
Gets the middlename of this CustomerDataAddressInterface.
Middle name
:return: The middlename of this CustomerDataAddressInterface.
:rtype: str
"""
return self._middlename
@middlename.setter
def middlename(self, middlename):
"""
Sets the middlename of this CustomerDataAddressInterface.
Middle name
:param middlename: The middlename of this CustomerDataAddressInterface.
:type: str
"""
self._middlename = middlename
@property
def prefix(self):
"""
Gets the prefix of this CustomerDataAddressInterface.
Prefix
:return: The prefix of this CustomerDataAddressInterface.
:rtype: str
"""
return self._prefix
@prefix.setter
def prefix(self, prefix):
"""
Sets the prefix of this CustomerDataAddressInterface.
Prefix
:param prefix: The prefix of this CustomerDataAddressInterface.
:type: str
"""
self._prefix = prefix
@property
def suffix(self):
"""
Gets the suffix of this CustomerDataAddressInterface.
Suffix
:return: The suffix of this CustomerDataAddressInterface.
:rtype: str
"""
return self._suffix
@suffix.setter
def suffix(self, suffix):
"""
Sets the suffix of this CustomerDataAddressInterface.
Suffix
:param suffix: The suffix of this CustomerDataAddressInterface.
:type: str
"""
self._suffix = suffix
@property
def vat_id(self):
"""
Gets the vat_id of this CustomerDataAddressInterface.
Vat id
:return: The vat_id of this CustomerDataAddressInterface.
:rtype: str
"""
return self._vat_id
@vat_id.setter
def vat_id(self, vat_id):
"""
Sets the vat_id of this CustomerDataAddressInterface.
Vat id
:param vat_id: The vat_id of this CustomerDataAddressInterface.
:type: str
"""
self._vat_id = vat_id
@property
def default_shipping(self):
"""
Gets the default_shipping of this CustomerDataAddressInterface.
If this address is default shipping address.
:return: The default_shipping of this CustomerDataAddressInterface.
:rtype: bool
"""
return self._default_shipping
@default_shipping.setter
def default_shipping(self, default_shipping):
"""
Sets the default_shipping of this CustomerDataAddressInterface.
If this address is default shipping address.
:param default_shipping: The default_shipping of this CustomerDataAddressInterface.
:type: bool
"""
self._default_shipping = default_shipping
@property
def default_billing(self):
"""
Gets the default_billing of this CustomerDataAddressInterface.
If this address is default billing address
:return: The default_billing of this CustomerDataAddressInterface.
:rtype: bool
"""
return self._default_billing
@default_billing.setter
def default_billing(self, default_billing):
"""
Sets the default_billing of this CustomerDataAddressInterface.
If this address is default billing address
:param default_billing: The default_billing of this CustomerDataAddressInterface.
:type: bool
"""
self._default_billing = default_billing
@property
def extension_attributes(self):
"""
Gets the extension_attributes of this CustomerDataAddressInterface.
:return: The extension_attributes of this CustomerDataAddressInterface.
:rtype: CustomerDataAddressExtensionInterface
"""
return self._extension_attributes
@extension_attributes.setter
def extension_attributes(self, extension_attributes):
"""
Sets the extension_attributes of this CustomerDataAddressInterface.
:param extension_attributes: The extension_attributes of this CustomerDataAddressInterface.
:type: CustomerDataAddressExtensionInterface
"""
self._extension_attributes = extension_attributes
@property
def custom_attributes(self):
"""
Gets the custom_attributes of this CustomerDataAddressInterface.
Custom attributes values.
:return: The custom_attributes of this CustomerDataAddressInterface.
:rtype: list[FrameworkAttributeInterface]
"""
return self._custom_attributes
@custom_attributes.setter
def custom_attributes(self, custom_attributes: "list[FrameworkAttributeInterface]"):
    """
    Sets the custom_attributes of this CustomerDataAddressInterface.
    Custom attributes values.
    :param custom_attributes: The custom_attributes of this CustomerDataAddressInterface.
    :type: list[FrameworkAttributeInterface]
    """
    self._custom_attributes = custom_attributes
def to_dict(self):
    """
    Returns the model properties as a plain dict.

    Nested models (anything exposing a ``to_dict`` method), and lists or
    dicts containing such models, are converted recursively.
    """
    result = {}
    # dict.items() works on Python 2 and 3; the original `iteritems(...)`
    # is a NameError on Python 3 unless six is imported.
    for attr, _ in self.swagger_types.items():
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        else:
            result[attr] = value
    return result
def to_str(self):
    """Return a pretty-printed string of the model's dict representation."""
    snapshot = self.to_dict()
    return pformat(snapshot)
def __repr__(self):
    """
    Debug representation used by `print` and `pprint`; delegates to to_str().
    """
    return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, CustomerDataAddressInterface):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
990,365 | 3f86870ac881235a34962bfeba12779634df4474 | import pandas as pd
import numpy as np
from pandas import DataFrame ,Series
data_train = pd.read_csv("train.csv")
import matplotlib.pyplot as plt
fig = plt.figure()
# figure transparency
fig.set(alpha = 0.2)
# survival counts
plt.subplot2grid((2,3),(0,0))
data_train.Survived.value_counts().plot(kind = 'bar')
plt.title('Surivred')
plt.ylabel('amount')
# passenger-class distribution
plt.subplot2grid((2,3),(0,1))
data_train.Pclass.value_counts().plot(kind = 'bar')
plt.title('pclass')
plt.ylabel('amount')
# age vs. survival scatter
plt.subplot2grid((2,3),(0,2))
plt.scatter(data_train.Survived,data_train.Age)
# NOTE(review): matplotlib renamed grid's `b=` keyword to `visible=` -- confirm mpl version
plt.grid(b=True,which='major',axis='y')
plt.title('age')
plt.ylabel('amount')
# age density per cabin class
plt.subplot2grid((2,3),(1,0),colspan = 2)
data_train.Age[data_train.Pclass == 1].plot(kind='kde')
data_train.Age[data_train.Pclass == 2].plot(kind='kde')
data_train.Age[data_train.Pclass == 3].plot(kind='kde')
plt.xlabel('age')
plt.ylabel('prob_dense')
plt.title('plcass')
plt.legend(('1','2','3舱'),loc = 'best')
# embarkation-port counts
plt.subplot2grid((2,3),(1,2))
data_train.Embarked.value_counts().plot(kind = 'bar')
plt.title('embarked')
plt.ylabel('amount')
plt.show()
# relation between class and survival
from sklearn.ensemble import RandomForestRegressor
def set_missing_ages(df):
    """Fill missing Age values with a RandomForest fit on numeric features.

    Returns the updated frame plus the fitted regressor so test data can be
    imputed with the same model.
    """
    # Numeric columns used to predict age.  NOTE: the Titanic column is
    # spelled 'SibSp' -- the original 'Sibsp' raised a KeyError.
    age_df = df[['Age', 'Fare', 'Parch', 'SibSp', 'Pclass']]
    # Split passengers into known-age and unknown-age rows.
    # (.values replaces the long-removed DataFrame.as_matrix().)
    known_age = age_df[age_df.Age.notnull()].values
    unknown_age = age_df[age_df.Age.isnull()].values
    # Target is the age column; the remaining columns are features.
    y = known_age[:, 0]
    X = known_age[:, 1:]
    rfr = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1)
    rfr.fit(X, y)
    predictedAges = rfr.predict(unknown_age[:, 1:])
    df.loc[(df.Age.isnull()), 'Age'] = predictedAges
    return df, rfr
def set_Cabin_type(df):
    """Collapse the Cabin column to a binary Yes/No 'has cabin' flag."""
    has_cabin = df.Cabin.notnull()
    df.loc[has_cabin, 'Cabin'] = 'Yes'
    df.loc[~has_cabin, 'Cabin'] = 'No'
    return df
# Impute ages and binarise Cabin on the training frame
# (rfr is kept so test data can be imputed with the same model).
data_train,rfr=set_missing_ages(data_train)
data_train =set_Cabin_type(data_train)
def train(epoch):
    """Run one training epoch over `trainloader`.

    NOTE(review): relies on module globals net / opt / criterion /
    trainloader; a fresh Adam optimizer is built each epoch so that
    opt.get_lr(epoch) can set the learning rate -- confirm intended.
    """
    net.train()
    print("train epoch:", epoch)
    optimizer = torch.optim.Adam(net.parameters(), lr=opt.get_lr(epoch))
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if opt.USE_CUDA:
            inputs, targets = inputs.cuda(), targets.cuda()
        # torch.autograd.Variable is a deprecated no-op wrapper on modern PyTorch.
        inputs = torch.autograd.Variable(inputs)
        targets = torch.autograd.Variable(targets)
        optimizer.zero_grad()
        # Three-headed network: one loss per column of `targets`.
        output_1, output_2, output_3 = net(inputs)
        loss_1 = criterion(output_1, targets[:, 0])
        loss_2 = criterion(output_2, targets[:, 1])
        loss_3 = criterion(output_3, targets[:, 2])
        loss = loss_1+loss_2+loss_3
        loss.backward()
        optimizer.step()
    print("train epoch %d finished" % epoch)
990,366 | 028430b4b2946b414336ed6558b62d3b06c1de48 | from libs.naver_shopping.crawler import crawl
from libs.naver_shopping.parser import parse
import json
# Crawl the default listing page and parse product entries from its HTML.
pageString = crawl('')
products = parse(pageString)
print(len(products))
# Export to a JSON file (kept disabled):
##file = open("./products.json", "w+")
##file.write(json.dumps(products))
990,367 | 726d0c257249f2550cabb097464f41a592ab328b | # -*- coding: utf-8 -*-
# @Author : zhang35
# @Time : 2020/10/15 14:25
# @Function:
from database.postsqldb.db import db
from datetime import datetime
from geoalchemy2.types import Geometry
from flask_restful import fields
from geoalchemy2.elements import WKTElement
from statsmodels.tsa.statespace.varmax import VARMAX
from random import random
class ExceptionInfoModel(db.Model):
    """SQLAlchemy model recording an exception interval for a monitored object."""
    __tablename__ = 'exceptioninfo'
    id = db.Column(db.Integer, primary_key=True)
    # External identifier of the object the exception belongs to.
    object_id = db.Column(db.String(50))
    exception_type = db.Column(db.String(255), nullable=False)
    start_time = db.Column(db.DateTime, nullable=False)
    end_time = db.Column(db.DateTime, nullable=False)
    reason = db.Column(db.String(255), nullable=False)
    def update(self, info):
        """Extend this record with a new end time and reason.

        NOTE(review): info['exception_type'] is ignored here -- confirm the
        type never changes while an exception is open.
        """
        self.end_time = info["end_time"]
        self.reason = info["reason"]
    def dictRepr(self):
        """Return a JSON-serialisable dict with formatted timestamps."""
        info = {
            "id": self.id,
            "object_id": self.object_id,
            "start_time": self.start_time.strftime("%Y-%m-%d %H:%M:%S"),
            "end_time": self.end_time.strftime("%Y-%m-%d %H:%M:%S"),
            "exception_type": self.exception_type,
            "reason": self.reason
        }
        return info
990,368 | df12d2262770ec945abc4ceacd1addfabe00e5cd | #Reversing Strings
list1 = ["a","b","c","d"]
print (list1[::-1])
#Reversing Numbers
list2 = [1,3,6,4,2]
print (list2[::-1])
|
990,369 | 338c0899d16b062c3e66edae4031ecb44b4abc8d | from django.db import models
from django.contrib.auth.models import User
class Roles(object):
    """Bit-flag user roles with human-readable labels."""
    ADMIN = 0x01
    CLIENT = 0x02
    # Maps role flag -> display name.
    CHOICES = {
        ADMIN: 'Admin',
        CLIENT: 'Client'}
class UserProfile(models.Model):
    """Extra profile data attached one-to-one to a Django auth User."""
    # NOTE(review): ForeignKey(..., unique=True) is the legacy spelling of
    # OneToOneField -- consider migrating.
    user = models.ForeignKey(User, unique=True)
    occupation = models.CharField(max_length=50,null=True, blank=True)
    address = models.CharField(max_length=50,null=True, blank=True)
    phone = models.CharField(max_length=10,null=True, blank=True)
    place_of_birth = models.CharField(max_length=20,null=True, blank=True)
    date_of_birth = models.DateTimeField(null=True, blank=True)
    photo = models.CharField(max_length=250,null=True, blank=True)
    date_created = models.DateTimeField(auto_now_add=True)
    # NOTE(review): auto_now_add on date_modified never updates after creation;
    # auto_now=True is the usual intent -- confirm.
    date_modified = models.DateTimeField(auto_now_add=True)
    is_active = models.BooleanField()
    # Loyalty points balance.
    point = models.IntegerField(default=0)
    def __unicode__(self):
        return '%s' % (self.user)
class Category(models.Model):
    """Top-level product category."""
    name = models.CharField(max_length=50)
    description = models.CharField(max_length=50)
    def get_sub_category(self):
        """Return the queryset of SubCategory rows belonging to this category."""
        # The original also built an unused `subs_category` list; dropped.
        return SubCategory.objects.filter(category=self.id)
    def __unicode__(self):
        return self.name
class SubCategory(models.Model):
    """Second-level category nested under a Category."""
    category = models.ForeignKey(Category)
    name = models.CharField(max_length=50)
    description = models.CharField(max_length=50)
    def __unicode__(self):
        return self.name
class MasterStore(models.Model):
    """A seller's store; logo/photo fields hold file paths or URLs as text."""
    created_by = models.ForeignKey(User)
    store_name = models.CharField(max_length=50)
    store_address = models.CharField(max_length=50)
    store_city = models.CharField(max_length=10)
    store_logo = models.CharField(max_length=50, null=True, blank=True)
    store_photo = models.CharField(max_length=50, null=True, blank=True)
    date_created = models.DateTimeField(auto_now_add=True)
    store_rating = models.IntegerField()
    def __unicode__(self):
        return self.store_name
class MasterItem(models.Model):
    """A product for sale, linked to its store, category and creator."""
    category = models.ForeignKey(Category)
    subcategory = models.ForeignKey(SubCategory)
    created_by = models.ForeignKey(User)
    store = models.ForeignKey(MasterStore)
    name = models.CharField(max_length=250)
    description = models.TextField()
    picture = models.CharField(max_length=50)
    date_created = models.DateTimeField(auto_now_add=True)
    # NOTE(review): auto_now_add never updates on save; auto_now is the usual
    # intent for a modified timestamp -- confirm.
    date_modified = models.DateTimeField(auto_now_add=True)
    price = models.IntegerField()
    # Loyalty points earned when buying this item.
    point = models.IntegerField()
    def __unicode__(self):
        return self.name
class ItemReview(models.Model):
    """A user's free-text review of an item."""
    item = models.ForeignKey(MasterItem)
    user = models.ForeignKey(User)
    review = models.TextField()
    def __unicode__(self):
        return self.review
class ItemQuestion(models.Model):
    """A user's question about an item, with a helper to collect its answers."""
    item = models.ForeignKey(MasterItem)
    user = models.ForeignKey(User)
    question = models.TextField()
    date_created = models.DateTimeField(auto_now_add=True)
    def get_answers(self):
        """Return [{'answer', 'user', 'photo'}, ...] for this question,
        oldest first, or None when there are no answers (None return kept
        for existing callers)."""
        answers = ItemAnswer.objects.filter(question=self.id).order_by('date_created')
        if not answers:
            return None
        result = []
        for entry in answers:
            try:
                photo = UserProfile.objects.get(user=entry.user).photo
            except Exception:
                # Missing or duplicate profile: fall back to no photo
                # (narrowed from the original bare `except:`).
                photo = None
            result.append({'answer': entry.answer, 'user': entry.user, 'photo': photo})
        return result
    def __unicode__(self):
        return self.question
class ItemAnswer(models.Model):
    """An answer attached to an ItemQuestion."""
    question = models.ForeignKey(ItemQuestion)
    user = models.ForeignKey(User)
    answer = models.TextField()
    date_created = models.DateTimeField(auto_now_add=True)
    def __unicode__(self):
        return self.answer
class News(models.Model):
    """A news article authored by a user; `picture` stores a path/URL string."""
    user = models.ForeignKey(User)
    title = models.CharField(max_length=100)
    content = models.TextField()
    picture = models.CharField(max_length=50)
    date_created = models.DateTimeField(auto_now_add=True)
    def __unicode__(self):
        return self.title
class RequestItem(models.Model):
    """A customer's request for a product that is not yet listed."""
    item_name = models.CharField(max_length=50)
    description = models.TextField()
    date_created = models.DateTimeField(auto_now_add=True)
    def __unicode__(self):
        return '%s-%s' % (self.item_name, self.description)
class AboutUs(models.Model):
    """Free-text 'about us' page content."""
    desc = models.TextField()
    date_created = models.DateTimeField(auto_now_add=True)
    def __unicode__(self):
        return self.desc
class PaymentGateway(models.Model):
    """A payment method (e.g. bank transfer) with its account details."""
    name = models.CharField(max_length=250)
    payment_number = models.CharField(max_length=100, null=True, blank=True)
    payment_name = models.CharField(max_length=100, null=True, blank=True)
    payment_type = models.CharField(max_length=50)
    payment_currency = models.CharField(max_length=50)
    is_active = models.BooleanField(default=True)
    def __unicode__(self):
        return self.name
class Shiping(models.Model):
    """A shipping provider.  (Name misspelling kept -- referenced elsewhere.)"""
    name = models.CharField(max_length=250)
    is_active = models.BooleanField(default=True)
    def __unicode__(self):
        return self.name
class Cities(models.Model):
    """A deliverable city."""
    name = models.CharField(max_length=250)
    is_active = models.BooleanField(default=True)
    def __unicode__(self):
        return self.name
class ShipingCost(models.Model):
    """Price/points cost of a shipping provider to a given city."""
    shiping = models.ForeignKey(Shiping)
    cities = models.ForeignKey(Cities)
    price = models.IntegerField()
    point = models.IntegerField()
class Order(models.Model):
    """A purchase order.

    NOTE(review): `items` is a ForeignKey, so an order references exactly one
    MasterItem despite the plural name -- confirm whether a M2M was intended.
    """
    purchase_date = models.DateTimeField(auto_now_add=True)
    order_number = models.CharField(max_length=50)
    price = models.IntegerField(default=0)
    shipping_cost = models.IntegerField(default=0)
    total_price = models.IntegerField(default=0)
    currency = models.CharField(max_length=5)
    order_status = models.CharField(max_length=50)
    payment = models.ForeignKey(PaymentGateway)
    user = models.ForeignKey(User)
    address = models.TextField()
    cities = models.ForeignKey(Cities)
    items = models.ForeignKey(MasterItem)
    def __unicode__(self):
        return self.order_number
990,370 | 53fc4b03d09762e13cb4b7fc06108b21eabe7edb | #!/usr/bin/python
# Birthday wish program
# Demonstrates keyword arguments and default parameter values
# written for python2
# positional parameters
def birthday1(name, age):
    """Print a birthday greeting using positional parameters (Python 2)."""
    print "Happy birthday,", name, "!", " I hear you're", age, "today.\n"
# parameters with default values
def birthday2(name = "Jackson", age = 1):
    """Print a birthday greeting; both parameters have default values (Python 2)."""
    print "Happy birthday,", name, "!", " I hear you're", age, "today.\n"
# The first parameter gets the first value sent, the second parameter
# gets the second value sent, and so on.
# With this particular function call, it means that name gets "Jackson" and age gets 1
birthday1("Jackson", 1)
# If you switch the positions of two arguments, the parameters get different values.
# So with the call birthday1 name gets the first value, 1 , and age gets
# the second value, "Jackson".
# NOTE: the swapped output is intentional -- it demonstrates positional binding.
birthday1(1, "Jackson")
# Positional parameters get values sent to them in order, unless you tell the
# function otherwise (seen below). Both are valid, since we are giving the values
# by name.
birthday1(name = "Jackson", age = 1)
birthday1(age = 1, name = "Jackson")
# This set of functions demonstrates giving default values
# The first uses name=Jackson, age=1, since the function defines those defaults
# In the second, the name is given, so the default name is not used.
# Same logic goes for the rest.
birthday2()
birthday2(name = "Katherine")
birthday2(age = 12)
birthday2(name = "Katherine", age = 12)
birthday2("Katherine", 12)
# Keep the console window open until the user presses Enter (Python 2).
raw_input("\n\nPress the enter key to exit.")
|
990,371 | ce726fc3fdd7e3119216cef0358b1f8238768406 | __author__ = '368140'
from pymongo import MongoClient
import logging
import logging.config
import json
import os
import sys
# NOTE(review): this logging call fires before logging is configured below.
logging.info('Reading config file')
## Initiating logging
default_logging_path = 'logging.json'
default_level = logging.INFO
env_key = 'LOG_CFG' # This environment variable can be set to load corresponding logging doc
# Reading logging configuration file (env var overrides the default path).
path = default_logging_path
value = os.getenv(env_key, None)
if value:
    path = value
if os.path.exists(path):
    with open(path, 'rt') as f:
        config = json.loads(f.read())
    logging.config.dictConfig(config)
else:
    logging.basicConfig(filename='trubox.log', level=default_level)
# Resolve the directory this script was launched from.
current_working_directory = os.path.dirname(sys.argv[0])
current_working_directory = os.path.abspath(current_working_directory)
logging.info('Reading config file')
try:
    json_data = open(current_working_directory + '/configurationfile.json')
    data = json.load(json_data)
    json_data.close()
    is_config_new = False
# NOTE(review): bare except hides the real error type; exit code 4 = config failure.
except:
    logging.error('Cannot read from config file. Cause: %s', sys.exc_info()[1])
    sys.exit(4)
logging.info("Successfully read from config file")
'''
try:
    mongo_ip = data["MONGOS_CONFIG"]["IP"]
    mongo_port = data["MONGOS_CONFIG"]["PORT"]
except:
    logging.error('Error reading configuration values. Cause: %s', sys.exc_info()[1])
    sys.exit(4)
'''
# NOTE(review): is_config_new is always False above, so only the else branch
# (new-style config keys) is ever taken -- confirm the first branch is dead.
if is_config_new:
    try:
        admin_name = data['COMMON_SETTINGS']['admin']['username']
        admin_pwd = data['COMMON_SETTINGS']['admin']['password']
        tj_read_write_user_name = data['COMMON_SETTINGS']['readwrite']['username']
        tj_read_write_user_pwd = data['COMMON_SETTINGS']['readwrite']['password']
        tj_read_write_user_roles = data['COMMON_SETTINGS']['readwrite']['roles']
        tj_read_user_name = data['COMMON_SETTINGS']['read']['username']
        tj_read_user_pwd = data['COMMON_SETTINGS']['read']['password']
        tj_read_user_roles = data['COMMON_SETTINGS']['read']['roles']
        tj_admin_user_name = data['COMMON_SETTINGS']['Trujunction_username']
        mongodump_path = data['COMMON_SETTINGS']['Mongo_Dump_Path']
        if "mongo" in data:
            mongo_ip = data["mongo"][0]["mongo1"]["IP_Address"]
            mongo_port = data["mongo"][0]["mongo1"]["port"]
        elif "mongoS" in data:
            mongo_ip = data["mongoS"][0]["mongoS1"]["IP_Address"]
            mongo_port = data["mongoS"][0]["mongoS1"]["port"]
    except:
        logging.error('Error reading configuration values. Cause: %s', sys.exc_info()[1])
        sys.exit(4)
else:
    try:
        admin_name = data['USER_ADMIN']['USERNAME']
        admin_pwd = data['USER_ADMIN']['PASSWORD']
        tj_read_write_user_name = data['USER_READ_WRITE']['USERNAME']
        tj_read_write_user_pwd = data['USER_READ_WRITE']['PASSWORD']
        tj_read_write_user_roles = data['USER_READ_WRITE']['roles'].split()
        tj_read_user_name = data['USER_READ']['USERNAME']
        tj_read_user_pwd = data['USER_READ']['PASSWORD']
        tj_read_user_roles = data['USER_READ']['roles'].split()
        mongo_ip = data["MONGOS_CONFIG"]["IP"]
        mongo_port = data["MONGOS_CONFIG"]["PORT"]
        #mongodump_path = data['COMMON_SETTINGS']['mongodump_path']
        #tj_admin_user_name = data['COMMON_SETTINGS']['Trujunction_username']
    except:
        logging.error('Error reading configuration values. Cause: %s', sys.exc_info()[1])
        sys.exit(4)
# Names of the tenant collection / id field and the schema collection.
company_collection = ['TENANT', 'TENANT_ID']
json_schema_collection_name = "COLLECTION_JSON_SCHEMA"
def create_connection():
    """Connect to the mongos instance named in the MONGOS_CONFIG section.

    Exits the process (code 5) when the config key is missing or the
    connection cannot be established, mirroring the module's other fatal
    error paths; otherwise returns the MongoClient.
    """
    if "MONGOS_CONFIG" not in data:
        # Message now names the key actually checked (the original mentioned
        # 'mongoS' / 'mongo', which this function never looks for).
        logging.error("Cannot find key 'MONGOS_CONFIG' in configuration file")
        sys.exit(5)
    mongo_ip = data["MONGOS_CONFIG"]["IP"]
    mongo_port = data["MONGOS_CONFIG"]["PORT"]
    logging.info("Trying to connect to mongo instance %s:%s", mongo_ip, mongo_port)
    try:
        connection = MongoClient(mongo_ip, int(mongo_port))  # connecting to mongos instance
    except Exception:
        logging.error('Cannot connect to mongo instance. Cause: %s', sys.exc_info()[1])
        sys.exit(5)
    return connection
990,372 | ccf4d493740c1f1a0e10d7d732441b8b7c52e9ff | import FWCore.ParameterSet.Config as cms
# CMSSW job configuration for the A/H -> e + tau analysis: builds the PAT
# tuple, applies the event selection, fills histograms and saves plots.
# (`cms` is FWCore.ParameterSet.Config, imported at the top of the file.)
import copy
import sys
# Deeply nested cms.PSet structures exceed the default recursion limit.
sys.setrecursionlimit(10000)
process = cms.Process('runAHtoElecTau')
# import of standard configurations for RECOnstruction
# of electrons, muons and tau-jets with non-standard isolation cones
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.MessageLogger.cerr.FwkReport.reportEvery = 100
#process.MessageLogger.cerr.threshold = cms.untracked.string('INFO')
process.MessageLogger.suppressWarning = cms.untracked.vstring(
    "PATTriggerProducer",
    "PATElecTauPairProducer",
    # Supress warnings in DiTau hist manager
    "analyzeAHtoElecTauEventsOS_woBtag",
    "analyzeAHtoElecTauEventsOS_wBtag",
    "analyzeAHtoElecTauEventsSS_woBtag",
    "analyzeAHtoElecTauEventsSS_wBtag"
)
process.load('Configuration/StandardSequences/GeometryIdeal_cff')
process.load('Configuration/StandardSequences/MagneticField_cff')
#process.load('Configuration/StandardSequences/Reconstruction_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.GlobalTag.globaltag = cms.string('START42_V11::All')
#--------------------------------------------------------------------------------
# import sequences for PAT-tuple production
process.load("TauAnalysis.Configuration.producePatTuple_cff")
process.load("TauAnalysis.Configuration.producePatTupleAHtoElecTauSpecific_cff")
# import sequence for event selection
process.load("TauAnalysis.Configuration.selectAHtoElecTau_cff")
process.load("TauAnalysis.RecoTools.filterDataQuality_cfi")
# import sequence for filling of histograms, cut-flow table
# and of run + event number pairs for events passing event selection
process.load("TauAnalysis.Configuration.analyzeAHtoElecTau_cff")
#--------------------------------------------------------------------------------
# print memory consumed by cmsRun
# (for debugging memory leaks)
#process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck",
#    ignoreTotal = cms.untracked.int32(1) # default is one
#)
process.printGenParticleList = cms.EDAnalyzer("ParticleListDrawer",
    src = cms.InputTag("genParticles"),
    maxEventsToPrint = cms.untracked.int32(100)
)
# print debug information whenever plugins get loaded dynamically from libraries
# (for debugging problems with plugin related dynamic library loading)
#process.add_( cms.Service("PrintLoadingPlugins") )
#--------------------------------------------------------------------------------
process.DQMStore = cms.Service("DQMStore")
#process.savePatTuple = cms.OutputModule("PoolOutputModule",
#    patTupleEventContent,
#    fileName = cms.untracked.string('patTuple.root')
#)
process.saveAHtoElecTauPlots = cms.EDAnalyzer("DQMSimpleFileSaver",
    outputFileName = cms.string('plotsAHtoElecTau.root')
)
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
        'file:/store/user/jkolb/VBF_HToTauTau_M-120_7TeV-powheg-pythia6-tauola/skimElecTau_428_Fall11_v1/6aa5d932edddb97c8f87b85a020d9993/elecTauSkim_1_1_yQv.root'
    )
    #skipBadFiles = cms.untracked.bool(True)
)
HLTprocessName = "HLT" # use for 2011 Data
##HLTprocessName = "REDIGI311X" # use for Spring'11 reprocessed MC
#--------------------------------------------------------------------------------
# import utility function for switching pat::Tau input
# to different reco::Tau collection stored on AOD
from PhysicsTools.PatAlgos.tools.tauTools import *
# comment-out to take reco::CaloTaus instead of reco::PFTaus
# as input for pat::Tau production
#switchToCaloTau(process)
# comment-out to take shrinking dR = 5.0/Et(PFTau) signal cone
# instead of fixed dR = 0.07 signal cone reco::PFTaus
# as input for pat::Tau production
#switchToPFTauShrinkingCone(process)
#switchToPFTauFixedCone(process)
#switchToPFTauHPSpTaNC(process)
switchToPFTauHPS(process)
# disable preselection on of pat::Taus
# (disabled also in TauAnalysis/RecoTools/python/patPFTauConfig_cfi.py ,
#  but re-enabled after switching tau collection)
process.cleanPatTaus.preselection = cms.string('')
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# import utility function for managing pat::Jets
from PhysicsTools.PatAlgos.tools.jetTools import *
# uncomment to replace caloJets by pfJets
switchJetCollection(process,
    jetCollection = cms.InputTag("ak5PFJets"),
    jetCorrLabel = ('AK5PF', ['L1FastJet', 'L2Relative', 'L3Absolute']),
    doBTagging = True,
    outputModule = "")
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# import utility function for configuring PAT trigger matching
from PhysicsTools.PatAlgos.tools.trigTools import switchOnTriggerMatching, switchOnTrigger
# make trigger-matched collections of electrons and taus
#from PhysicsTools.PatAlgos.triggerLayer1.triggerMatcher_cfi import cleanElectronTriggerMatchHLTEle27CaloIdVTCaloIsoTTrkIdTTrkIsoT
#process.cleanElectronTriggerMatchHLTElectronPlusTau = cleanElectronTriggerMatchHLTEle27CaloIdVTCaloIsoTTrkIdTTrkIsoT.clone()
#process.cleanElectronTriggerMatchHLTElectronPlusTau.matchedCuts = cms.string( 'path( "HLT_Ele*_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_*IsoPFTau20_v*" )' )
# do matching
#switchOnTriggerMatching(process, triggerMatchers = [ 'cleanElectronTriggerMatchHLTElectronPlusTau' ], hltProcess = HLTprocessName, outputModule = '')
switchOnTrigger(process, hltProcess = HLTprocessName, outputModule = '')
#process.patTrigger.addL1Algos = cms.bool(True)
from TauAnalysis.Configuration.cfgOptionMethods import _setTriggerProcess
_setTriggerProcess(process, cms.InputTag("TriggerResults", "", HLTprocessName))
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# import utility function for managing pat::METs
import TauAnalysis.Configuration.tools.metTools as metTools
# uncomment to add pfMET
# set Boolean swich to true in order to apply type-1 corrections
metTools.addPFMet(process, correct = False)
# uncomment to replace caloMET by pfMET in all di-tau objects
process.load("TauAnalysis.CandidateTools.diTauPairProductionAllKinds_cff")
metTools.replaceMETforDiTaus(process, cms.InputTag('patMETs'), cms.InputTag('patPFMETs'))
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# do not produce momentum-corrected muons
from TauAnalysis.RecoTools.patLeptonSelection_cff import patMuonSelConfigurator
setattr(patMuonSelConfigurator, "src", "cleanPatMuons" )
process.selectPatMuons = patMuonSelConfigurator.configure(process = process)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# make cut changes
from TauAnalysis.Configuration.tools.changeCut import changeCut
# update all tau discriminant to HPS types
changeCut(process,"selectedPatTausLeadTrk",'tauID("decayModeFinding")')
changeCut(process,"selectedPatTausLeadTrkPt",'tauID("decayModeFinding")')
changeCut(process,"selectedPatTausTaNCdiscr",'tauID("byLooseCombinedIsolationDeltaBetaCorr")')
#--------------------------------------------------------------------------------
# before starting to process 1st event, print event content
process.printEventContent = cms.EDAnalyzer("EventContentAnalyzer")
process.filterFirstEvent = cms.EDFilter("EventCountFilter",
    numEvents = cms.int32(1)
)
process.n = cms.Path(process.filterFirstEvent + process.printEventContent)
process.o = cms.Path(process.dataQualityFilters)
# Define a generic end path that filters the final events that a pool
# output module can be hooked into if desired.
process.filterFinalEvents = cms.EDFilter("BoolEventFilter",
    src = cms.InputTag("isRecAHtoElecTau")
)
process.p = cms.Path(
    process.producePatTupleAHtoElecTauSpecific
#    + process.printGenParticleList # uncomment to enable print-out of generator level particles
#    + process.printEventContent    # uncomment to enable dump of event content after PAT-tuple production
    + process.selectAHtoElecTauEvents
    + process.analyzeAHtoElecTauSequence
    + process.saveAHtoElecTauPlots
    + process.isRecAHtoElecTau
    + process.filterFinalEvents
)
# Dummy do-nothing module to allow an empty path
process.dummy = cms.EDProducer("DummyModule")
# Path that option output modules can be hooked into
process.endtasks = cms.EndPath(process.dummy)
process.schedule = cms.Schedule(
    process.n,
    process.o,
    process.p,
    process.endtasks
)
#process.options = cms.untracked.PSet(
#    wantSummary = cms.untracked.bool(True)
#)
#--------------------------------------------------------------------------------
# import utility function for switching HLT InputTags when processing
# RECO/AOD files produced by MCEmbeddingTool
from TauAnalysis.MCEmbeddingTools.tools.switchInputTags import switchInputTags
#
# comment-out to switch HLT InputTags
#switchInputTags(process)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# import utility function for factorization
from TauAnalysis.Configuration.tools.factorizationTools import enableFactorization_runAHtoElecTau
#
# define "hook" for enabling/disabling factorization
# in case running jobs on the CERN batch system
# (needs to be done after process.p has been defined)
#
#__#factorization#
##enableFactorization_runAHtoElecTau(process)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# import utility function for applyting Z-recoil corrections to MET
from TauAnalysis.Configuration.tools.mcToDataCorrectionTools import applyZrecoilCorrection_runAHtoElecTau
##applyZrecoilCorrection_runAHtoElecTau(process)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# import utility function for applyting electron trigger correction factor
from TauAnalysis.Configuration.tools.mcToDataCorrectionTools import applyElectronTriggerEfficiencyCorrection_runAHtoElecTau
#applyElectronTriggerEfficiencyCorrection_runAHtoElecTau(process)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# disable event-dump output
# in order to reduce size of log-files
#process.disableEventDump = cms.PSet()
if hasattr(process, "disableEventDump"):
    process.analyzeAHtoElecTauEventsOS_wBtag.eventDumps = cms.VPSet()
    process.analyzeAHtoElecTauEventsOS_woBtag.eventDumps = cms.VPSet()
    process.analyzeAHtoElecTauEventsSS_wBtag.eventDumps = cms.VPSet()
    process.analyzeAHtoElecTauEventsSS_woBtag.eventDumps = cms.VPSet()
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# import utility to remove modules operating on GEN-level collections
from TauAnalysis.Configuration.tools.switchToData import *
#
# uncomment when running over DATA samples
##switchToData(process)#
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# import utility function for disabling estimation of systematic uncertainties
#
from TauAnalysis.Configuration.tools.sysUncertaintyTools import enableSysUncertainties_runAHtoElecTau
#
# define "hook" for keeping enabled/disabling estimation of systematic uncertainties
# in case running jobs on the CERN batch system
# (needs to be done after process.p has been defined)
#__#systematics#
##enableSysUncertainties_runAHtoElecTau(process)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# sequence to produce full PAT
#
process.producePatTupleAll = cms.Sequence(process.producePatTuple + process.producePatTupleAHtoElecTauSpecific)
#
#--------------------------------------------------------------------------------
# print-out all python configuration parameter information
#print process.dumpPython()
990,373 | cf397f4eda58325ccd6a4d875510fccf751c88f8 | from medium import Client
import webbrowser
import sys
import json
import requests
from fake_useragent import UserAgent
import requests  # NOTE(review): duplicate of the import above -- safe to drop
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from settings import *
# OAuth redirect target registered with the Medium application.
callback_url = "https://lucys-anime-server.herokuapp.com"
# Randomized User-Agent generator used for the private-API requests below.
ua = UserAgent()
PRIVATE_API_URL = "https://medium.com/_/api"
ME_URL = "https://medium.com/me"
def post_url(post_id):
    """Return the private-API endpoint for the post with the given id."""
    return "%s/posts/%s/" % (PRIVATE_API_URL, post_id)
def post_responses_url(post_id, filter_args="best"):
    """Return the responses endpoint for a post, filtered (default "best")."""
    base = post_url(post_id)
    return base + "responses?filter=" + filter_args
def topic_subscription_url(topic):
    """Return the current user's subscription endpoint for a topic."""
    return "%s/subscriptions/topic/%s" % (ME_URL, topic)
def collection_subscription_url(publication):
    """Return the current user's subscription endpoint for a publication."""
    return "%s/subscriptions/collection/%s" % (ME_URL, publication)
def activity_url():
    """Return the current user's activity-feed endpoint."""
    return "%s/activity" % ME_URL
def remove_prefix(text, prefix):
    """Return `text` without `prefix` when it starts with it, else unchanged."""
    return text[len(prefix):] if text.startswith(prefix) else text
def fix_medium_json_response(data):
    """Strip Medium's anti-JSON-hijacking guard from a response body."""
    guard = "])}while(1);</x>"
    return remove_prefix(data, guard)
def get_activity(access_token):
    """Fetch the logged-in user's activity feed from Medium's private API.

    Uses a randomized User-Agent and the module-level COOKIE (from settings);
    returns the decoded JSON payload as a dict.
    """
    headers = {
        "Accept": "application/json",
        "Accept-Charset": "utf-8",
        "Authorization": "Bearer %s" % access_token,
        "User-Agent":str(ua.random),
        "x-xsrf-token": access_token,
        "cookie": COOKIE,
        "content-type": "application/json"
    }
    url = activity_url()
    r = requests.get(url, headers = headers)
    # Medium prefixes JSON bodies with an anti-hijacking guard; strip it first.
    data = json.loads(fix_medium_json_response(r.text))
    return data
def subscribe(access_token, topic=None, publication=None):
    """Subscribe the logged-in user to a Medium topic or publication.

    Exactly one of `topic` / `publication` should be given; spaces in the
    name are replaced with hyphens to match Medium's slug format.
    """
    if topic is None and publication is None:
        # The original fell through to publication.replace(...) and crashed
        # with AttributeError when both were omitted.
        print("Either topic or publication must be provided")
        return
    if topic is not None and publication is not None:
        # Original message wrongly said "can't both be none" on this branch.
        print("Only one of topic and publication may be provided")
        return
    if topic is None:
        slug = publication.replace(" ", "-")
        url = collection_subscription_url(slug)
    else:
        slug = topic.replace(" ", "-")
        url = topic_subscription_url(slug)
    print(url)
    headers = {
        "Accept": "application/json",
        "Accept-Charset": "utf-8",
        "Authorization": "Bearer %s" % access_token,
        "User-Agent": str(ua.random),
        "x-xsrf-token": access_token,
        "cookie": COOKIE
    }
    r = requests.put(url, headers=headers)
    print(r.text)
def get_home_articles(access_token):
    """Print the private-API URL of each post on the user's home feed."""
    headers = {
        "Accept": "application/json",
        "Accept-Charset": "utf-8",
        "Authorization": "Bearer %s" % access_token,
        "User-Agent": str(ua.random),
        "x-xsrf-token": access_token,
        "cookie": COOKIE
    }
    try:
        response = requests.get(PRIVATE_API_URL + "/home-feed", headers=headers)
        payload = json.loads(fix_medium_json_response(response.text))
        for entry in payload["payload"]["streamItems"]:
            kind = entry["itemType"]
            if kind == "extremePostPreview":
                # Single featured post: print its URL directly.
                print(post_url(entry["extremePostPreview"]["postId"]))
            elif kind == "extremeAdaptiveSection" and "items" in entry:
                # A section groups several posts under "items".
                for item in entry["items"]:
                    print("----extremeAdaptiveSection!!!!")
                    print(post_url(item["post"]["postId"]))
    except requests.exceptions.ConnectionError:
        print("Connection refused")
if __name__ == '__main__':
    do_auth = True
    if do_auth:
        # NOTE(review): `Client` is not imported anywhere in this file -- it
        # looks like the Medium SDK client; confirm `from settings import *`
        # (or a missing `from medium import Client`) is meant to provide it.
        # Go to http://medium.com/me/applications to get your application_id and application_secret.
        client = Client(application_id=MEDIUM_CLIENT_ID, application_secret=MEDIUM_CLIENT_SECRET)
        # Build the URL where you can send the user to obtain an authorization code.
        auth_url = client.get_authorization_url("secretstate", callback_url,
                                                ["basicProfile", "publishPost", "listPublications"])
        # (Send the user to the authorization URL to obtain an authorization code.)
        print(auth_url)
        webbrowser.open(auth_url, new=2)
        # The user pastes the code from the redirected URL back on stdin.
        print("Authorization code (at the end of the url that was just opened):")
        authorization_code = sys.stdin.readline().strip()
        # Exchange the authorization code for an access token.
        auth = client.exchange_authorization_code(authorization_code,
                                                  callback_url)
        # The access token is automatically set on the client for you after
        # a successful exchange, but if you already have a token, you can set it
        # directly.
        client.access_token = auth["access_token"]
        # Get profile details of the user identified by the access token.
        # user = client.get_current_user()
        # print(user)
        # Get publications
        # publications = client._request("GET", "/v1/users/" + user["id"] + "/publications")
        # print(publications)
        # get_home_articles(client.access_token)
        # subscribe(client.access_token, publication="greylock perspectives")
        print(get_activity(client.access_token))
        # # Create a draft post.
        # post = client.create_post(user_id=user["id"], title="Title", content="<h2>Title</h2><p>Content</p>",
        #                           content_format="html", publish_status="draft")
        # # When your access token expires, use the refresh token to get a new one.
        # client.exchange_refresh_token(auth["refresh_token"])
        # # Confirm everything went ok. post["url"] has the location of the created post.
        # print("My new post!", post["url"])
990,374 | 5a82b3f8af0f23d356db3142a02753c1ff90542b | """
.. module: historical.s3.poller
:platform: Unix
:copyright: (c) 2017 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. author:: Mike Grima <mgrima@netflix.com>
"""
import os
import uuid
import logging
import boto3
from botocore.exceptions import ClientError
from cloudaux.aws.s3 import list_buckets
from raven_python_lambda import RavenLambdaWrapper
from historical.constants import CURRENT_REGION, HISTORICAL_ROLE
from historical.s3.models import s3_polling_schema
from historical.common.accounts import get_historical_accounts
# Module-level logger; INFO level so poller progress shows up in CloudWatch.
logging.basicConfig()
log = logging.getLogger("historical")
log.setLevel(logging.INFO)
def get_record(all_buckets, index, account):
    """Build one Kinesis record describing a single S3 bucket."""
    bucket = all_buckets[index]
    # Drop tz/microseconds so the timestamp is a stable, second-resolution UTC string.
    creation = bucket["CreationDate"].replace(tzinfo=None, microsecond=0)
    payload = s3_polling_schema.serialize_me(account, {
        "bucket_name": bucket["Name"],
        "creation_date": creation.isoformat() + "Z"
    })
    return {
        "Data": bytes(payload, "utf-8"),
        "PartitionKey": uuid.uuid4().hex
    }
def create_polling_event(account, stream):
    """Push one polling record per S3 bucket in *account* onto *stream*.

    Records are sent to Kinesis in batches of at most MAX_BUCKET_BATCH
    (default 50) per put_records call.

    :param account: AWS account number to list buckets in.
    :param stream: Name of the target Kinesis stream.
    """
    # Place onto the S3 Kinesis stream each S3 bucket for each account...
    # This should probably fan out on an account-by-account basis (we'll need to examine if this is an issue)
    all_buckets = list_buckets(account_number=account,
                               assume_role=HISTORICAL_ROLE,
                               session_name="historical-cloudwatch-s3list",
                               region=CURRENT_REGION)["Buckets"]
    client = boto3.client("kinesis", region_name=CURRENT_REGION)
    limiter = int(os.environ.get("MAX_BUCKET_BATCH", 50))
    # IMPROVEMENT: a single chunked loop replaces the original duplicated
    # whole-batch + remainder bookkeeping; behavior (batch sizes and record
    # order) is unchanged.
    for start in range(0, len(all_buckets), limiter):
        end = min(start + limiter, len(all_buckets))
        records = [get_record(all_buckets, index, account)
                   for index in range(start, end)]
        client.put_records(Records=records, StreamName=stream)
@RavenLambdaWrapper()
def handler(event, context):
    """
    Historical S3 event poller.
    This poller is run at a set interval in order to ensure that changes do not go undetected by historical.
    Historical pollers generate `polling events` which simulate changes. These polling events contain configuration
    data such as the account/region defining where the collector should attempt to gather data from.
    """
    log.debug('Running poller. Configuration: {}'.format(event))
    accounts_polled = 0
    for account in get_historical_accounts():
        # Skip accounts that have role assumption errors:
        try:
            create_polling_event(
                account['id'],
                os.environ.get("HISTORICAL_STREAM", "HistoricalS3PollerStream"))
            accounts_polled += 1
        except ClientError as e:
            log.warning('Unable to generate events for account. AccountId: {account_id} Reason: {reason}'.format(
                account_id=account['id'],
                reason=e
            ))
    # BUG FIX: previously this logged len(account['id']) -- the character
    # length of the *last* account's ID string -- which was meaningless.
    log.debug('Finished generating polling events. Accounts polled: {}'.format(accounts_polled))
|
990,375 | 28411c43ca332b061a8422a832fa7e92c159d849 | import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): `rps` and `rphs` are not defined anywhere in this script --
# presumably they are computed/loaded in an interactive session before this
# snippet runs; confirm their source.
top_n = 13
num_terms = 100000  # NOTE(review): appears unused here -- TODO confirm
# One histogram bin per tf-idf rank, 1..top_n inclusive.
bins = [i for i in range(1, top_n+2)]
fig, ax1 = plt.subplots(figsize=(11, 8.5))
# Scale document frequencies to thousands for the left y-axis.
scaled_rps = [ [val/1000.0 for val in row] for row in rps]
ax1.boxplot(scaled_rps, positions = [i+0.5 for i in range(1,14)])
ax1.axis([1,15,0,250])
ax1.set_xlabel('per-patent tf-idf rank of shared term')
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('document frequency of shared term, x1,000 \n(median/quartile box-and-whisker plots)')
# Second y-axis: normalized count of shared terms per rank.
ax2 = ax1.twinx()
# NOTE(review): `normed=` has been removed from matplotlib's hist in modern
# releases (replaced by `density=`); this only runs on old matplotlib.
ax2.hist(rphs, alpha=0.1, bins=bins, normed=True)
ax2.set_ylabel('normed number of shared terms per rank')
plt.title('doc-freq distribution and number of shared terms per tf-idf rank\nrand pairs of patents, p1 and p2 data combined')
plt.show()
|
990,376 | 9f40508f3c3671da7a79dfc1ce89b69f0dfa9ce6 | from random import randint
class BinarySearch():
    """Array-backed binary search tree.

    Node ``i`` keeps its left child at ``2*i + 1`` and its right child at
    ``2*i + 2``; empty slots hold ``None``.
    """

    def __init__(self):
        # Slot 0 is the root of the (initially empty) tree.
        self.tree = [None]

    def add_element(self, element):
        """Insert *element* (duplicates are ignored) and return the backing list."""
        current_index = 0
        while True:
            # IDIOM FIX: compare against None with `is`, not `==`.
            if self.tree[current_index] is None:
                self.tree[current_index] = element
                # Grow the array so both children of the new node exist,
                # keeping the search loop's indexing safe.
                while len(self.tree) < 2 * current_index + 3:
                    self.tree.append(None)
                return self.tree
            elif element < self.tree[current_index]:
                current_index = 2 * current_index + 1
            elif element > self.tree[current_index]:
                current_index = 2 * current_index + 2
            else:
                # Duplicate value: already present, nothing to do.
                return self.tree

    def search(self, element):
        """Return True if *element* is in the tree, else False."""
        current_index = 0
        while self.tree[current_index] is not None:
            if element == self.tree[current_index]:
                return True
            elif element < self.tree[current_index]:
                current_index = 2 * current_index + 1
            elif element > self.tree[current_index]:
                current_index = 2 * current_index + 2
        return False
# Build a tree from 1000 random values, then answer membership queries
# interactively until interrupted.
test_tree = BinarySearch()
for value in (randint(0, 500) for _ in range(1000)):
    test_tree.add_element(value)
while True:
    query = int(input("Pick a test number: "))
    print(test_tree.search(query))
|
990,377 | 480201c20039b4415cfe75da92fcc914db705a68 | import os
import chi
import numpy as np
import pints
def define_data_generating_model():
    """Construct the ground-truth model used to simulate the dataset.

    Returns a tuple ``(mechanistic_model, predictive_model, parameters)``
    where *parameters* are the data-generating population parameters.
    """
    # Define mechanistic model
    directory = os.path.dirname(os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
    mechanistic_model = chi.SBMLModel(
        directory + '/models/dixit_growth_factor_model.xml')
    mechanistic_model = chi.ReducedMechanisticModel(mechanistic_model)
    mechanistic_model.set_outputs([
        'central.receptor_active_concentration',
        'central.receptor_inactive_concentration'])
    # Fix initial amounts / compartment size so only rates remain free.
    mechanistic_model.fix_parameters({
        'central.receptor_active_amount': 0,
        'central.receptor_inactive_amount': 0,
        'central.ligand_amount': 2,
        'central.size': 1
    })
    # Define error model
    error_models = [
        chi.LogNormalErrorModel(),  # active receptor conc.
        chi.LogNormalErrorModel()]  # inactive receptor conc.
    # Define population model: activation/production rates vary between
    # individuals (Gaussian); the remaining rates are pooled (shared).
    population_model = chi.ComposedPopulationModel([
        chi.GaussianModel(dim_names=['Activation rate']),
        chi.PooledModel(n_dim=3, dim_names=[
            'Deactivation rate', 'Deg. rate (act.)', 'Deg. rate (inact.)']),
        chi.GaussianModel(dim_names=['Production rate']),
        chi.PooledModel(dim_names=['Sigma act.']),
        chi.PooledModel(dim_names=['Sigma inact.'])])
    predictive_model = chi.PredictiveModel(mechanistic_model, error_models)
    predictive_model = chi.PopulationPredictiveModel(
        predictive_model, population_model)
    # Define model paramters
    parameters = [
        1.7,    # Mean activation rate
        0.05,   # Std. activation rate
        8,      # deactivation rate
        0.015,  # degradation rate (active)
        0.25,   # degradation rate (inactive)
        1.7,    # Mean production rate
        0.05,   # Std. production rate
        0.05,   # Sigma act.
        0.05]   # Sigma inact.
    return mechanistic_model, predictive_model, parameters
def generate_measurements(predictive_model, parameters):
    """Simulate snapshot measurements: one timepoint per (simulated) individual.

    Samples 5000 individuals at 5 timepoints, then assigns a disjoint slice of
    1000 individuals to each timepoint so no individual is measured twice.
    Returns ``(measurements, times)`` with measurements shaped
    (n_ids, n_observables, n_times).
    """
    # Simulate measurements
    seed = 2
    n_ids = 5000
    times = np.array([1, 5, 10, 15, 20])
    dense_measurements = predictive_model.sample(
        parameters, times, n_samples=n_ids, seed=seed, return_df=False)
    # Keep only one measurement per individual
    n_ids = 1000
    n_times = len(times)
    n_observables = 2
    measurements = np.empty(shape=(n_ids, n_observables, n_times))
    for idt in range(n_times):
        # Timepoint idt gets samples [idt*1000, (idt+1)*1000) of the 5000.
        start_ids = idt * n_ids
        end_ids = (idt + 1) * n_ids
        measurements[:, 0, idt] = dense_measurements[0, idt, start_ids:end_ids]
        measurements[:, 1, idt] = dense_measurements[1, idt, start_ids:end_ids]
    return measurements, times
def define_log_posterior(measurements, times, mechanistic_model, sigma):
    """Assemble the population-filter log-posterior for inference.

    *sigma* fixes the measurement-noise standard deviations (they are not
    inferred). The error-model dimensions are therefore absent from the
    population model and the prior below.
    """
    # Define population filter log-posterior
    population_filter = chi.GaussianFilter(measurements)
    # Non-centered parameterisation (centered=False) helps the sampler in
    # hierarchical models.
    population_model = chi.ComposedPopulationModel([
        chi.GaussianModel(dim_names=['Activation rate'], centered=False),
        chi.PooledModel(n_dim=3, dim_names=[
            'Deactivation rate', 'Deg. rate (act.)', 'Deg. rate (inact.)']),
        chi.GaussianModel(dim_names=['Production rate'], centered=False)])
    log_prior = pints.ComposedLogPrior(
        pints.GaussianLogPrior(2, 0.5),        # Mean activation rate
        pints.LogNormalLogPrior(-2, 0.5),      # Std. activation rate
        pints.GaussianLogPrior(10, 2),         # deactivation rate
        pints.GaussianLogPrior(0.02, 0.005),   # degradation rate (active)
        pints.GaussianLogPrior(0.3, 0.05),     # degradation rate (inactive)
        pints.GaussianLogPrior(2, 0.5),        # Mean production rate
        pints.LogNormalLogPrior(-2, 0.5))      # Std. production rate
    log_posterior = chi.PopulationFilterLogPosterior(
        population_filter, times, mechanistic_model, population_model,
        log_prior, sigma=sigma)
    return log_posterior
def run_inference(log_posterior):
    """Sample the posterior with NUTS and save the chain to a NetCDF file."""
    # Run inference
    seed = 2
    controller = chi.SamplingController(log_posterior, seed=seed)
    controller.set_n_runs(1)
    controller.set_parallel_evaluation(False)
    controller.set_sampler(pints.NoUTurnMCMC)
    n_iterations = 100
    posterior_samples = controller.run(
        n_iterations=n_iterations, log_to_screen=True)
    # Save samples
    directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    posterior_samples.to_netcdf(
        directory + '/posteriors/growth_factor_model_2_outputs.nc')
if __name__ == '__main__':
    # Full pipeline: build truth model, simulate data, infer, save posterior.
    mm, pm, p = define_data_generating_model()
    meas, times = generate_measurements(pm, p)
    # p[-2:] are the two (fixed) error-model sigmas.
    logp = define_log_posterior(meas, times, mm, p[-2:])
    run_inference(logp)
|
990,378 | adf532fff8015876498bd6b992f46847fe0f6604 | # -*- coding: utf-8 -*-
from Hero_list import Timo, Jinx
class HeroFatory:
    """Simple factory mapping hero names to hero instances."""

    def create_hero(self, name):
        """Return a new hero instance for *name*; raise for unknown names."""
        hero_classes = {"timo": Timo, "jinx": Jinx}
        if name not in hero_classes:
            raise Exception("不在英雄池内")
        return hero_classes[name]()
if __name__ == "__main__":
    # Demo: create two heroes and run one fight + level-up cycle.
    hero = HeroFatory()
    timo = hero.create_hero("timo")
    jinx = hero.create_hero("jinx")
    # timo.fight(1000, 200, "jinx")
    # NOTE(review): fight/level_up signatures are defined in Hero_list --
    # the meaning of (1000, 200) is not visible here; confirm there.
    result = timo.fight(1000, 200, "jinx")
    timo.level_up(1000, 200, result)
|
990,379 | 34e738449eddd1a80bf07de7aeed9155ff35b104 | #!/usr/bin/env python
#
# NEED POSIX (i.e. *Cygwin* on Windows).
"""
Script to bump, commit and tag new versions.
USAGE:
bumpver
bumpver [-n] [-f] [-c] [-a] [-t <message>] <new-ver>
Without <new-ver> prints version extracted from current file.
Don't add a 'v' prefix!
OPTIONS:
-a, --amend Amend current commit for setting the "chore(ver): ..." msg.
-f, --force Bump (and optionally) commit/tag if version exists/is same.
-n, --dry-run Do not write files - just pretend.
-c, --commit Commit afterwardswith a commit-message describing version bump.
-t, --tag=<msg> Adds a signed tag with the given message (commit implied).
- Pre-releases: when working on some version
X.YbN # Beta release
X.YrcN or X.YcN # Release Candidate
X.Y # Final release
- Post-release:
X.YaN.postM # Post-release of an alpha release
X.YrcN.postM # Post-release of a release candidate
- Dev-release:
X.YaN.devM # Developmental release of an alpha release
X.Y.postN.devM # Developmental release of a post-release
EXAMPLE:
bumpver -t 'Mostly model changes' 1.6.2b0
"""
import functools as fnt
import os.path as osp
import re
import sys
from datetime import datetime
import docopt
my_dir = osp.dirname(__file__)

# File holding the canonical version/date strings that get bumped.
VFILE = osp.join(my_dir, "..", "pypiserver", "__init__.py")
# Capture groups: current version, full timestamp, and date-only part.
VFILE_regex_version = re.compile(r'version *= *__version__ *= *"([^"]+)"')
VFILE_regex_datetime = re.compile(r'__updated__ *= *"([^"]+)"')
VFILE_regex_date = re.compile(r'__updated__ *= *"([^"\s]+)\s')
# The README embeds the same strings and is updated in lock-step.
RFILE = osp.join(my_dir, "..", "README.md")
# Test cases run after bumping, to validate the docs still match.
PYTEST_ARGS = [osp.join("tests", "test_docs.py")]
class CmdException(Exception):
    """Expected, user-facing failure; reported via sys.exit without traceback."""
    pass
def get_current_date_info():
    """Return ``(datetime_string, date_string)`` for the current moment.

    The first element is formatted ``YYYY-MM-DD HH:MM:SS`` (the ``%z``
    directive renders empty because ``datetime.now()`` is timezone-naive),
    the second ``YYYY-MM-DD``.
    """
    # FIX: dropped the original `-> (str, str)` annotation -- a tuple literal
    # is not a valid type expression (it evaluated to a tuple object).
    now = datetime.now()
    new_datetime = now.strftime("%Y-%m-%d %H:%M:%S%z")
    new_date = now.strftime("%Y-%m-%d")
    return (new_datetime, new_date)
@fnt.lru_cache()
def read_txtfile(fpath):
    """Read and return the full UTF-8 text of *fpath* (cached per path)."""
    with open(fpath, "rt", encoding="utf-8") as file_obj:
        return file_obj.read()
def extract_file_regexes(fpath, regexes):
    """
    :param regexes:
        A sequence of regexes to "search", having a single capturing-group.
    :return:
        One group per regex, or raise if any regex did not match.
    """
    txt = read_txtfile(fpath)
    matches = [regex.search(txt) for regex in regexes]
    if all(matches):
        return [match.group(1) for match in matches]
    raise CmdException(
        "Failed extracting current versions with: %s"
        "\n matches: %s" % (regexes, matches)
    )
def replace_substrings(files, subst_pairs):
    """Yield ``(new_text, fpath, [(old, new, count), ...])`` per file.

    Counts each `old` occurrence *before* replacing it, so callers can
    report how many substitutions happened.
    """
    for fpath in files:
        text = read_txtfile(fpath)
        stats = []
        for old, new in subst_pairs:
            stats.append((old, new, text.count(old)))
            text = text.replace(old, new)
        yield (text, fpath, stats)
def format_syscmd(cmd):
    """Render a command (list/tuple or string) as a printable shell-like string."""
    if isinstance(cmd, (list, tuple)):
        # Quote only arguments containing spaces.
        quoted = ['"%s"' % part if " " in part else part for part in cmd]
        return " ".join(quoted)
    assert isinstance(cmd, str), cmd
    return cmd
def strip_ver2_commonprefix(ver1, ver2):
    """Return *ver2* with the dotted prefix it shares with *ver1* removed."""
    cprefix = osp.commonprefix([ver1, ver2])
    if not cprefix:
        return ver2
    # Cut at the last shared dot when one exists past position 0;
    # otherwise drop the whole common prefix.
    dot = cprefix.rfind(".")
    striplen = dot + 1 if dot > 0 else len(cprefix)
    return ver2[striplen:]
def run_testcases():
    """Run the documentation test-cases; raise CmdException on any failure."""
    import pytest

    retcode = pytest.main(PYTEST_ARGS)
    if not retcode:
        return
    raise CmdException(
        "Doc TCs failed(%s), probably version-bumping has failed!" % retcode
    )
def exec_cmd(cmd):
    """Run *cmd* via subprocess; raise CmdException on a non-zero exit status."""
    import subprocess as sbp

    retcode = sbp.call(cmd, stderr=sbp.STDOUT)
    if retcode:
        raise CmdException("Failed(%i) on: %s" % (retcode, format_syscmd(cmd)))
def do_commit(new_ver, old_ver, dry_run, amend, ver_files):
    """Yield progress lines while git-adding *ver_files* and committing."""
    import pathlib

    cmt_msg = "chore(ver): bump %s-->%s" % (old_ver, new_ver)
    # Git wants forward slashes even on Windows.
    posix_files = [pathlib.Path(f).as_posix() for f in ver_files]
    git_cmt = ["git", "commit", "-m", cmt_msg]
    if amend:
        git_cmt.append("--amend")
    for cmd in (["git", "add"] + posix_files, git_cmt):
        cmd_str = format_syscmd(cmd)
        if dry_run:
            yield "DRYRUN: %s" % cmd_str
        else:
            yield "EXEC: %s" % cmd_str
            exec_cmd(cmd)
def do_tag(tag, tag_msg, dry_run, force):
    """Yield a progress line while creating a signed git tag."""
    cmd = ["git", "tag", tag, "-s", "-m", tag_msg]
    if force:
        cmd.append("--force")
    cmd_str = format_syscmd(cmd)
    if dry_run:
        yield "DRYRUN: %s" % cmd_str
        return
    yield "EXEC: %s" % cmd_str
    exec_cmd(cmd)
def bumpver(
    new_ver, dry_run=False, force=False, amend=False, tag_name_or_commit=None
):
    """
    :param tag_name_or_commit:
        if true, do `git commit`, if string, also `git tag` with that as msg.

    Generator: yields human-readable progress lines. With a falsy *new_ver*
    it only yields the current version/date strings and stops.
    """
    if amend:
        ## Restore previous version before extracting it.
        cmd = "git checkout HEAD~ --".split()
        cmd.append(VFILE)
        cmd.append(RFILE)
        exec_cmd(cmd)
    regexes = [VFILE_regex_version, VFILE_regex_datetime, VFILE_regex_date]
    old_ver, old_datetime, old_date = extract_file_regexes(VFILE, regexes)
    if not new_ver:
        # Query mode: just report the current strings.
        yield old_ver
        yield old_datetime
        yield old_date
    else:
        if new_ver == old_ver:
            # NOTE(review): message lacks a space before "already".
            msg = "Version '%s'already bumped"
            if force:
                msg += ", but --force effected."
                yield msg % new_ver
            else:
                msg += "!\n Use of --force recommended."
                raise CmdException(msg % new_ver)
        new_datetime, new_date = get_current_date_info()
        ver_files = [osp.normpath(f) for f in [VFILE, RFILE]]
        # Substitute version and both date forms in every versioned file.
        subst_pairs = [
            (old_ver, new_ver),
            (old_datetime, new_datetime),
            (old_date, new_date),
        ]
        for repl in replace_substrings(ver_files, subst_pairs):
            new_txt, fpath, replacements = repl
            if not dry_run:
                with open(fpath, "wt", encoding="utf-8") as fp:
                    fp.write(new_txt)
            yield "%s: " % fpath
            for old, new, nrepl in replacements:
                yield " %i x (%24s --> %s)" % (nrepl, old, new)
        yield "...now launching DocTCs..."
        run_testcases()
        if tag_name_or_commit is not None:
            yield from do_commit(new_ver, old_ver, dry_run, amend, ver_files)
            # A string value means: also create a signed tag with that message.
            if isinstance(tag_name_or_commit, str):
                tag = "v%s" % new_ver
                yield from do_tag(tag, tag_name_or_commit, dry_run, force)
def main(*args):
    """CLI entry point: parse *args* with docopt and run the version bump."""
    opts = docopt.docopt(__doc__, argv=args)
    new_ver = opts["<new-ver>"]
    assert not new_ver or new_ver[0] != "v", (
        "Version '%s' must NOT start with `v`!" % new_ver
    )
    # A tag message implies a commit; a bare --commit passes True.
    if opts["--tag"]:
        tag_name_or_commit = opts["--tag"]
    elif opts["--commit"]:
        tag_name_or_commit = True
    else:
        tag_name_or_commit = None
    try:
        for progress_line in bumpver(
            new_ver,
            opts["--dry-run"],
            opts["--force"],
            opts["--amend"],
            tag_name_or_commit,
        ):
            print(progress_line)
    except CmdException as ex:
        sys.exit(str(ex))
    except Exception as ex:
        print("Unexpected error happened.")
        raise ex
# Script entry point: forward CLI arguments (minus argv[0]) to main().
if __name__ == "__main__":
    main(*sys.argv[1:])
|
990,380 | 8e8db60c156aa43294dc87c7c6348dfa4cecc6c8 |
# merge existing .pkl files into a single pkl file.
import pandas as pd
import numpy as np
import numpy as np
import collections
import matplotlib
# matplotlib.use('Qt4Agg')
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
from matplotlib import rc
# import pandas as pd
from random import *
# import csv
import sys
def analyse_data(Model):
    """Compute rate-dependence of APD for one model variant.

    Loads 1 Hz and 3 Hz summary tables, normalises each current-block
    condition by its Baseline, masks negative (invalid) values with NaN,
    and writes the per-rate and 3Hz/1Hz-ratio tables as CSV and pickle.

    NOTE(review): this file uses Python 2 `print` statements and only runs
    under Python 2.
    """
    data_1Hz = pd.read_pickle('../1Hz/Summarise_data_Model_%d.pkl'%Model)
    data_3Hz = pd.read_pickle('../3Hz/Summarise_data_Model_%d.pkl'%Model)
    data_1Hz = data_1Hz.transpose()
    data_3Hz = data_3Hz.transpose()
    # print data_1Hz.describe()
    # print data_1Hz['Baseline']/data_1Hz['Baseline']
    # print data_3Hz.describe()
    # 1 Hz: each condition expressed relative to Baseline (Baseline itself
    # becomes a column of ones).
    APD_R1 = data_1Hz['Baseline']
    APD_R1_Baseline = data_1Hz['Baseline']/data_1Hz['Baseline']
    APD_R1_IK2p = data_1Hz['IK2p']/data_1Hz['Baseline']
    APD_R1_ISK = data_1Hz['ISK']/data_1Hz['Baseline']
    APD_R1_IKur = data_1Hz['IKur']/data_1Hz['Baseline']
    APD_R1_IK2p_ISK = data_1Hz['IK2p_ISK']/data_1Hz['Baseline']
    APD_R1_IK2p_IKur = data_1Hz['IK2p_IKur']/data_1Hz['Baseline']
    APD_R1_ISK_IKur = data_1Hz['ISK_IKur']/data_1Hz['Baseline']
    APD_R1_ISK_IKur_Over_IKur = data_1Hz['ISK_IKur']/data_1Hz['IKur']
    APD_R1_ISK_IKur_Over_ISK = data_1Hz['ISK_IKur']/data_1Hz['ISK']
    APD_R1_IK2P_ISK_IKur = data_1Hz['IK2P_ISK_IKur']/data_1Hz['Baseline']
    # print APD_R1_Baseline.describe()
    f1 = [APD_R1_Baseline ,APD_R1_IK2p, APD_R1_ISK, APD_R1_IK2p_ISK, APD_R1_IKur,APD_R1_IK2p_IKur,APD_R1_ISK_IKur ,APD_R1_IK2P_ISK_IKur,APD_R1,APD_R1_ISK_IKur_Over_IKur,APD_R1_ISK_IKur_Over_ISK]
    # Mask physically impossible negative values.
    for i in range(len(f1)):
        f1[i] [f1[i]<0] = np.nan
    res1= pd.concat( f1, axis=1,
        keys=['Baseline', 'IK2p', 'ISK', 'IK2p_ISK', 'IKur', 'IK2p_IKur', 'ISK_IKur','IK2P_ISK_IKur', 'APD_baseline', 'ISK_IKur_Over_IKur','ISK_IKur_Over_ISK']
        )
    # print res1.describe()
    # 3 Hz: same normalisation as above.
    APD_R3 = data_3Hz['Baseline']
    APD_R3_Baseline = data_3Hz['Baseline']/data_3Hz['Baseline']
    APD_R3_IK2p = data_3Hz['IK2p']/data_3Hz['Baseline']
    APD_R3_ISK = data_3Hz['ISK']/data_3Hz['Baseline']
    APD_R3_IKur = data_3Hz['IKur']/data_3Hz['Baseline']
    APD_R3_IK2p_ISK = data_3Hz['IK2p_ISK']/data_3Hz['Baseline']
    APD_R3_IK2p_IKur = data_3Hz['IK2p_IKur']/data_3Hz['Baseline']
    APD_R3_ISK_IKur = data_3Hz['ISK_IKur']/data_3Hz['Baseline']
    APD_R3_IK2P_ISK_IKur = data_3Hz['IK2P_ISK_IKur']/data_3Hz['Baseline']
    APD_R3_ISK_IKur_Over_IKur = data_3Hz['ISK_IKur']/data_3Hz['IKur']
    APD_R3_ISK_IKur_Over_ISK = data_3Hz['ISK_IKur']/data_3Hz['ISK']
    # print APD_R1_Baseline.describe()
    f2 = [APD_R3_Baseline ,APD_R3_IK2p, APD_R3_ISK, APD_R3_IK2p_ISK, APD_R3_IKur,APD_R3_IK2p_IKur,APD_R3_ISK_IKur ,APD_R3_IK2P_ISK_IKur,APD_R3, APD_R3_ISK_IKur_Over_IKur,APD_R3_ISK_IKur_Over_ISK]
    # NOTE(review): iterates range(len(f1)) over f2; the two lists have the
    # same length (11), so this works, but len(f2) was probably intended.
    for i in range(len(f1)):
        f2[i] [f2[i]<0] = np.nan
    res3= pd.concat(f2 , axis=1,
        keys=['Baseline', 'IK2p', 'ISK', 'IK2p_ISK', 'IKur', 'IK2p_IKur', 'ISK_IKur','IK2P_ISK_IKur', 'APD_baseline', 'ISK_IKur_Over_IKur','ISK_IKur_Over_ISK']
        )
    # Rate dependence: 3 Hz values relative to 1 Hz.
    res = res3/res1;
    print res.describe()
    res.to_csv('APD_Data/Rate_dependence_%d.csv'%Model, sep=',',na_rep='NAN')
    res1.to_csv('APD_Data/Rate_dependence_1_%d.csv'%Model, sep=',',na_rep='NAN')
    res3.to_csv('APD_Data/Rate_dependence_3_%d.csv'%Model, sep=',',na_rep='NAN')
    res.to_pickle('APD_Data/Rate_dependence_%d.pkl'%Model)
    res1.to_pickle('APD_Data/Rate_dependence_1_%d.pkl'%Model)
    res3.to_pickle('APD_Data/Rate_dependence_3_%d.pkl'%Model)
# Run the analysis for all 12 model variants (0..11).
for i in range(12):
    analyse_data(i)
|
990,381 | fce4024640c48290cb22fadf6e124d5346c8916b | """ loader.py
"""
import os
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
def get_loader(config):
    """Build a shuffled DataLoader over an ImageFolder dataset.

    Images are center-cropped to 160 px, resized to ``config.image_size``,
    converted to tensors, and normalized per channel from [0, 1] to [-1, 1].

    :param config: namespace with dataset, image_size, batch_size, num_workers.
    :return: torch.utils.data.DataLoader over the dataset.
    """
    root = os.path.join(os.path.abspath(os.curdir), config.dataset)
    print('[*] Load data from {0}.'.format(root))
    dataset = ImageFolder(
        root=root,
        transform=transforms.Compose([
            transforms.CenterCrop(160),
            # FIX: transforms.Scale was deprecated and later removed from
            # torchvision; Resize is the identical operation.
            transforms.Resize(size=config.image_size),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
    )
    loader = DataLoader(
        dataset=dataset,
        batch_size=config.batch_size,
        shuffle=True,
        num_workers=config.num_workers
    )
    return loader
def denorm(x):
    """Map values normalized to [-1, 1] back to the [0, 1] range."""
    return 0.5 * x + 0.5
990,382 | 2dddd89352741022111793a41fe395439c1e7e81 | """pyglet_helper.ring contains an object for drawing a ring"""
# pyglet may be unavailable (e.g. headless CI); fall back to gl = None so the
# module still imports -- render() will fail if actually called then.
try:
    import pyglet.gl as gl
except Exception as error_msg:
    gl = None
from pyglet_helper.objects import Axial
from pyglet_helper.util import Rgb, Tmatrix, Vector
from math import pi, sin, cos, sqrt
class Ring(Axial):
    """
    A Ring object (a torus: major radius `radius`, minor radius `thickness`).
    """
    # NOTE(review): the defaults color=Rgb(), pos=Vector(...), axis=Vector(...)
    # are mutable objects created once at definition time and shared across
    # calls -- confirm Rgb/Vector are treated as read-only here.
    def __init__(self, thickness=0.0, radius=1.0, color=Rgb(),
                 pos=Vector([0, 0, 0]), axis=Vector([1, 0, 0])):
        """
        :param thickness: The ring's thickness.
        :type thickness: float
        :param radius: The ring's radius.
        :type radius: float
        :param color: The object's color.
        :type color: pyglet_helper.util.Rgb
        :param pos: The object's position.
        :type pos: pyglet_helper.util.Vector
        :param axis: The cone points from the base to the point along the axis.
        :type axis: pyglet_helper.util.Vector
        """
        super(Ring, self).__init__(radius=radius, color=color, pos=pos,
                                   axis=axis)
        self._thickness = None
        self.list = None  # OpenGL display-list id; created lazily in render()
        self.axis = axis
        # The radius of the ring's body. If not specified, it is set to 1/10
        # of the radius of the body.
        self.thickness = thickness

    @property
    def thickness(self):
        """
        Get the ring's thickness (minor radius)
        :return: the ring's minor radius
        :rtype: float
        """
        return self._thickness

    @thickness.setter
    def thickness(self, new_thickness):
        """
        Set the ring's thickness. Will not update until render() is called
        again
        :param new_thickness: the new thickness (minor radius)
        :type new_thickness: float
        """
        self._thickness = new_thickness

    @property
    def material_matrix(self):
        """
        Creates a transformation matrix scaled to the size of the torus
        :return: the transformation matrix
        :return: pyglet_helper.util.Tmatrix
        """
        out = Tmatrix()
        out.translate(Vector([.5, .5, .5]))
        out.scale(Vector([self.radius, self.radius, self.radius]) *
                  (.5 / (self.radius + self.thickness)))
        return out

    @property
    def degenerate(self):
        """
        True if the Ring's major radius is 0
        :return:
        """
        return self.radius == 0.0

    def render(self, scene):
        """ Add a ring to the view.

        Tessellates the torus with a level of detail proportional to its
        on-screen pixel coverage, then compiles and calls an OpenGL display
        list of the resulting triangle mesh.

        :param scene: The view to render the model into
        :type scene: pyglet_helper.objects.View
        """
        if self.degenerate:
            return
        # The number of subdivisions around the hoop's radial direction.
        if self.thickness:
            band_coverage = scene.pixel_coverage(self.pos, self.thickness)
        else:
            band_coverage = scene.pixel_coverage(self.pos, self.radius * 0.1)
        # Negative coverage means off-screen/unknown: use maximum detail.
        if band_coverage < 0:
            band_coverage = 1000
        bands = sqrt(band_coverage * 4.0)
        bands = clamp(4, bands, 40)
        # The number of subdivisions around the hoop's tangential direction.
        ring_coverage = scene.pixel_coverage(self.pos, self.radius)
        if ring_coverage < 0:
            ring_coverage = 1000
        rings = sqrt(ring_coverage * 4.0)
        rings = clamp(4, rings, 80)
        slices = int(rings)
        inner_slices = int(bands)
        radius = self.radius
        inner_radius = self.thickness
        # Create the vertex and normal arrays.
        vertices = []
        normals = []
        # Standard torus parameterisation: outer angle sweeps the hoop,
        # inner angle sweeps the tube cross-section.
        outer_angle_step = 2 * pi / (slices - 1)
        inner_angle_step = 2 * pi / (inner_slices - 1)
        outer_angle = 0.
        for i in range(slices):
            cos_outer_angle = cos(outer_angle)
            sin_outer_angle = sin(outer_angle)
            inner_angle = 0.
            for j in range(inner_slices):
                cos_inner_angle = cos(inner_angle)
                sin_inner_angle = sin(inner_angle)
                diameter = (radius + inner_radius * cos_inner_angle)
                vertex_x = diameter * cos_outer_angle
                vertex_y = diameter * sin_outer_angle
                vertex_z = inner_radius * sin_inner_angle
                normal_x = cos_outer_angle * cos_inner_angle
                normal_y = sin_outer_angle * cos_inner_angle
                normal_z = sin_inner_angle
                vertices.extend([vertex_x, vertex_y, vertex_z])
                normals.extend([normal_x, normal_y, normal_z])
                inner_angle += inner_angle_step
            outer_angle += outer_angle_step
        # Create ctypes arrays of the lists
        vertices = (gl.GLfloat *len(vertices))(*vertices)
        normals = (gl.GLfloat * len(normals))(*normals)
        # Create a list of triangle indices (two triangles per quad).
        indices = []
        for i in range(slices - 1):
            for j in range(inner_slices - 1):
                pos = i * inner_slices + j
                indices.extend([pos, pos + inner_slices, pos + inner_slices +
                                1])
                indices.extend([pos, pos + inner_slices + 1, pos + 1])
        indices = (gl.GLuint * len(indices))(*indices)
        # Compile a display list
        self.list = gl.glGenLists(1)
        gl.glNewList(self.list, gl.GL_COMPILE)
        self.color.gl_set(self.opacity)
        gl.glPushClientAttrib(gl.GL_CLIENT_VERTEX_ARRAY_BIT)
        gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
        gl.glEnableClientState(gl.GL_NORMAL_ARRAY)
        self.model_world_transform(scene.gcf,
                                   Vector([self.radius, self.radius,
                                           self.radius])).gl_mult()
        gl.glVertexPointer(3, gl.GL_FLOAT, 0, vertices)
        gl.glNormalPointer(gl.GL_FLOAT, 0, normals)
        gl.glDrawElements(gl.GL_TRIANGLES, len(indices), gl.GL_UNSIGNED_INT,
                          indices)
        gl.glPopClientAttrib()
        gl.glEndList()
        # Execute the freshly compiled list immediately.
        gl.glCallList(self.list)
def clamp(lower, value, upper):
    """ Restrict a value to be between a lower value and upper value. Used to
    restrict the number of polygons in the ring object

    :param lower: the lowest possible value of value
    :type lower: float or int
    :param value: the value to check
    :type value: float or int
    :param upper: the largest possible value of value
    :type upper: float or int
    :rtype: float or int
    :return: the restricted value
    """
    if value < lower:
        return lower
    if value > upper:
        return upper
    return value
|
990,383 | e7137d0669089683c1ea3c5813ee6b12b027d818 | #coding: utf-8
import os
import logging
import configparser
from seafevents.app.config import appconfig
# Upper bound on the number of [LDAP_MULTI_n] server sections scanned.
MAX_LDAP_NUM = 10
class LdapConfig(object):
    """Per-server LDAP configuration (one instance per [LDAP*] section).

    All values default to None/False until populated from the config file
    by Settings.read_*_config().
    """
    def __init__(self):
        # Connection / authentication
        self.host = None
        self.base_dn = None
        self.user_dn = None
        self.passwd = None
        self.login_attr = None
        self.use_page_result = False
        self.follow_referrals = True

        # Feature switches
        self.enable_group_sync = False
        self.enable_user_sync = False

        # User sync
        self.user_filter = None
        self.import_new_user = True
        self.user_object_class = None
        self.pwd_change_attr = None
        self.enable_extra_user_info_sync = False
        self.first_name_attr = None
        self.last_name_attr = None
        self.name_reverse = False
        self.dept_attr = None
        self.uid_attr = None
        self.cemail_attr = None
        self.role_name_attr = None

        # Group sync
        self.group_filter = None
        self.group_object_class = None
        self.group_member_attr = None
        self.group_uuid_attr = None
        self.use_group_member_range_query = False
        self.user_attr_in_memberUid = None

        # Department sync
        self.create_department_library = False
        self.sync_department_from_ou = False
        self.default_department_quota = -2
        # BUG FIX: department_repo_permission was assigned None twice in the
        # original; the duplicate has been removed.
        self.department_repo_permission = None
        self.sync_group_as_department = False
        self.department_name_attr = None
class Settings(object):
def __init__(self, is_test=False):
# If any of ldap configs allows user-sync/group-sync, user-sync/group-sync task is allowed.
self.enable_group_sync = False
self.enable_user_sync = False
self.sync_department_from_ou = False
# Common configs which only take effect at [LDAP_SYNC] section.
self.sync_interval = 0
self.del_group_if_not_found = False
self.del_department_if_not_found = False
self.enable_deactive_user = False
self.activate_user = True
self.import_new_user = True
# Only all server configs have base info so can we do ldap sync or test.
self.has_base_info = False
# Decide whether load extra_user_info from database or not.
self.load_extra_user_info_sync = False
self.load_uid_attr = False
self.load_cemail_attr = False
self.ldap_configs = []
if appconfig.get('ccnet_conf_path'):
ccnet_conf_path = appconfig.ccnet_conf_path
else:
if is_test:
ccnet_conf_dir = os.environ.get('CCNET_CONF_DIR')
if ccnet_conf_dir:
ccnet_conf_path = os.path.join(ccnet_conf_dir, 'ccnet.conf')
else:
logging.warning('Environment variable CCNET_CONF_DIR and SEAFILE_CENTRAL_CONF_DIR is not define, stop ldap test.')
return
else:
logging.warning('Environment variable CCNET_CONF_DIR and SEAFILE_CENTRAL_CONF_DIR is not define, disable ldap sync.')
return
self.parser = configparser.ConfigParser()
self.parser.read(ccnet_conf_path)
if not self.parser.has_section('LDAP'):
if is_test:
logging.info('LDAP section is not set, stop ldap test.')
else:
logging.info('LDAP section is not set, disable ldap sync.')
return
# We can run test without [LDAP_SYNC] section
has_sync_section = True
if not self.parser.has_section('LDAP_SYNC'):
if not is_test:
logging.info('LDAP_SYNC section is not set, disable ldap sync.')
return
else:
has_sync_section = False
if has_sync_section:
self.read_common_config(is_test)
self.read_multi_server_configs(is_test, has_sync_section)
# If enable_extra_user_info_sync, uid_attr, cemail_attr were configed in any of ldap configs,
# load extra_user_info, uid_attr, cemail_attr from database to memory.
for config in self.ldap_configs:
if config.enable_extra_user_info_sync == True:
self.load_extra_user_info_sync = True
if config.uid_attr != '':
self.load_uid_attr = True
if config.cemail_attr != '':
self.load_cemail_attr = True
def read_common_config(self, is_test):
self.sync_interval = self.get_option('LDAP_SYNC', 'SYNC_INTERVAL', int, 60)
self.del_group_if_not_found = self.get_option('LDAP_SYNC', 'DEL_GROUP_IF_NOT_FOUND', bool, False)
self.del_department_if_not_found = self.get_option('LDAP_SYNC', 'DEL_DEPARTMENT_IF_NOT_FOUND', bool, False)
self.enable_deactive_user = self.get_option('LDAP_SYNC', 'DEACTIVE_USER_IF_NOTFOUND', bool, False)
self.activate_user = self.get_option('LDAP_SYNC', 'ACTIVATE_USER_WHEN_IMPORT', bool, True)
self.import_new_user = self.get_option('LDAP_SYNC', 'IMPORT_NEW_USER', bool, True)
def read_multi_server_configs(self, is_test, has_sync_section=True):
for i in range(0, MAX_LDAP_NUM):
ldap_sec = 'LDAP' if i==0 else 'LDAP_MULTI_%d' % i
sync_sec = 'LDAP_SYNC' if i==0 else 'LDAP_SYNC_MULTI_%d' % i
if not self.parser.has_section(ldap_sec):
break
# If [LDAP_MULTI_1] was configed but no [LDAP_SYNC_MULTI_1], use [LDAP_SYNC] section for this server.
if not self.parser.has_section(sync_sec):
sync_sec = 'LDAP_SYNC'
ldap_config = LdapConfig()
if self.read_base_config(ldap_config, ldap_sec, sync_sec, is_test, has_sync_section) == -1:
return
if not has_sync_section:
self.ldap_configs.append(ldap_config)
continue
if ldap_config.enable_user_sync:
self.read_sync_user_config(ldap_config, ldap_sec, sync_sec)
self.enable_user_sync = True
if ldap_config.enable_group_sync or ldap_config.sync_department_from_ou:
self.read_sync_group_config(ldap_config, ldap_sec, sync_sec)
if ldap_config.enable_group_sync:
self.enable_group_sync = True
if ldap_config.sync_department_from_ou:
self.sync_department_from_ou = True
self.ldap_configs.append(ldap_config)
def read_base_config(self, ldap_config, ldap_sec, sync_sec, is_test, has_sync_section=True):
ldap_config.host = self.get_option(ldap_sec, 'HOST')
ldap_config.base_dn = self.get_option(ldap_sec, 'BASE')
ldap_config.user_dn = self.get_option(ldap_sec, 'USER_DN')
ldap_config.passwd = self.get_option(ldap_sec, 'PASSWORD')
ldap_config.login_attr = self.get_option(ldap_sec, 'LOGIN_ATTR', dval='mail')
ldap_config.use_page_result = self.get_option(ldap_sec, 'USE_PAGED_RESULT', bool, False)
ldap_config.follow_referrals = self.get_option(ldap_sec, 'FOLLOW_REFERRALS', bool, True)
ldap_config.user_filter = self.get_option(ldap_sec, 'FILTER')
ldap_config.group_filter = self.get_option(ldap_sec, 'GROUP_FILTER')
if ldap_config.host == '' or ldap_config.user_dn == '' or ldap_config.passwd == '' or ldap_config.base_dn == '':
if is_test:
logging.warning('LDAP info is not set completely in [%s], stop ldap test.', ldap_sec)
else:
logging.warning('LDAP info is not set completely in [%s], disable ldap sync.', ldap_sec)
self.has_base_info = False
return -1
self.has_base_info = True
if ldap_config.login_attr != 'mail' and ldap_config.login_attr != 'userPrincipalName':
if is_test:
logging.warning("LDAP login attr is not mail or userPrincipalName")
if not has_sync_section:
return
ldap_config.enable_group_sync = self.get_option(sync_sec, 'ENABLE_GROUP_SYNC',
bool, False)
ldap_config.enable_user_sync = self.get_option(sync_sec, 'ENABLE_USER_SYNC',
bool, False)
ldap_config.sync_department_from_ou = self.get_option(sync_sec, 'SYNC_DEPARTMENT_FROM_OU',
bool, False)
def read_sync_group_config(self, ldap_config, ldap_sec, sync_sec):
ldap_config.group_object_class = self.get_option(sync_sec, 'GROUP_OBJECT_CLASS', dval='group')
# If GROUP_FILTER is not set in server level, use value of LDAP_SYNC section
if not ldap_config.group_filter:
ldap_config.group_filter = self.get_option(sync_sec, 'GROUP_FILTER')
ldap_config.group_member_attr = self.get_option(sync_sec,
'GROUP_MEMBER_ATTR',
dval='member')
ldap_config.group_uuid_attr = self.get_option(sync_sec,
'GROUP_UUID_ATTR',
dval='objectGUID')
ldap_config.user_object_class = self.get_option(sync_sec, 'USER_OBJECT_CLASS',
dval='person')
ldap_config.create_department_library = self.get_option(sync_sec,
'CREATE_DEPARTMENT_LIBRARY',
bool, False)
ldap_config.department_repo_permission = self.get_option(sync_sec, 'DEPT_REPO_PERM' ,dval='rw')
ldap_config.default_department_quota = self.get_option(sync_sec,
'DEFAULT_DEPARTMENT_QUOTA',
int, -2)
ldap_config.sync_group_as_department = self.get_option(sync_sec,
'SYNC_GROUP_AS_DEPARTMENT',
bool, False)
ldap_config.use_group_member_range_query = self.get_option(sync_sec,
'USE_GROUP_MEMBER_RANGE_QUERY',
bool, False)
'''
posix groups store members in atrribute 'memberUid', however, the value of memberUid may be not a 'uid',
so we make it configurable, default value is 'uid'.
'''
ldap_config.user_attr_in_memberUid = self.get_option(sync_sec, 'USER_ATTR_IN_MEMBERUID',dval='uid')
ldap_config.department_name_attr = self.get_option(sync_sec, 'DEPT_NAME_ATTR')
ldap_config.department_repo_permission = self.get_option(sync_sec, 'DEPT_REPO_PERM' ,dval='rw')
def read_sync_user_config(self, ldap_config, ldap_sec, sync_sec):
ldap_config.user_object_class = self.get_option(sync_sec, 'USER_OBJECT_CLASS',
dval='person')
# If USER_FILTER is not set in server level, use value of LDAP_SYNC section
if not ldap_config.user_filter:
ldap_config.user_filter = self.get_option(sync_sec, 'USER_FILTER')
ldap_config.pwd_change_attr = self.get_option(sync_sec, 'PWD_CHANGE_ATTR',
dval='pwdLastSet')
ldap_config.enable_extra_user_info_sync = self.get_option(sync_sec, 'ENABLE_EXTRA_USER_INFO_SYNC',
bool, False)
ldap_config.first_name_attr = self.get_option(sync_sec, 'FIRST_NAME_ATTR',
dval='givenName')
ldap_config.last_name_attr = self.get_option(sync_sec, 'LAST_NAME_ATTR',
dval='sn')
ldap_config.name_reverse = self.get_option(sync_sec, 'USER_NAME_REVERSE',
bool, False)
ldap_config.dept_attr = self.get_option(sync_sec, 'DEPT_ATTR',
dval='department')
ldap_config.uid_attr = self.get_option(sync_sec, 'UID_ATTR')
ldap_config.cemail_attr = self.get_option(sync_sec, 'CONTACT_EMAIL_ATTR')
ldap_config.role_name_attr = self.get_option(sync_sec, 'ROLE_NAME_ATTR', dval='')
ldap_config.auto_reactivate_users = self.get_option(sync_sec, 'AUTO_REACTIVATE_USERS', bool, False)
def enable_sync(self):
return self.enable_user_sync or self.enable_group_sync or self.sync_department_from_ou
def get_option(self, section, key, dtype=None, dval=''):
try:
val = self.parser.get(section, key)
if dtype:
val = self.parser.getboolean(section, key) \
if dtype == bool else dtype(val)
return val
except configparser.NoOptionError:
return dval
except ValueError:
return dval
return val if val != '' else dval
|
990,384 | fb70a2fa01d9ece0af30aedfe55cdfc3d6658fb9 | import os
from .utils import jsonfile2dict
class COCO2YOLO:
    """Convert COCO-format detection annotations to YOLO txt label files.

    *annotations_dict* is a parsed COCO json dict with the usual
    'categories', 'images' and 'annotations' keys; one ``<image>.txt`` is
    written per annotated image under the *output* directory.
    """

    def __init__(self, annotations_dict, output):
        self.labels = annotations_dict
        # COCO category id -> name, plus a stable name list whose index is
        # used as the YOLO class id.
        self.coco_id_name_map = self._categories()
        self.coco_name_list = list(self.coco_id_name_map.values())
        self.output = output

    def _categories(self):
        """Return {category_id: category_name} from the COCO 'categories' list."""
        return {cls['id']: cls['name'] for cls in self.labels['categories']}

    def _load_images_info(self):
        """Return {image_id: (file_name, width, height)}.

        A Windows-style prefix up to the first backslash is stripped from
        file names, matching the original behavior.
        """
        images_info = {}
        for image in self.labels['images']:
            file_name = image['file_name']
            if '\\' in file_name:
                file_name = file_name[file_name.index('\\') + 1:]
            images_info[image['id']] = (file_name, image['width'], image['height'])
        return images_info

    def _bbox_2_yolo(self, bbox, img_w, img_h):
        """Convert a COCO [x, y, w, h] box to normalized YOLO (cx, cy, w, h)."""
        x, y, w, h = bbox[0], bbox[1], bbox[2], bbox[3]
        dw = 1 / img_w
        dh = 1 / img_h
        centerx = (x + w / 2) * dw
        centery = (y + h / 2) * dh
        return centerx, centery, w * dw, h * dh

    def _convert_anno(self, images_info):
        """Group annotations per image: {image_id: [(file_name, category_id, yolo_box), ...]}."""
        anno_dict = dict()
        for anno in self.labels['annotations']:
            image_id = anno['image_id']
            image_name, img_w, img_h = images_info.get(image_id)
            yolo_box = self._bbox_2_yolo(anno['bbox'], img_w, img_h)
            anno_dict.setdefault(image_id, []).append(
                (image_name, anno['category_id'], yolo_box))
        return anno_dict

    def save_classes(self):
        """Write all category names, ordered by COCO id, to ./coco.names."""
        sorted_classes = [c['name'] for c in
                          sorted(self.labels['categories'], key=lambda c: c['id'])]
        # the with-block closes the file; the original's explicit close()
        # inside the block was redundant and has been dropped
        with open('coco.names', 'w', encoding='utf-8') as f:
            for cls in sorted_classes:
                f.write(cls + '\n')

    def coco2yolo(self):
        """Run the full conversion: image info -> grouped annos -> txt files."""
        images_info = self._load_images_info()
        anno_dict = self._convert_anno(images_info)
        self._save_txt(anno_dict)

    def _save_txt(self, anno_dict):
        """Write one YOLO label file per image into self.output."""
        for annos in anno_dict.values():
            # swap the original extension for .txt
            ext_name = annos[0][0].split(".")[-1]
            file_name = annos[0][0][:-len(ext_name)] + 'txt'
            with open(os.path.join(self.output, file_name), 'w', encoding='utf-8') as f:
                for obj in annos:
                    cat_name = self.coco_id_name_map.get(obj[1])
                    category_id = self.coco_name_list.index(cat_name)
                    box = ' '.join('{:.6f}'.format(v) for v in obj[2])
                    f.write(str(category_id) + ' ' + box + '\n')
def convert_COCO2YOLO(coco_json_path: str, ouput_path: str):
    """Load a COCO json annotation file and emit YOLO label files into *ouput_path*."""
    converter = COCO2YOLO(jsonfile2dict(coco_json_path), ouput_path)
    converter.coco2yolo()
|
990,385 | 77db980ce887df0337024c79b1447b013272879a | import numpy as np
import matplotlib.pyplot as plt
# Batch gradient descent for linear regression on synthetic 2-feature data.
# Define the number of samples
m = 100
# Random feature X1, uniform in [-3, 3)
X1 = 6 * np.random.rand(m, 1) - 3
# Random feature X2, uniform in [-3, 0)
X2 = 3 * np.random.rand(m, 1) - 3
# Design matrix: intercept column X0 plus the two features.
# fixed: use m instead of the hard-coded 100 so the sample count can change.
X = np.c_[np.ones((m, 1)), X1, X2]
# Target is linear in X1 and X2 plus standard-normal noise
Y = 0.5 * X2 + X1 * 2 + np.random.randn(m, 1)
# Plot the generated data: feature X1 against target Y
plt.scatter(X1, Y)
plt.show()
# print(X)
eta = 0.1                  # learning rate
n_iter = 100               # number of gradient steps
theta = np.zeros((3, 1))   # parameters [intercept, w1, w2], start at zero
# Training loop: theta <- theta - (eta/m) * X^T (X theta - Y)
for i in range(n_iter):
    H = X.dot(theta)       # current predictions
    error = H - Y
    gradient = (1/m) * (eta) * (X.T.dot(error))  # Calculate the (scaled) gradient
    theta = theta - gradient
# Final parameters; should approximate [0, 2, 0.5] up to noise
print(theta)
|
990,386 | df3b83159d6013982a42be49b4b85b9c4a4cb774 | from django.urls import path
from users import views
# URL routes for the users app: account registration and session login.
urlpatterns = [
    path(
        route='signup/',
        view=views.UsersSignUpView.as_view(),  # user registration form
        name='signup'
    ),
    path(
        route='login/',
        view=views.UsersLoginView.as_view(),  # login form
        name='login'
    )
]
|
990,387 | 92f4ea72568629da7cb27f7d1d44fdec6e0973fb | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-06-19 12:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: create School and Student (FK to School, CASCADE) and
    add a required integer 'age' field to the existing 'person' model."""
    dependencies = [
        ('my_app', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='School',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('address', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                ('age', models.IntegerField()),
                ('school', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='my_app.School')),
            ],
        ),
        migrations.AddField(
            model_name='person',
            name='age',
            # preserve_default=False: default=1 only backfills existing rows
            field=models.IntegerField(default=1),
            preserve_default=False,
        ),
    ]
|
990,388 | 74988990f04a8e25bd51e2dcb8448a670bd19e9c | from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
def base(request):
    """Render the site landing page (index.html)."""
    return render(request, 'index.html')
990,389 | 03471d933ac20a2b70e2d0f29e0fd8810735aa91 | string_to_search = input().lower()
# Count total occurrences of each keyword in the lower-cased input string.
keywords = ['sand', 'water', 'fish', 'sun']
total_hits = sum(string_to_search.count(keyword) for keyword in keywords)
print(total_hits)
|
990,390 | 9b316ea4e84871d69ec8c9c4b31585cfd40012c1 | import pymongo
import json
from datetime import datetime
# Demo of basic pymongo usage: connect, upsert documents from test.json into
# mg_test.movies, then run find and aggregate queries over them.
# 1. Establish the connection
mongoConnStr="mongodb://cj:123456@localhost:27017/?authSource=admin"
client=pymongo.MongoClient(mongoConnStr)
print("list dbs:",client.list_database_names()) # list databases
db=client['mg_test']
print("list collections of mg_test db:",db.list_collection_names()) # list collections (similar to tables)
collection=db['movies']
print("get collection:movies count:",collection.estimated_document_count())
# 2. clear:
# ret=collection.delete_many({})
# print(ret.deleted_count,ret.acknowledged,ret.raw_result)
# 3. update_one -- update or insert record
contents=json.load(open('test.json','r')) # load: file -> string -> python obj
print('update or insert records:')
for record in contents:
    id=record.pop('id') # the json 'id' becomes the Mongo _id
    t=datetime.now()
    print("to store record...id=%s,title=%s" % (id,record['title']))
    record['last_update_time']=t
    # upsert: $setOnInsert fires only when the doc is first inserted
    # (create_time); $set refreshes the rest on every run
    ret=collection.update_one({'_id':id}
        ,{'$setOnInsert':{'create_time':t},'$set':record}
        ,upsert=True)
    print(ret.matched_count,ret.modified_count,ret.upserted_id,ret.acknowledged,ret.raw_result)
# 4. find -- list records
print('list stored records:')
results=collection.find({},{'_id':1,'title':1,'rate':1})
for result in results:
    print(result)
# results sample:
# {'_id': '27109879', 'rate': '6.5', 'title': '硬核'}
# {'_id': '26707088', 'rate': '7.1', 'title': '奎迪:英雄再起'}
# {'_id': '30334122', 'rate': '6.1', 'title': '芳龄十六'}
# {'_id': '1945750', 'rate': '7.7', 'title': '污垢'}
# {'_id': '26611891', 'rate': '6.8', 'title': '欢乐满人间2'}
print('list rate>=7 records:')
# NOTE: 'rate' is stored as a string, so $gte compares lexicographically here
results=collection.find({'rate':{'$gte':"7"}},{'_id':1,'title':1,'rate':1}).limit(5)
for record in results:
    print(record)
# results sample:
# {'_id': '26707088', 'rate': '7.1', 'title': '奎迪:英雄再起'}
# {'_id': '1945750', 'rate': '7.7', 'title': '污垢'}
# 5. aggregate -- summary records
print('list summary by rate level')
# $cond:{if:{$gte:['$rating',8]},then:1,else:0}
# $addFields:{'rate_number':{$convert:{input:"$rate",to:"int"}}}
# use $project also could add fields
results=collection.aggregate([
    {'$addFields':{
        'rate_number':{'$convert':{'input':"$rate",'to':"double"}}
        ,'rate_level':{'$cond':[
            {'$lt':['$rate','7.5']}
            ,{'$cond':[{'$gte':['$rate','6.5']},'Middle','Low']}
            ,'High'
        ]}
    }}
    # ,{'$project':{
    #     '_id':1
    #     ,'rate':1
    #     ,'title':1
    #     # ,'rate_level':{'$cond':[
    #     #     {'$lt':['$rate','7.5']}
    #     #     ,{'$cond':[{'$gte':['$rate','6.5']},'Middle','Low']}
    #     #     ,'High'
    #     # ]}
    # }}
    ,{'$group':{
        '_id':"$rate_level"
        ,'count':{'$sum':1}
        ,'avg_rate':{'$avg':'$rate_number'}
        #,'rate_list':{'$push':'$rate_number'}
        ,'rate_list':{'$push':{'$concat':['$title',':','$rate']}}
    }}
    ,{'$sort':{'count':-1}}
    #,{'$limit':10}
])
for record in results:
    print(record)
# results sample:
# {'_id': 'Middle', 'count': 3, 'avg_rate': 6.8, 'rate_list': ['硬核:6.5', '奎迪:英雄再起:7.1', '欢乐满人间2:6.8']}
# {'_id': 'Low', 'count': 1, 'avg_rate': 6.1, 'rate_list': ['芳龄十六:6.1']}
# {'_id': 'High', 'count': 1, 'avg_rate': 7.7, 'rate_list': ['污垢:7.7']}
print("Done!")
|
990,391 | c6cf062a9abbe57540bc0b758c6fb06cab8fc634 | # --------------
import pandas as pd
import numpy as np
# fixed: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
# code starts here
# NOTE(review): `path` is expected to be injected by the hosting exercise
# platform — confirm before running this standalone.
df = pd.read_csv(path)
print(df.head(5))
#Store features in X
y = df.iloc[:,1]   # target column — presumably list_price; verify schema
X = df.iloc[:,:9]  # first nine columns as features
# NOTE(review): X still contains the target column (the drop below is
# commented out) — looks like target leakage; confirm intent.
#X = X.drop('list_price', axis=1)
#print(X.head())
#Split dataframe
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=6)
# display first five rows of training features and target
print(X_train.head())
print(y_train.head())
# code ends here
# --------------
import matplotlib.pyplot as plt
# code starts here
cols = X_train.columns
#print(cols)
# 3x3 grid of scatter plots: each feature against the target
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(20,20))
for i in range(0,3):
    for j in range(0,3):
        col = cols[i * 3 + j]
        axes[i,j].set_title(col)
        axes[i,j].scatter(X_train[col], y_train)
        axes[i,j].set_xlabel(col)
        axes[i,j].set_ylabel('list_price')
plt.show()
# code ends here
# --------------
# Code starts here
# Code ends here
corr = X_train.corr()
print(corr)
#drop columns from X_train
X_train.drop(['play_star_rating', 'val_star_rating'], axis = 1, inplace=True)
#drop columns form X_test
X_test.drop(['play_star_rating', 'val_star_rating'], axis = 1, inplace=True)
# --------------
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
# Code starts here
regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print(r2)
# Code ends here
# --------------
# Code starts here
residual = y_test - y_pred
print(residual)
# Code ends here
|
990,392 | 7fc828c37ff7b450d74078433c4d8fdcd8687ecb | from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.core.paginator import Paginator
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from django.views.generic import DetailView, CreateView, UpdateView, DeleteView
from cities.forms import CityForm
from cities.models import City
from django.contrib import messages
from trains.models import Train
def show_all(request):
    """List all cities, five per page, with the number of departing trains each.

    Context: 'city_lst' is the current Paginator page, 'trains' maps each
    City to its count of Train rows whose from_city is that city.
    """
    city_lst = City.objects.all()
    train_lst = Train.objects.all()
    # Count departing trains per city directly; the original built lists with
    # a doubled first element and compensated with len(...) - 1.
    count = {}
    for train in train_lst:
        count[train.from_city] = count.get(train.from_city, 0) + 1
    for city in city_lst:
        count.setdefault(city, 0)  # cities with no departures show 0
    paginator = Paginator(city_lst, 5)
    page = request.GET.get('page')
    city_lst = paginator.get_page(page)
    context = {'city_lst': city_lst, 'trains': count}
    return render(request, 'cities/all_cities.html', context)
class CityDetailView(DetailView):
    """Detail page for one city plus the trains departing from it."""
    queryset = City.objects.all()
    context_object_name = 'cities'
    template_name = 'cities/detail.html'
    def get_context_data(self, **kwargs):
        # NOTE(review): bypasses super().get_context_data(), so the default
        # DetailView context (e.g. 'object') is not available — confirm the
        # template only uses 'trains' and 'cities'.
        pk = self.kwargs['pk']
        context = {'trains': Train.objects.filter(from_city=pk),
                   'cities': City.objects.get(id=pk)}
        return context
class CityCreateView(SuccessMessageMixin, LoginRequiredMixin, CreateView):
    """Login-protected form view for creating a City; flashes a success message."""
    model = City
    form_class = CityForm
    template_name = 'cities/create.html'
    success_url = reverse_lazy('cities:all_cities')
    success_message = 'Город успешно создан!'
    login_url = '/users/login/'
class CityUpdateView(SuccessMessageMixin, LoginRequiredMixin, UpdateView):
    """Login-protected form view for editing a City; flashes a success message."""
    model = City
    form_class = CityForm
    template_name = 'cities/update.html'
    success_url = reverse_lazy('cities:all_cities')
    success_message = 'Город успешно обновлен!'
    login_url = '/users/login/'
class CityDeleteView(SuccessMessageMixin, LoginRequiredMixin, DeleteView):
    """Login-protected confirm-and-delete view for a City.

    No success_message is set, so SuccessMessageMixin adds nothing here;
    the delete_city() function below provides the flash on its own path.
    """
    model = City
    template_name = 'cities/delete.html'
    success_url = reverse_lazy('cities:all_cities')
    login_url = '/users/login/'
def delete_city(request, pk):
    """Delete city *pk* on POST, flash a notice, and return to the city list.

    Non-POST requests skip the deletion but are still redirected, so the
    view always returns an HttpResponse.
    """
    if request.method == 'POST':
        city = City.objects.get(id=pk)
        city.delete()
        # NOTE(review): messages.error is used for a *successful* deletion —
        # presumably to render the flash in red; confirm this is intended.
        messages.error(request, 'Город удален!')
    return redirect('cities:all_cities')
|
990,393 | efc64c773ff9e7c2049b82549be9861d78f926f8 |
# NOTE(review): notebook fragment — assumes `nx` (networkx) and graph `G`
# were defined in an earlier cell/chunk.
btws = nx.betweenness_centrality(G)
sorted(btws.items(), key=lambda d:d[1], reverse=True) #you've seen this before (value displayed, not stored)
# Answer:
# IGF1 is the second most central in terms of betweenness
# also very visible from the histogram |
990,394 | f6829db0a04cd0f5973892f5ea012ef43fcda00d | print(9*9)
print(9+9)                       # 18
print("Hello to spyder")
print(type(9))                   # <class 'int'>
print(type(9.2))                 # <class 'float'>
print(type("hello to spyder"))   # <class 'str'>
|
990,395 | 08fd3f9b918a1503906dab53a12cba2df9ac0c9c | #!/usr/bin/env python
# -*- coding: ISO-8859-1 -*-
##################################
# @program synda
# @description climate models data transfer program
# @copyright Copyright “(c)2009 Centre National de la Recherche Scientifique CNRS.
# All Rights Reserved”
# @license CeCILL (https://raw.githubusercontent.com/Prodiguer/synda/master/sdt/doc/LICENSE)
##################################
"""This module contains buffer building function."""
import os
import sys
import sdapp
import sdconst
import sdconfig
from sdtypes import Buffer
from sdexception import SDException
def get_selection_file_buffer(path=None,parameter=None):
    """Retrieve input facets from file, stdin or command line argument and returns a Buffer object.

    Args:
        path: selection file path ('-' means read the selection from stdin)
        parameter: facets from command line arguments

    Raises:
        SDException: when both path and parameter are supplied (exclusive inputs)
    """
    # fixed: avoid the shared mutable default argument ([]) by normalizing here
    if parameter is None:
        parameter=[]

    # coherence check
    if path is not None and len(parameter)>0:
        # both file and parameter, raise exception
        raise SDException("SYDUTILS-001","Incorrect arguments (path=%s, parameter=%s)"%(path,parameter))

    # mode decision
    if path is not None:
        if path=="-":
            mode='stdin'
        else:
            mode='file'
    else:
        if len(parameter)>0:
            # note: we can't key this off len(parameter)==1, because system
            # parameters may be appended to the list (e.g. add_forced_parameter()
            # in the 'sdrfile' module)
            mode='parameter'
        else:
            import select
            # non-blocking poll: data already waiting on stdin means piped input
            if select.select([sys.stdin,],[],[],0.0)[0]:
                mode='stdin'
            else:
                mode='parameter'

    # perform mode specific routine
    if mode=='parameter':
        buffer=Buffer(path=sdconst.SELECTION_FROM_CMDLINE,filename=sdconst.SELECTION_FROM_CMDLINE,lines=parameter)
    elif mode=='stdin':
        lines=sys.stdin.readlines()
        if len(lines)==1:
            # assume all parameters are on one line with space acting as facet
            # delimiter (i.e. not as value delimiter)
            parameter=lines[0].split()
            buffer=Buffer(path=sdconst.SELECTION_FROM_STDIN,filename=sdconst.SELECTION_FROM_STDIN,lines=parameter)
        else:
            # assume same exact format as a selection file
            lines=[line.rstrip(os.linesep) for line in lines] # remove newline
            buffer=Buffer(path=sdconst.SELECTION_FROM_STDIN,filename=sdconst.SELECTION_FROM_STDIN,lines=lines)
    elif mode=='file':
        path=sdconfig.find_selection_file(path)
        with open(path, 'r') as fh:
            lines=fh.readlines()
        lines=[line.rstrip(os.linesep) for line in lines] # remove newline
        buffer=Buffer(path=path,filename=os.path.basename(path),lines=lines)

    return buffer
|
990,396 | 3bcc0ad2526c65730586cae7b065e1965ba3a813 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 22 20:14:48 2019
@author: Changze Han
"""
'''
For Flu:
Convert a cleaned fasta sequence(nucleotide or AA) file to a csv format file.
The purpose of this script is innovating a new way for fas->csv conversion.
Before today, we've been doing fasta -> tab(use a web tool) -> copy to excel -> apply func in excel.
The old procedure was really time consuming.
Now: fas --directly--> csv
'''
import csv
from re import compile
# Inputs ================================================================================================
working_dir = r"/Users/Han/Documents/Haim_Lab(2018_summer)/1.20.21_H3N2_H1N1/H1N1/6.3.21/"
fas_input_name = "295-332 B-GP120 No-PNG.fasta"
csv_output_name = "testd.csv"
# ========================================================================================================
fas_input_file = working_dir + fas_input_name
csv_output_file = working_dir + csv_output_name
# Accession formats: 2 letters + 6 digits, 1 letter + 5 digits, or 3 letters + 5 digits
ACCESSION_MATCHER = compile(r'[A-Za-z]{2}\d{6}|[A-Za-z]{1}\d{5}|[A-Za-z]{3}\d{5}')
fas_input_list = [] # store input fasta contents
csv_output_list = [] #store outputs, a list of lists/rows
def read_fasta(x,y): # read a fasta file x and store into a list y
    """Append every line of text file *x* (newlines kept) to list *y*."""
    # fixed: use a context manager so the handle is closed (original leaked it)
    with open(x, "r") as file:
        for line in file:
            y.append(line)
def write_csv(x,y): # write list x into file y
    """Write the rows *x* to csv file *y* using the excel dialect."""
    # fixed: newline='' lets the csv module control line endings (per the csv
    # docs, required to avoid doubled \r on Windows); the with-block closes
    # the file, so the original's explicit close() was redundant.
    with open(y, 'w+', newline='') as file:
        wr = csv.writer(file, dialect='excel')
        wr.writerows(x)
def getAccessNum(string):
    """Extract the accession number from *string*; when several match,
    the first one is returned (IndexError if none match)."""
    hits = ACCESSION_MATCHER.findall(string)
    return hits[0]
# x is the fas_input_list, construct a list of lists/rows
def construct_output_list(x):
    """Turn alternating (header, sequence) lines of *x* into csv rows.

    Row layout: [header-without-'>', full sequence, residue1, residue2, ...].
    Rows are appended to the module-level csv_output_list (side effect kept
    for backward compatibility with the driver code below).
    """
    fas_i = 0
    # fixed: iterate the parameter x; the original read the global
    # fas_input_list even though x was passed in
    while fas_i < len(x):
        row = []
        #row.append(getAccessNum(x[fas_i])) # add accession number
        row.append(x[fas_i][1:])  # add ALL attributes (strip the leading '>')
        row.append(x[fas_i+1])    # add seq (whole string)
        # one csv column per sequence character (trailing newline included)
        for j in x[fas_i+1]:
            row.append(j)
        csv_output_list.append(row)
        fas_i += 2
# Run the pipeline: read the fasta, build the csv rows, write the csv.
read_fasta(fas_input_file,fas_input_list)
construct_output_list(fas_input_list)
write_csv(csv_output_list,csv_output_file)
# Sanity output: sample counts plus the first/last record of both input and
# output so the user can eyeball that nothing was dropped in conversion.
print("check: ")
print(f"fasta file contains {int(len(fas_input_list)/2)} samples")
print(f"output csv file contains {len(csv_output_list)} samples")
print(f"\nFirst and last seq in fasta: ")
print(f"{fas_input_list[0]}{fas_input_list[1][:10]}......{fas_input_list[1][-10:-1]}")
print(f"{fas_input_list[-2]}{fas_input_list[-1][:10]}......{fas_input_list[-1][-10:-1]}")
print(f"\nFirst and last seq in CSV output: ")
print(f">{csv_output_list[0][0]}\n{csv_output_list[0][2:12]}......{csv_output_list[0][-10:]}")
print(f">{csv_output_list[-1][0]}\n{csv_output_list[-1][2:12]}......{csv_output_list[-1][-10:]}")
|
990,397 | c9bc9af8684a9a5480a92d0ca74769535c64d90d | import torch
import time
from spherical_distortion.util import load_torch_img, torch2numpy
from spherical_distortion.transforms import CameraNormalization
import numpy as np
from skimage import io
import os
# Load the image and its associated K (camera intrinsics) matrix
os.makedirs('outputs', exist_ok=True)
img = load_torch_img('inputs/synthia-car.png').float()
K = torch.from_numpy(np.loadtxt('inputs/synthia-car-intrinsics.txt')).float()
# Set the output parameters
fov_out = (45, 45)      # target output field of view — presumably degrees; confirm in library docs
shape_out = (128, 128)  # output image resolution
random_crop = True
# Initialize the camera normalization transform
transform = CameraNormalization(fov_out, shape_out, random_crop)
# Time the operation and print some feedback
print('Input Shape:', img.shape)
s = time.time()
out = transform(img, K)
print('Time:', time.time() - s)
print('New K:', transform.get_K())
print('Output Shape:', out.shape)
# Save the result
os.makedirs('outputs', exist_ok=True)  # NOTE(review): duplicated makedirs (harmless)
io.imsave('outputs/normalized-synthia-car.png', torch2numpy(out.byte())) |
990,398 | 90d523835aabfd6a2c2048fa87deb2511eacb60c | """
STATEMENT
Given an array S of integers and a target, find all unique quadruplets a, b, c, and d in S such that a + b + c + d = target.
CLARIFICATIONS
- Similar clarifications as 015-3sum.
EXAMPLES
[1, 0, -1, 0, -2, 2], 0 =>
[
[-1, 0, 0, 1],
[-2, -1, 1, 2],
[-2, 0, 0, 2]
]
"""
import collections
def two_sum(sum, num_list):
    '''Return all pairs (a, b) from num_list with a + b == sum.

    Both orderings (a, b) and (b, a) appear when a != b, matching the
    original's set-of-ordered-tuples behavior; a pair (a, a) requires the
    value to occur at least twice in num_list. (The parameter name shadows
    the builtin ``sum`` but is kept for interface compatibility.)
    '''
    # fixed: count once up front instead of calling list.count inside the
    # loop (was O(n^2) for the elt == sum-elt case)
    counts = collections.Counter(num_list)
    result_set = set()
    for elt in counts:
        other = sum - elt
        if other in counts:
            if elt != other or counts[elt] >= 2:
                result_set.add((elt, other))
    return list(result_set)
def threesum(target, num_list):
    '''returns the list of tuples which sum up to the target'''
    triples = set()
    for idx, pivot in enumerate(num_list):
        # the remaining numbers, with the pivot's own slot removed
        remainder = num_list[:idx] + num_list[idx + 1:]
        for pair in two_sum(target - pivot, remainder):
            triples.add(pair + (pivot,))
    return list(triples)
def unique_determiner(tup,dict):
    '''Return True when no key of *dict* holds exactly the same set of
    values as *tup* (order and multiplicity ignored), else False.
    (The parameter name shadows the builtin ``dict`` but is kept for
    interface compatibility.)'''
    # idiom: set(tup) replaces the original index-based comprehension
    check_set = set(tup)
    for elt in dict:
        if set(elt) == check_set:
            return False
    return True
def foursum(target, num_list):
    '''Return quadruplets (as tuples) summing to *target*, unique by value-set.'''
    candidates = set()
    for idx, pivot in enumerate(num_list):
        remainder = num_list[:idx] + num_list[idx + 1:]
        for triple in threesum(target - pivot, remainder):
            candidates.add(triple + (pivot,))
    # keep only one representative per distinct set of four values
    survivors = {}
    for quad in candidates:
        if quad not in survivors and unique_determiner(quad, survivors):
            survivors[quad] = quad
    return list(survivors.values())
print(foursum(0,[1, 0, -1, 0, -2, 2])) |
990,399 | f6a88c9ec20812aadeba7b576a14d9d53668a877 | from calculos.calculos_generales import *
dividir(4, 6)   # divide 4 by 6 — return value discarded; presumably the helper prints its result (confirm)
potencia(4, 6)  # 4 to the 6th power — presumably; confirm argument order in calculos_generales
redondear(4/6) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.