text stringlengths 38 1.54M |
|---|
# One-off schema setup: create the `transactions` table for the club-card system.
from functions import createDbconnection
# Open a DB connection via the project helper (connection details live there).
dbconnection=createDbconnection()
mycursor=dbconnection.cursor()
# NOTE(review): CREATE TABLE raises if the table already exists — presumably a
# run-once setup script; confirm before re-running.
mycursor.execute("CREATE TABLE transactions(Id int(11)AUTO_INCREMENT primary key,amount int(11),balance int(11),member_Id int(11),agents_Id int(11),clubcards_Id int(11))")
|
class Course:
    """A single course offering: its name, instructor, room and meeting time."""

    def __init__(self, name, instructor, room, time):
        self.name = name
        self.instructor = instructor
        self.room = room
        self.time = time

    def print(self):
        """Print a one-line, space-separated summary of the course."""
        print(' '.join((self.name, self.instructor, self.room, str(self.time))))
# Script: run a genetic search over DNN hyper-parameters for the 'INX' daily series.
from GeneticSearchDNN import GeneticSearchDNN
from Chromosome import Chromosome
# NOTE(review): the positional args (40, 8, 8, 4, 10) are search-space settings
# defined by GeneticSearchDNN — verify their meaning there before changing them.
gen = GeneticSearchDNN(40, 8, 8, 4, 10, _testPercentage = 0.2)
# Verbose search: keeps the best 5 candidates, up to 100 generations,
# 5 training epochs per candidate, at most 5 hidden layers; results are
# written under the given searches_after-change/... path.
gen.searchVerbose('INX', 'TIME_SERIES_DAILY', None,'searches_after-change/search-dnn-stock-40-8-8-4-10-5epochs-5hl-INX-daily', _numberToSave=5, _generations=100, _epochs=5, _batchSize=100, _maxHL=5, _initialPopulation=None, _goodLoss = 0, _goodDA = 0.9)
|
import re
import json
# def sum_of_all_numbers():
# file = open('day12.txt').read()
# all_numbers = re.findall('\-?[0-9]+', file)
# result = 0
# for i in all_numbers:
# result += int(i)
# print(result)
def hook(obj):
    """JSON object hook: replace any object containing "red" among its values
    with an empty dict, dropping it (and everything nested inside it)."""
    return {} if "red" in obj.values() else obj
def get_non_red_numbers(content):
    """Parse *content* as JSON, discard objects containing "red" (via `hook`),
    and return an iterator over every integer left in the pruned structure."""
    pruned = json.loads(content, object_hook=hook)
    return map(int, re.findall(r"[-\d]+", str(pruned)))
if __name__ == "__main__":
    # Sum every number in the JSON input, ignoring objects that contain "red"
    # (Advent of Code 2015, day 12 part 2 style).
    # NOTE(review): readline() only reads the first line — fine if the puzzle
    # input is a single line; confirm the input format.
    print(sum(get_non_red_numbers(open('day12.txt').readline())))
import os
import pandas as pd
import pytest
from kf_lib_data_ingest.common.concept_schema import CONCEPT
from kf_lib_data_ingest.common.constants import RACE
from kf_lib_data_ingest.common.errors import InvalidIngestStageParameters
@pytest.fixture(scope="function")
def df():
    """
    Reusable test dataframe
    """
    def record(pid, gid, bid, race):
        # One participant/biospecimen row keyed by the ingest concept schema.
        return {
            CONCEPT.PARTICIPANT.ID: pid,
            CONCEPT.BIOSPECIMEN_GROUP.ID: gid,
            CONCEPT.BIOSPECIMEN.ID: bid,
            CONCEPT.PARTICIPANT.RACE: race,
        }

    return pd.DataFrame(
        [
            record("P1", "G1", "B1", RACE.WHITE),
            record("P1", "G1", "B2", RACE.WHITE),
            record("P2", "G1", "B3", RACE.ASIAN),
        ]
    )
def test_invalid_run_parameters(guided_transform_stage):
    """
    Test running transform with invalid run params.

    FIX: dropped the spurious `**kwargs` parameter — pytest never passes
    keyword arguments to a collected test, so it was dead weight that only
    obscured the fixture signature.
    """
    # Bad keys: integer keys are not valid extract-config identifiers
    with pytest.raises(InvalidIngestStageParameters):
        guided_transform_stage.run({i: "foo" for i in range(5)})
    # Bad values: None is not valid extract output
    # NOTE(review): this comprehension collapses to the single-entry dict
    # {"foor": None} — probably {f"foo{i}": None ...} was intended; the raise
    # is exercised either way.
    with pytest.raises(InvalidIngestStageParameters):
        guided_transform_stage.run({"foor": None for i in range(5)})
def test_read_write(guided_transform_stage, df):
    """
    Test TransformStage.read_output/write_output
    """
    extract_output = {"extract_config_url": df}
    # Transform outputs json
    output = guided_transform_stage.run(extract_output)
    # Re-reading what the stage just wrote must round-trip the same data.
    recycled_output = guided_transform_stage.read_output()
    for target_entity, data in output.items():
        assert target_entity in recycled_output
        other_data = recycled_output[target_entity]
        # Compare using DataFrames
        # (sort columns so column-order differences don't cause false failures)
        assert (
            pd.DataFrame(other_data)
            .sort_index(axis=1)
            .equals(pd.DataFrame(data).sort_index(axis=1))
        )
        # Each target entity should also be persisted as a .tsv in the cache dir.
        assert os.path.isfile(
            os.path.join(guided_transform_stage.stage_cache_dir, target_entity)
            + ".tsv"
        )
|
class calculator:
    """Tiny demo calculator that sums or multiplies its arguments."""

    # NOTE(review): class-level mutable list shared by all instances; unused here.
    m_text_list = []

    def __init__(self):
        self.text_list = []

    def sum_mul(self, choice, *args):
        """Return the sum (choice == "sum") or product (choice == "mul") of *args*.

        BUG FIX: the original fell through for any other *choice* and crashed
        with UnboundLocalError on `result`; now it raises a clear ValueError.
        """
        if choice == "sum":
            result = 0
            for value in args:
                result = result + value
        elif choice == "mul":
            result = 1
            for value in args:
                result = result * value
        else:
            raise ValueError("choice must be 'sum' or 'mul', got %r" % (choice,))
        return result
class MyRange:
    """Iterator over the inclusive integer range [start, end]."""

    def __init__(self, start, end):
        self.current = start
        self.end = end

    def __iter__(self):
        # The object is its own (single-pass) iterator.
        return self

    def __next__(self):
        if self.current > self.end:
            raise StopIteration()
        value = self.current
        self.current += 1
        return value
def YourRange(start, end):
    """Generator yielding start, start+1, ... for as long as the value stays
    strictly below *end* (end itself is excluded)."""
    value = start
    while value < end:
        yield value
        value += 1
################################################################
if __name__ == '__main__':
    # Demo driver: product via calculator, then both custom range types.
    cal = calculator()
    print('calculator instance = ',cal.sum_mul('mul',2,2,3))
    # MyRange includes its end value: prints 0..5.
    for i in MyRange(0,5):
        print(i)
    print()
    # YourRange excludes its end value: prints 0..4.
    for i in YourRange(0, 5):
        print(i)
|
import unittest
from src.application import Application
class TestStringMethods(unittest.TestCase):
    # NOTE(review): despite the unittest scaffolding, this "test" starts a real
    # server and blocks in listen(); it makes no assertions. Presumably meant
    # for manual smoke-testing — confirm before running in CI.
    def test_server_socket_status(self):
        def get_handler(req, res):
            """
            GET handler
            :param src.http_request.HttpRequest req:
            :param src.http_response.HttpResponse res:
            :return:
            """
            res.json('{ "standard": "json", "format": "from", "python": 3 }')
        app = Application()
        app.get('/', get_handler)
        # Serve on port 5003 (blocking call).
        app.listen(5003)
if __name__ == '__main__':
    unittest.main()
"""
Modeling: Mass Total + Source Inversion
=======================================
In this script, we fit `Interferometer` data with a strong lens model where:
- The lens galaxy's light is omitted (and is not present in the simulated data).
- The lens galaxy's total mass distribution is an `EllIsothermal` and `ExternalShear`.
- The source galaxy's light is a parametric `VoronoiMagnification` `Pixelization` and `Constant`
regularization.
"""
# %matplotlib inline
# from pyprojroot import here
# workspace_path = str(here())
# %cd $workspace_path
# print(f"Working Directory has been set to `{workspace_path}`")
from os import path
import autofit as af
import autolens as al
import autolens.plot as aplt
import numpy as np
"""
__Masking__
We define the ‘real_space_mask’, which defines the real-space grid on which the image of the strong lens is evaluated.
"""
# Circular real-space mask: 151x151 grid at 0.05"/pixel, radius 3.0".
real_space_mask = al.Mask2D.circular(
    shape_native=(151, 151), pixel_scales=0.05, radius=3.0
)
"""
__Dataset__
Load and plot the strong lens `Interferometer` dataset `mass_sie__source_sersic` from .fits files , which we will fit
with the lens model.
"""
dataset_name = "mass_sie__source_sersic"
dataset_path = path.join("dataset", "interferometer", dataset_name)
# Load visibilities, noise map and uv-wavelengths from .fits, tied to the
# real-space mask defined above.
interferometer = al.Interferometer.from_fits(
    visibilities_path=path.join(dataset_path, "visibilities.fits"),
    noise_map_path=path.join(dataset_path, "noise_map.fits"),
    uv_wavelengths_path=path.join(dataset_path, "uv_wavelengths.fits"),
    real_space_mask=real_space_mask,
)
# Diagnostic plots of the raw dataset.
interferometer_plotter = aplt.InterferometerPlotter(interferometer=interferometer)
interferometer_plotter.subplot_interferometer()
interferometer_plotter.subplot_dirty_images()
"""
We now create the `Interferometer` object which is used to fit the lens model.
This includes a `SettingsInterferometer`, which includes the method used to Fourier transform the real-space
image of the strong lens to the uv-plane and compare directly to the visibilities. We use a non-uniform fast Fourier
transform, which is the most efficient method for interferometer datasets containing ~1-10 million visibilities.
"""
# Use the non-uniform FFT to map the real-space model image to the uv-plane.
settings_interferometer = al.SettingsInterferometer(
    transformer_class=al.TransformerNUFFT
)
interferometer = interferometer.apply_settings(settings=settings_interferometer)
"""
__Model__
We compose our lens model using `Model` objects, which represent the galaxies we fit to our data. In this
example our lens model is:
- The lens galaxy's total mass distribution is an `EllIsothermal` with `ExternalShear` [7 parameters].
- An `EllSersic` `LightProfile` for the source galaxy's light [7 parameters].
The number of free parameters and therefore the dimensionality of non-linear parameter space is N=12.
NOTE:
**PyAutoLens** assumes that the lens galaxy centre is near the coordinates (0.0", 0.0").
If for your dataset the lens is not centred at (0.0", 0.0"), we recommend that you either:
- Reduce your data so that the centre is (`autolens_workspace/notebooks/preprocess`).
- Manually override the lens model priors (`autolens_workspace/notebooks/imaging/modeling/customize/priors.py`).
"""
# Lens: isothermal ellipsoid mass profile plus external shear, at z=0.5.
lens = af.Model(
    al.Galaxy, redshift=0.5, mass=al.mp.EllIsothermal, shear=al.mp.ExternalShear
)
# Source: elliptical Sersic light profile, at z=1.0.
source = af.Model(al.Galaxy, redshift=1.0, bulge=al.lp.EllSersic)
model = af.Collection(galaxies=af.Collection(lens=lens, source=source))
"""
__Search__
The lens model is fitted to the data using a non-linear search. In this example, we use the nested sampling algorithm
Dynesty (https://dynesty.readthedocs.io/en/latest/).
The folder `autolens_workspace/notebooks/imaging/modeling/customize/non_linear_searches` gives an overview of the
non-linear searches **PyAutoLens** supports. If you are unclear of what a non-linear search is, checkout chapter 2 of
the **HowToLens** lectures.
The `name` and `path_prefix` below specify the path where results are stored in the output folder:
`/autolens_workspace/output/imaging/mass_sie__source_sersic/mass[sie]_source[bulge]/unique_identifier`.
__Unique Identifier__
In the path above, the `unique_identifier` appears as a collection of characters, where this identifier is generated
based on the model, search and dataset that are used in the fit.
An identical combination of model, search and dataset generates the same identifier, meaning that rerunning the
script will use the existing results to resume the model-fit. In contrast, if you change the model, search or dataset,
a new unique identifier will be generated, ensuring that the model-fit results are output into a separate folder.
"""
# Dynesty static nested sampler; nlive trades sampling resolution for speed.
search = af.DynestyStatic(
    path_prefix=path.join("interferometer"),
    name="mass[sie]_source[inversion]",
    unique_tag=dataset_name,
    nlive=50,
)
"""
__Analysis__
The `AnalysisInterferometer` object defines the `log_likelihood_function` used by the non-linear search to fit the
model to the `Interferometer`dataset.
For interferometer model-fits, we include a `SettingsInversion` object which describes how the linear algebra
calculations required to use an `Inversion` are performed. One of two different approaches can be used:
- **Matrices:** Use a numerically more accurate matrix formalism to perform the linear algebra. For datasets
of < 100,000 visibilities this approach is computationally feasible, and if your dataset is this small we recommend
that you use this option (by setting `use_linear_operators=False`). However, for larger visibility datasets these matrices
require excessive amounts of memory (> 16 GB) to store, making this approach unfeasible.
- **Linear Operators (default)**: These are slightly less accurate, but do not require excessive amounts of memory to
store the linear algebra calculations. For any dataset with > 1 million visibilities this is the only viable approach
to perform lens modeling efficiently.
"""
# Linear-operator formalism: memory-efficient for large visibility datasets.
settings_inversion = al.SettingsInversion(use_linear_operators=True)
analysis = al.AnalysisInterferometer(
    dataset=interferometer, settings_inversion=settings_inversion
)
"""
__Model-Fit__
We can now begin the model-fit by passing the model and analysis object to the search, which performs a non-linear
search to find which models fit the data with the highest likelihood.
Checkout the output folder for live outputs of the results of the fit, including on-the-fly visualization of the best
fit model!
"""
result = search.fit(model=model, analysis=analysis)
"""
__Result__
The search returns a result object, which includes:
- The lens model corresponding to the maximum log likelihood solution in parameter space.
- The corresponding maximum log likelihood `Tracer` and `FitInterferometer` objects.
- Information on the posterior as estimated by the `Dynesty` non-linear search.
"""
print(result.max_log_likelihood_instance)
# Plot the best-fit tracer on the masked real-space grid.
tracer_plotter = aplt.TracerPlotter(
    tracer=result.max_log_likelihood_tracer, grid=real_space_mask.masked_grid_sub_1
)
tracer_plotter.subplot_tracer()
# Plot the best-fit model against the interferometer data.
fit_interferometer_plotter = aplt.FitInterferometerPlotter(
    fit=result.max_log_likelihood_fit
)
fit_interferometer_plotter.subplot_fit_interferometer()
fit_interferometer_plotter.subplot_fit_dirty_images()
# Posterior corner plot from the Dynesty samples.
dynesty_plotter = aplt.DynestyPlotter(samples=result.samples)
dynesty_plotter.cornerplot()
"""
Checkout `autolens_workspace/notebooks/interferometer/modeling/results.py` for a full description of the result object.
"""
|
import random
import math
def rand_probability():
    """Return a uniform random probability from {0.00, 0.01, ..., 1.00}."""
    return random.randint(0, 100) / 100
def euclidean_distance(start, end):
    """Euclidean (L2) distance between 2-D points *start* and *end*."""
    dx = start[0] - end[0]
    dy = start[1] - end[1]
    return math.sqrt(dx * dx + dy * dy)
|
from django.contrib import admin
# Register your models here.
from compras.models import Producto, Pedido, Lineapedido
class LineapedidoInline(admin.StackedInline):
    # Show order lines inline on the Pedido admin page; no blank extra rows.
    model = Lineapedido
    extra = 0
class PedidoAdmin(admin.ModelAdmin):
    # Columns shown in the order changelist.
    list_display = ['id','usuario','fecha']
    inlines = [LineapedidoInline]
# Producto uses the default ModelAdmin; Pedido gets the customized admin above.
admin.site.register(Producto)
admin.site.register(Pedido,PedidoAdmin)
|
import pygame
from proton.component import *
from pygame import Color
from proton.component import Component
from proton.ui.uigraphics import UIGraphics
from proton.ui.uigraphics import Dims
class TextComponent(UIGraphics):
    """UI component that renders a text string into the owning game object's sprite."""
    def __init__(self, _gameobject):
        """
        :param _gameobject: game object this component is attached to
        """
        super(TextComponent, self).__init__(_gameobject)
        self.text = None
        self.textSize = 1
        #self.textColor = Color.Red
        # Default placement/size; overwritten by the dims passed to settext().
        self.setup(Dims(100,200,200,100))
        # NOTE(review): pygame.font must be initialised (pygame.init) before this runs.
        self.font = pygame.font.SysFont('Monospace', 100)
    def settext(self,txt, dims, color):
        """Render *txt* in *color*, install it as the object's sprite, and resize to *dims*."""
        self.text=txt
        # Second arg False disables antialiasing.
        textsurface = self.font.render(self.text, False, color)
        self.game_object().graphics.set_sprite_obj(textsurface)
        self.setup(dims)
|
from hwt.synthesizer.exceptions import TypeConversionErr
class InvalidVHDLTypeExc(Exception):
    """Raised when a VHDL type's width is invalid in the context of a variable.

    The offending variable is expected to be attached after construction via
    the ``variable`` attribute.

    BUG FIX: ``__str__`` unconditionally read ``self.variable`` (never set by
    ``__init__``), so formatting the exception itself raised AttributeError.
    It now degrades gracefully when the variable was not attached.
    """

    def __init__(self, vhdlType):
        self.vhdlType = vhdlType

    def __str__(self):
        variable = getattr(self, "variable", None)
        variableName = variable.name if variable is not None else "<unknown>"
        return ("Invalid type, width is %s in the context of variable %s"
                % (str(self.vhdlType.getWidth()), variableName))

    def __repr__(self):
        return self.__str__()
class HdlType():
    """Base class for HDL type descriptors.

    Equality is by exact Python type; subclasses are expected to provide a
    `name` attribute and a value class via `getValueCls()`.
    """

    def __init__(self):
        # Optional constraint (e.g. a width/range); None means unconstrained.
        self.constrain = None

    def __eq__(self, other):
        return type(self) is type(other)

    def __hash__(self):
        # BUG FIX: the original hashed `self.name`, which this base class never
        # sets, so hash() raised AttributeError; fall back to the class name.
        return hash((getattr(self, "name", self.__class__.__name__), self.constrain))

    def fromPy(self, v):
        """Build a value of this type from the Python value *v*."""
        return self.getValueCls().fromPy(v, self)

    def convert(self, sigOrVal, toType):
        """Convert *sigOrVal* to *toType*, caching the convertor on first use."""
        if sigOrVal._dtype == toType:
            return sigOrVal
        try:
            c = self._convert
        except AttributeError:
            c = self.getConvertor()
            # BUG FIX: the original stored the convertor under `_convertor`
            # while looking it up as `_convert`, so the cache never hit and
            # getConvertor() ran on every call.
            self._convert = c
        return c(self, sigOrVal, toType)

    @classmethod
    def getConvertor(cls):
        return HdlType.defaultConvert

    def defaultConvert(self, sigOrVal, toType):
        # Subclasses override getConvertor(); the default always fails.
        raise TypeConversionErr("Conversion of %r of type \n%r to type %r is not implemented"
                                % (sigOrVal, self, toType))

    def __repr__(self):
        return "<HdlType %s>" % (self.__class__.__name__)
|
import requests, bs4
# Fetch the OGame overview page and extract the score field.
res = requests.get('https://s155-en.ogame.gameforge.com/game/index.php?page=overview')
res.raise_for_status()  # fail loudly on HTTP errors instead of parsing an error page
soup = bs4.BeautifulSoup(res.text, 'html.parser')
# BUG FIX: `rec` was only assigned in a commented-out line, so print(rec)
# raised NameError; the selector is restored.
# NOTE(review): this page normally needs an authenticated session — without
# login cookies the selector will likely match nothing.
rec = soup.select('#scoreContentField')
print(rec)
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os.path as osp
import numpy as np
from utils.wav_processing import *
class WaveRNNIE:
    """OpenVINO Inference-Engine wrapper for a two-part WaveRNN vocoder:
    an upsampling network plus an autoregressive RNN network."""
    def __init__(self, model_upsample, model_rnn, ie, target=11000, overlap=550, hop_length=275, bits=9, device='CPU',
                 verbose=False, upsampler_width=-1):
        """
        return class provided WaveRNN inference.
        :param model_upsample: path to xml with upsample model of WaveRNN
        :param model_rnn: path to xml with rnn parameters of WaveRNN model
        :param ie: Inference Engine core object used to read/load the networks
        :param target: length of the processed fragments
        :param overlap: overlap of the processed frames
        :param hop_length: The number of samples between successive frames, e.g., the columns of a spectrogram.
        :param bits: bit-depth setting (stored; not used directly in this class)
        :param device: inference device name, e.g. 'CPU'
        :param verbose: verbose flag (stored; not used directly in this class)
        :param upsampler_width: if > 0, reshape the upsampler 'mels' input to this width
        :return:
        """
        self.verbose = verbose
        self.device = device
        self.target = target
        self.overlap = overlap
        self.dynamic_overlap = overlap
        self.hop_length = hop_length
        self.bits = bits
        # Number of samples trimmed from each end of the upsampler output.
        self.indent = 550
        self.pad = 2
        # RNN executables are pre-built for exactly these batch sizes.
        self.batch_sizes = [1, 2, 4, 8, 16, 32, 64, 128, 256]
        self.ie = ie
        self.upsample_net = self.load_network(model_upsample)
        if upsampler_width > 0:
            orig_shape = self.upsample_net.input_info['mels'].input_data.shape
            self.upsample_net.reshape({"mels" : (orig_shape[0], upsampler_width, orig_shape[2])})
        self.upsample_exec = self.create_exec_network(self.upsample_net)
        self.rnn_net = self.load_network(model_rnn)
        self.rnn_exec = self.create_exec_network(self.rnn_net, batch_sizes=self.batch_sizes)
        # fixed number of the mels in mel-spectrogramm
        self.mel_len = self.upsample_net.input_info['mels'].input_data.shape[1] - 2 * self.pad
        self.rnn_width = self.rnn_net.input_info['x'].input_data.shape[1]
    def load_network(self, model_xml):
        """Read an IR network from its .xml; the .bin weights file is assumed alongside."""
        model_bin_name = ".".join(osp.basename(model_xml).split('.')[:-1]) + ".bin"
        model_bin = osp.join(osp.dirname(model_xml), model_bin_name)
        print("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
        net = self.ie.read_network(model=model_xml, weights=model_bin)
        return net
    def create_exec_network(self, net, batch_sizes=None):
        """Load *net* onto the device. With *batch_sizes*, return a list of
        executables, one per batch size; otherwise a single executable."""
        if batch_sizes is not None:
            exec_net = []
            for b_s in batch_sizes:
                net.batch_size = b_s
                exec_net.append(self.ie.load_network(network=net, device_name=self.device))
        else:
            exec_net = self.ie.load_network(network=net, device_name=self.device)
        return exec_net
    @staticmethod
    def get_rnn_init_states(b_size=1, rnn_dims=328):
        """Zero-initialised RNN hidden states (h1, h2) and input sample x."""
        h1 = np.zeros((b_size, rnn_dims), dtype=float)
        h2 = np.zeros((b_size, rnn_dims), dtype=float)
        x = np.zeros((b_size, 1), dtype=float)
        return h1, h2, x
    def forward(self, mels):
        """Vocode a full mel-spectrogram: upsample in mel_len-sized chunks,
        fold into overlapping batches, then run the autoregressive RNN."""
        # Number of mel_len chunks, rounded up when there is a remainder.
        n_parts = mels.shape[1] // self.mel_len + 1 if mels.shape[1] % self.mel_len > 0 else mels.shape[
            1] // self.mel_len
        upsampled_mels = []
        aux = []
        last_padding = 0
        for i in range(n_parts):
            i_start = i * self.mel_len
            i_end = i_start + self.mel_len
            if i_end > mels.shape[1]:
                # Zero-pad the final chunk up to mel_len; remember how much so
                # the padded tail can be trimmed after upsampling.
                last_padding = i_end - mels.shape[1]
                mel = np.pad(mels[:, i_start:mels.shape[1], :], ((0, 0), (0, last_padding), (0, 0)), 'constant',
                             constant_values=0)
            else:
                mel = mels[:, i_start:i_end, :]
            upsampled_mels_b, aux_b = self.forward_upsample(mel)
            upsampled_mels.append(upsampled_mels_b)
            aux.append(aux_b)
        if len(aux) > 1:
            upsampled_mels = np.concatenate(upsampled_mels, axis=1)
            aux = np.concatenate(aux, axis=1)
        else:
            upsampled_mels = upsampled_mels[0]
            aux = aux[0]
        if last_padding > 0:
            # Each padded mel frame corresponds to hop_length output samples.
            upsampled_mels = upsampled_mels[:, :-last_padding * self.hop_length, :]
            aux = aux[:, :-last_padding * self.hop_length, :]
        upsampled_mels, (_, self.dynamic_overlap) = fold_with_overlap(upsampled_mels, self.target, self.overlap)
        aux, _ = fold_with_overlap(aux, self.target, self.overlap)
        audio = self.forward_rnn(mels, upsampled_mels, aux)
        return audio
    def forward_upsample(self, mels):
        """Run the upsampling network; trim `indent` samples from both ends."""
        mels = pad_tensor(mels, pad=self.pad)
        out = self.upsample_exec.infer(inputs={"mels": mels})
        upsample_mels, aux = out["upsample_mels"][:, self.indent:-self.indent, :], out["aux"]
        return upsample_mels, aux
    def forward_rnn(self, mels, upsampled_mels, aux):
        """Generate audio sample-by-sample; crossfade folded batches back into
        one waveform and fade out the tail."""
        wave_len = (mels.shape[1] - 1) * self.hop_length
        # The aux features are split into 4 equal slices, one per RNN aux input.
        d = aux.shape[2] // 4
        aux_split = [aux[:, :, d * i:d * (i + 1)] for i in range(4)]
        b_size, seq_len, _ = upsampled_mels.shape
        seq_len = min(seq_len, aux_split[0].shape[1])
        if b_size not in self.batch_sizes:
            raise Exception('Incorrect batch size {0}. Correct should be 2 ** something'.format(b_size))
        # Select the pre-built executable network for this batch size.
        active_network = self.batch_sizes.index(b_size)
        h1, h2, x = self.get_rnn_init_states(b_size, self.rnn_width)
        output = []
        for i in range(seq_len):
            m_t = upsampled_mels[:, i, :]
            a1_t, a2_t, a3_t, a4_t = \
                (a[:, i, :] for a in aux_split)
            out = self.rnn_exec[active_network].infer(inputs={"m_t": m_t, "a1_t": a1_t, "a2_t": a2_t, "a3_t": a3_t,
                                                             "a4_t": a4_t, "h1.1": h1, "h2.1": h2, "x": x})
            logits = out["logits"]
            h1 = out["h1"]
            h2 = out["h2"]
            # Sample the next audio value and feed it back as the next input x.
            sample = infer_from_discretized_mix_logistic(logits)
            x = sample[:]
            x = np.expand_dims(x, axis=1)
            output.append(sample)
        output = np.stack(output).transpose(1, 0)
        output = output.astype(np.float64)
        if b_size > 1:
            # Un-fold the overlapped batches with a crossfade.
            output = xfade_and_unfold(output, self.dynamic_overlap)
        else:
            output = output[0]
        # Fade the final 20 hops to zero to avoid an audible click at the end.
        fade_out = np.linspace(1, 0, 20 * self.hop_length)
        output = output[:wave_len]
        output[-20 * self.hop_length:] *= fade_out
        return output
|
import pandas as pd
# Split the affective-ratings CSV (tab-separated) into train/val samples.
# NOTE(review): error_bad_lines is deprecated and removed in pandas >= 2.0
# (use on_bad_lines="skip"); pin pandas or update accordingly.
df = pd.read_csv(
    "AC_ratings_google3m_koeper_SiW.csv", error_bad_lines=False, delimiter="\t"
)
train_df = df.sample(n=50000)
# NOTE(review): val_df is sampled independently from the full frame, so it can
# overlap train_df — confirm whether a disjoint split is intended.
val_df = df.sample(n=2500)
train_df.to_csv("train/AC_ratings_google3m_koeper_SiW.csv", sep="\t")
val_df.to_csv("val/AC_ratings_google3m_koeper_SiW.csv", sep="\t")
|
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
from . import runner
from . import analyze_runner, check, findjob, raw_runner, rsync_runner
# The `sql_runner` module is not currently Python 2.4 compatible.
if not sys.version.startswith('2.4.'):
    from . import sql_runner
try:
    from . import cachesync_runner
except ImportError:
    # Optional module: continue without cache syncing.
    # BUG FIX: the original bare `except:` also swallowed unrelated errors
    # (including SystemExit/KeyboardInterrupt); only a missing module is
    # an acceptable reason to skip it.
    pass
from .runner import run_cmdline
from .tools import make, make2, reval, resolve, DD, defaults_merge, flatten, expand
|
import sys
import os
def binary_search(list, key):
    """Return True if *key* occurs in *list*, else False.

    Classic iterative binary search — the result is only meaningful when the
    input sequence is sorted in ascending order.
    """
    lo = 0
    hi = len(list) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if list[mid] == key:
            return True
        if key < list[mid]:
            hi = mid - 1
        else:
            lo = mid + 1
    return False
print(binary_search([1,2,3,4,5], 8))
# NOTE(review): the two lists below are NOT sorted, so binary search gives
# arbitrary results on them — presumably here to demonstrate exactly that.
print(binary_search([1,2,8,4,5], 8))
print(binary_search([1,4,9,12,5], 4))
# Copyright 2019 The MLPerf Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Python demo showing how to use the MLPerf Inference load generator bindings over the network.
This part of the demo runs the "demo SUT" which is connected over the network to the LON node.
A corresponding "demo LON node" with the demo test is implemented in py_demo_server_lon.py.
The SUT is implemented using a Flask server, with dummy implementation of the inference processing.
Two endpoints are exposed:
- /predict/ : Receives a query (e.g., a text) runs inference, and returns a prediction.
- /getname/ : Get the name of the SUT.
The current implementation is a dummy implementation, which does not use
a real DNN model, batching, or pre/postprocessing code,
but rather just returns subset of the input query as a response,
Yet, it illustrates the basic structure of a SUT server.
"""
import argparse
from flask import Flask, request, jsonify
app = Flask(__name__)
node = ""
def preprocess(query):
    """[SUT Node] A dummy preprocess."""
    # A real system would batch, tokenize, resize, normalize, etc.;
    # the demo passes the query through untouched.
    return query
def dnn_model(query):
    """[SUT Node] A dummy DNN model."""
    # A real system would call a model such as ResNet or BERT here;
    # the demo echoes the input as the "prediction".
    return query
def postprocess(query):
    """[SUT Node] A dummy postprocess."""
    # A real system would run e.g. NMS or detokenization here;
    # the demo returns the input unchanged.
    return query
@app.route('/predict/', methods=['POST'])
def predict():
    """Receives a query (e.g., a text) runs inference, and returns a prediction."""
    # Pull the "query" field from the JSON body, run the dummy pipeline,
    # and echo the result back as JSON.
    query = request.get_json(force=True)['query']
    result = postprocess(dnn_model(preprocess(query)))
    return jsonify(result=result)
@app.route('/getname/', methods=['POST', 'GET'])
def getname():
    """Returns the name of the SUT, suffixed with the node name when one is set.

    BUG FIX: the original expression
        jsonify(name=f'Demo SUT (Network SUT) node' + (' ' + node) if node else '')
    parsed as `(base + ' ' + node) if node else ''`, so with an empty node the
    endpoint returned an empty name instead of the base SUT name.
    """
    suffix = (' ' + node) if node else ''
    return jsonify(name='Demo SUT (Network SUT) node' + suffix)
if __name__ == '__main__':
    # CLI: --port for the Flask server, --node to tag this SUT instance's name.
    parser = argparse.ArgumentParser()
    parser.add_argument('--port', type=int, default=8000)
    parser.add_argument('--node', type=str, default="")
    args = parser.parse_args()
    # Rebind the module-level `node` read by /getname/.
    node = args.node
    app.run(debug=False, port=args.port)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018/12/21 10:10 @Author : xycfree
# @Descript:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
# ========================================================== #
# Headless-Chrome setup (disabled); reference articles:
# http://brucedone.com/archives/1201
# http://www.cnblogs.com/fnng/p/7797839.html
# chrome_options = Options()
# chrome_options.add_argument('--headless')
# driver = webdriver.Chrome(chrome_options=chrome_options)
# ========================================================== #
# NOTE(review): chrome_driver is defined but never passed to Chrome();
# the chromedriver found on PATH is used instead.
chrome_driver = r"D:\tools\chrome_driver\chromedriver.exe"
driver = webdriver.Chrome()
# Open a TradingView chart and log in through the UI.
driver.get("https://cn.tradingview.com/chart/rP0mQCuj/")
driver.maximize_window()
driver.implicitly_wait(3)
# "登录" is the "Log in" link text on the Chinese-language site.
driver.find_element_by_link_text("登录").click()
# SECURITY: hard-coded credentials committed to source — move them to
# environment variables or a secrets store.
driver.find_element_by_name("username").send_keys('bingpoli')
driver.find_element_by_name("password").send_keys('bingpoli123')
driver.find_element_by_class_name("tv-button__loader").click()
driver.implicitly_wait(3)
# Capture the session cookies after login.
cookies = driver.get_cookies()
print('cookies: {}'.format(cookies))
# js = ""
# driver.execute_script()
from Car_Detection_TF.yolo import YOLO
from Mosse_Tracker.TrackerManager import *
from PIL import Image
from VIF.vif import VIF
"""
Unit test for VIF class.
"""
def init_tracker():
    """Detect vehicles in the first frame of the test video with YOLO, build a
    tracker per detection, then advance all trackers over the next 30 frames.

    Returns the list of updated trackers.
    """
    cap = cv2.VideoCapture('videos/Easy.mp4')
    ret, frame = cap.read()
    yolo = YOLO()
    image = Image.fromarray(frame)
    img, bboxes = yolo.detect_image(image)
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    trackers = []
    for i, bbox in enumerate(bboxes):
        # bbox layout appears to be (label, xmin, xmax, ymin, ymax) —
        # verify against Car_Detection_TF.yolo.detect_image.
        label = bbox[0]
        xmin = int(bbox[1])
        xmax = int(bbox[2])
        ymin = int(bbox[3])
        ymax = int(bbox[4])
        # NOTE(review): recomputed per bbox although the frame hasn't changed.
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        tr = Tracker(frame_gray, (xmin, ymin, xmax, ymax), 480, 360, 1)
        trackers.append(tr)
    # Track through the next 30 frames so each tracker accumulates history.
    for i in range(30):
        ret, frame = cap.read()
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        for j, t in enumerate(trackers):
            t.update(frame_gray)
    return trackers
def test_vif_init():
    """
    Verify that object is created successfully.

    FIX: the original wrapped construction in try/except with `assert 1` /
    `assert 0`, which swallowed the real exception and made failures
    undiagnosable. Letting the exception propagate gives pytest the full
    traceback, and a construction error still fails the test.
    """
    vif = VIF()
    assert vif is not None
def test_vif_process():
    """
    Verify the results of processing set of frames for a given tracker.

    FIX: the original try/except + `assert 0/1` pattern hid every failure
    reason; exceptions now surface with full tracebacks.
    """
    trackers = init_tracker()
    history = trackers[0].getHistory()
    print(history)
    # NOTE(review): the vif.process(...) call is still commented out pending a
    # stable expected result; for now only the tracking history is checked.
    # vif = VIF()
    # vif.process(history)
    assert history is not None
|
from time import sleep
import pygame
import glob
import os
from pygame import event
from auxx import *
pygame.init()
BG_COLOR = (83,209,212)
#Quick helper function for getting board coordinates
def coordToBoard(coord):
    """Map a (col, row) board coordinate to top-left pixel coordinates.

    Relies on the global `boardSize` (star-imported from auxx): each cell is
    boardSize/10 pixels, and the y axis is flipped so row 0 is at the bottom.
    """
    print('coord = ', coord)
    cell = boardSize / 10
    px = coord[0] * cell
    py = boardSize - (coord[1] + 1) * cell
    return (px, py)
class Marker(pygame.sprite.Sprite):
    """Hit/miss marker sprite scaled to exactly one board cell."""
    def __init__(self,png, pos, mark):
        """
        :param png: path to the marker image asset
        :param pos: board coordinate — either a single (col, row) pair or a
                    sequence whose first element is that pair
        :param mark: marker kind, e.g. "hit" or "miss"
        """
        super(Marker,self).__init__()
        self.image = pygame.image.load(png)
        # w, h = self.image.get_size()
        # w *= (boardSize / 750) / 8
        # h *= (boardSize / 750) / 8
        # Scale the marker to one cell: boardSize/10 square pixels.
        self.w = int(boardSize / 10)
        self.h = int(boardSize / 10)
        # self.image = pygame.transform.scale(self.image, (int(w), int(h)))
        self.image = pygame.transform.smoothscale(self.image, (self.w, self.h))
        self.rect = self.image.get_rect()
        # Treat pure black as transparent.
        pygame.Surface.set_colorkey(self.image,[0,0,0])
        self.pos = pos
        # Accept either a bare coordinate pair or a list of pairs (use the first).
        if type(self.pos[0]) == int:
            self.offset = self.pos
        else:
            self.offset = self.pos[0]
        self.mark = mark
        self.rect.topleft = coordToBoard(self.offset)
    def draw(self, surface):
        """Blit the marker onto *surface* at its board position."""
        surface.blit(self.image, self.rect)
class Ship(pygame.sprite.Sprite):
    """Ship sprite scaled relative to the board size, anchored at its first cell."""
    def __init__(self,png,pos):
        """
        :param png: path to the ship image asset
        :param pos: sequence of board coordinates; pos[0] anchors the sprite
        """
        super(Ship,self).__init__()
        self.image = pygame.image.load(png)
        w, h = self.image.get_size()
        # Scale factor presumably tuned for assets drawn for a 750px board —
        # confirm against the asset dimensions.
        w *= (boardSize / 750) / 8
        h *= (boardSize / 750) / 8
        self.image = pygame.transform.scale(self.image, (int(w), int(h)))
        self.rect = self.image.get_rect()
        self.rect.topleft = coordToBoard(pos[0])
        # Treat pure black as transparent.
        pygame.Surface.set_colorkey(self.image,[0,0,0])
        self.pos = pos
class Board:
    """One player's battleship board: owns the board surface, ship sprites and
    hit/miss markers, and knows how to draw itself onto the screen."""
    def __init__(self, screen, pos, boardSize):
        """
        :param screen: destination pygame surface (the window)
        :param pos: (x, y) top-left position of the board on the screen
        :param boardSize: side length of the square board, in pixels
        """
        self.screen = screen
        self.boardSize = boardSize
        self.surface = pygame.Surface((self.boardSize, self.boardSize))
        self.markers = pygame.sprite.Group() # Array of hit/miss markers
        self.ships = pygame.sprite.Group() # Array of ship sprites
        self.pos = pos
        self.rect = self.surface.get_rect(topleft = self.pos)
        self.drawShips = True
        self.letters = ["A","B","C","D","E","F","G","H","I","J"]
        self.numbers = ["1","2","3","4","5","6","7","8","9","10"]
        # Retrieve Assets: map each known asset key to its file under ./Assets
        self.assetsList = {}
        self.assetNameKeys = ["BS 1","BS 2","BS 3","BS 4","BS 5","BS 6", "BS_V 1","BS_V 2","BS_V 3","BS_V 4","BS_V 5","BS_V 6","hit","miss"]
        dir_path = os.path.dirname(os.path.realpath(__file__))
        assetsFolder = glob.glob(dir_path + "/Assets/" + "*")
        for asset in assetsFolder:
            # print(asset)
            for key in self.assetNameKeys:
                if(asset.find(key) != -1):
                    self.assetsList[key] = asset
        self.surface.fill(BG_COLOR) #what is this ah oops -katelyn
    def clearBoard(self):
        """Reset the board: background fill, remove all markers and ships."""
        self.surface.fill(BG_COLOR)
        self.markers.empty()
        self.ships.empty()
    #Used to draw the initial board
    def drawBoard(self):
        """Draw gridlines, axis labels, (optionally) ships and markers, then
        blit the board surface onto the screen."""
        # Gridlines and labels
        for x in range(0,11):
            xPos = (x)*self.boardSize/10
            pygame.draw.line(self.surface, (0,0,0), (xPos,0), (xPos,self.boardSize))
        for y in range(0,11):
            yPos = (y)*self.boardSize/10
            pygame.draw.line(self.surface, (0,0,0), (0,yPos), (self.boardSize,yPos))
        textType = pygame.font.Font('freesansbold.ttf', 20)
        #Drawing the letters on axis
        for letter in self.letters:
            textSurf, textRect = self.text_objects(letter, textType)
            # Column index: 'A' -> 0, 'B' -> 1, ...
            x = ord(letter) - 65
            xpos = x*self.boardSize/10 + self.pos[0] + (self.boardSize*.05)
            textRect.center = (xpos, 25) # Make this flexible
            self.screen.blit(textSurf,textRect)
        #Drawing the numbers on axis
        for number in self.numbers:
            textSurf, textRect = self.text_objects(number, textType)
            x = int(number) - 1
            ypos = self.boardSize + (self.boardSize*.05) - x*self.boardSize/10 - self.boardSize/40
            textRect.center = (self.pos[0]-25, ypos)
            self.screen.blit(textSurf,textRect)
        if self.drawShips:
            self.ships.draw(self.surface)
        self.markers.draw(self.surface)
        self.screen.blit(self.surface, self.pos)
    #Creates a sprite at coordinate to represent hit or miss
    def addShot(self, mark, coord):
        """Add a "hit" or "miss" marker at *coord* and redraw immediately."""
        if (mark == "hit"):
            png = self.assetsList["hit"]
        else:
            png = self.assetsList["miss"]
        print(str(mark) + " at " + str(coord))
        self.markers.add(Marker(png, coord, mark))
        self.drawBoard()
        pygame.display.flip()
    def addShips(self, length, positions, orientation, hover, valid):
        """Place (or preview) a ship of *length* at *positions*.

        With *hover* true the ship is drawn translucently at the cursor
        (tinted red when *valid* is false) without being committed;
        otherwise it is added to the board's ship group.
        """
        # Asset keys are "BS n" (horizontal) or "BS_V n" (vertical).
        asset = "BS_V " if orientation == "vertical" else "BS "
        asset += str(length)
        ship = Ship(self.assetsList[asset], positions)
        if hover:
            # Transparency
            ship.image.set_alpha(100)
            # offset position
            ship.rect[0] += self.pos[0]
            ship.rect[1] += self.pos[1]
            if not valid: # tint
                tint = pygame.Surface(ship.image.get_size()).convert_alpha()
                tint.fill((255, 50, 50))
                ship.image.blit(tint, (0,0), special_flags=pygame.BLEND_RGBA_MULT)
            self.screen.blit(ship.image, ship.rect)
        else:
            self.ships.add(ship)
    # Shows ships on board
    def showShips(self):
        """Enable ship drawing and redraw."""
        self.drawShips = True
        self.drawBoard()
        pygame.display.flip()
    # Hides ships
    def hideShips(self):
        """Disable ship drawing, wipe already-drawn ships, and redraw."""
        self.drawShips = False
        bg = pygame.Surface((self.boardSize, self.boardSize))
        bg.fill(BG_COLOR)
        self.ships.clear(self.surface, bg)
        self.drawBoard()
        pygame.display.flip()
    #Used to place text on screen
    def text_objects(self,text,font):
        """Render *text* in black with *font*; return (surface, rect)."""
        textSurface = font.render(text,True,(0,0,0))
        return textSurface, textSurface.get_rect()
def getMouse():
    """
    getMouse
    * @pre: That the window has been clicked on
    * @post: gets and returns proper X and Y values for the click
    * @param: None
    * @description: blocks in an event loop until a left mouse button press
    //arrives, then returns that click's (x, y) pixel position
    """
    while True:
        for event in pygame.event.get():
            # BUG FIX: use the event's own button/pos instead of re-polling
            # pygame.mouse — the pointer may have moved (or the button been
            # released) between the event being queued and the poll.
            if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
                return event.pos
|
# Small demo of if-statements (printed messages are in Estonian).
number = 1
y = 2
if number == 1:
    number = number + 2
# After the increment, number == 3 and y == 2, so this branch is skipped.
if number == y:
    print ("midagi on valesti")  # "something is wrong"
if 5 < 10:
    print ("5 on tõesti väiksem kui 10")  # "5 really is smaller than 10"
if 10 >= 10:
    print ("mulle sobib")  # "works for me"
# Prints a box
# Given a height and width, print a box consisting of * characters as its
# border.
# BUGFIX: the original stored the "Width?" answer in `rows` and iterated it
# as the row count, transposing the box (a 3x5 request drew a 5x3 box).
def box_rows(width, height):
    """Return the box as a list of row strings.

    Each cell is rendered as a character followed by a space, matching the
    original ``print(ch, end=' ')`` output.  Returns [] when height is 0.
    """
    rows = []
    for r in range(height):
        cells = []
        for c in range(width):
            # Border cells sit on the first/last row or first/last column.
            if r == 0 or r == height - 1 or c == 0 or c == width - 1:
                cells.append('*')
            else:
                cells.append(' ')
        rows.append(''.join(ch + ' ' for ch in cells))
    return rows

if __name__ == "__main__":
    width = int(input("Width? "))
    height = int(input("Height? "))
    for row in box_rows(width, height):
        print(row)
#
# MIT License
#
# Copyright (c) 2019 Keisuke Sehara
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from qtpy import QtCore as _QtCore
from qtpy.QtCore import Qt as _Qt
from qtpy import QtWidgets as _QtWidgets
from qtpy import QtGui as _QtGui
from .core import debug as _debug
from .resources import as_icon as _get_icon
class TableView(_QtWidgets.QTableView):
    """Table widget presenting one data block through a LogDataModel.

    Double-clicking a row that holds a nested block opens that block in a
    new window via openEntity().  Edit errors raised by the model are shown
    in a warning dialog.
    """
    def __init__(self, data, logger=None, parent=None):
        super().__init__(parent=parent)
        self.data = data
        # Default to wrapping the data in a fresh LogDataModel; a caller may
        # inject a pre-built model via `logger`.
        self._logger = LogDataModel(data) if logger is None else logger
        self.setModel(self._logger)
        self._logger.checkedError.connect(self.showErrorDialog)
        self.setSelectionBehavior(_QtWidgets.QAbstractItemView.SelectRows)
        self.horizontalHeader().setStretchLastSection(True)
        self.doubleClicked.connect(self.openEntry)
    def insert(self, entry, index=-1):
        """Insert `entry` through the model (default: append)."""
        self._logger.insert(entry, index)
    def openEntry(self, index):
        """Open the double-clicked entry in a new window if it is a block."""
        entry = self._logger.getEntryAt(index)
        if (entry is not None) and entry.is_block():
            _debug(f"opening: {entry}")
            openEntity(entry.content, as_window=True)
        # TODO: ignore all the primitive entries?
    def showErrorDialog(self, title, msg):
        """Show a modal warning with `title`/`msg` (slot for checkedError)."""
        _QtWidgets.QMessageBox.warning(self, title, msg)
class LogDataModel(_QtCore.QAbstractTableModel):
    """the table model for logging of procedures to subjects.

    Rows are entries of `data`; columns are the fields of the entry class
    (`data._entrycls._fields`).  Rejected edits are reported through the
    `checkedError` signal as (title, message) rather than raising.
    """
    # Emitted as (title, message) when a user edit is rejected by set_field.
    checkedError = _QtCore.Signal(str, str)
    def __init__(self, data, parent=None):
        super().__init__(parent=parent)
        self._entrycls = data._entrycls
        self._data = data
        # Cached invalid index used as the parent for row insertions.
        self._root = _QtCore.QModelIndex()
    def headerData(self, section, orientation, role):
        """overrides QAbstractTableModel::headerData."""
        if (orientation == _Qt.Horizontal) and (role == _Qt.DisplayRole):
            return self.columnName(section)
        else:
            return None
    def columnName(self, index):
        """Return the field name backing column `index`."""
        return self._entrycls._fields[index]
    def rowCount(self, parent):
        """overrides QAbstractTableModel::rowCount."""
        if not parent.isValid():
            # root
            return len(self._data)
        else:
            return 0
    def columnCount(self, parent):
        """overrides QAbstractTableModel::columnCount."""
        if not parent.isValid():
            # root
            return len(self._entrycls._fields)
        else:
            return 0
    def flags(self, index):
        """Non-block entries are editable, except their 'category' column."""
        base = super().flags(index)
        if (index.isValid()) and (not self._data.get_entry(index.row()).is_block()):
            if self.columnName(index.column()) != 'category':
                return base | _Qt.ItemIsEditable
        return base
    def data(self, index, role):
        """Display/Edit roles both use the entry's for_display() rendering."""
        if index.isValid():
            if role in (_Qt.DisplayRole, _Qt.EditRole):
                return self._data.get_entry(index.row()).for_display(index.column())
            else:
                return None
        else:
            return None
    def setData(self, index, value, role):
        """Apply an edit; on ValueError, emit checkedError and reject.

        NOTE(review): dataChanged is not emitted after a successful edit --
        attached views may not repaint; confirm whether that is intended.
        """
        entry = self._data.get_entry(index.row())
        try:
            entry.set_field(self._entrycls._fields[index.column()], value)
            return True
        except ValueError as e:
            self.checkedError.emit("Input error", str(e))
            return False
    def getEntryAt(self, index):
        """Return the entry for `index`'s row, or None for an invalid index."""
        if index.isValid():
            return self._data.get_entry(index.row())
        else:
            return None
    def insert(self, entry, index=-1):
        """Insert `entry` at `index`; negative counts from the end (-1 appends)."""
        if not isinstance(entry, self._entrycls):
            raise ValueError(f"expected {self._entrycls.__name__}, got {entry.__class__.__name__}")
        index = int(index)
        if index < 0:
            # e.g. -1 -> len(self._data), i.e. append after the last row.
            index = len(self._data) + 1 + index
        self.beginInsertRows(self._root, index, index)
        self._data.insert(entry, index=index)
        self.endInsertRows()
# Keep references to top-level windows so they are not garbage-collected
# (a parentless Qt widget disappears when its Python wrapper is freed).
_views = []
def openEntity(entity, parent=None, as_window=True):
    """Open `entity` in a TableView and return the view.

    When `as_window` is truthy the view is resized, shown as a free-standing
    window and kept alive in the module-level `_views` list.
    """
    view = TableView(entity, parent=parent)
    view.setWindowTitle(entity.as_title(parents=True))
    if as_window:  # idiom fix: truth test instead of `== True`
        view.resize(600, 400)
        view.show()
        _views.append(view)
    return view
class Browser(_QtWidgets.QMainWindow):
    """Main window: builds toolbars/menus from the `_commands` spec."""
    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.__populateActions()
        self.resize(800, 600)
        # Navigation history of opened views (prev/next actions).
        self.view_stack = []
    def _openSubject(self, checked=None):
        """Slot for 'open' -- placeholder, only logs for now."""
        _debug("open...")
    def _newSubject(self, checked=None):
        """Slot for 'new' -- placeholder, only logs for now."""
        _debug("new...")
    def _saveSubject(self, checked=None):
        """Slot for 'save' -- placeholder, only logs for now."""
        _debug("save...")
    def _dummy(self, checked=None):
        """Catch-all slot for not-yet-implemented actions."""
        _debug("dummy...")
    def __newAction(self, widget, item):
        """Create a QAction on `widget` from one `_commands` item dict and
        register it in self.actions (icon, tooltip, enabled state, slot,
        optional shortcut)."""
        action = item['name']
        self.actions[action] = widget.addAction(_get_icon(item['icon']), item['text'])
        self.actions[action].setToolTip(item['tip'])
        self.actions[action].setEnabled(item['init'])
        self.actions[action].triggered.connect(getattr(self, item['slot']))
        if 'key' in item.keys():
            # set shortcut
            self.actions[action].setShortcut(_QtGui.QKeySequence(item['key']))
    def __populateActions(self):
        """setting up application actions

        Walks `_commands`: 'toolbar' specs create toolbars with new actions;
        'menu' specs create menus, reusing already-created actions by name or
        creating new ones.  '__sep__' entries insert separators.
        """
        bar = _QtWidgets.QMenuBar(self)
        self.setMenuBar(bar)
        self.menus = {}
        self.menus['__root__'] = bar
        self.actions = {}
        for info in _commands:
            if info['type'] == 'toolbar':
                widget = _QtWidgets.QToolBar(self)
                widget.setMovable(False)
                widget.setToolButtonStyle(_Qt.ToolButtonTextUnderIcon)
                for item in info['actions']:
                    action = item['name']
                    if action == '__sep__':
                        widget.addSeparator()
                        continue
                    else:
                        self.__newAction(widget, item)
                self.addToolBar(info['position'], widget)
                # Expose the toolbar as an attribute, e.g. self.browse.
                setattr(self, info['name'], widget)
            elif info['type'] == 'menu':
                name = info['name']
                self.menus[name] = bar.addMenu(info['text'])
                for item in info['actions']:
                    action = item['name']
                    if action == '__sep__':
                        self.menus[name].addSeparator()
                        continue
                    if action in self.actions.keys():
                        # Reuse the toolbar-created action in the menu.
                        self.menus[name].addAction(self.actions[action])
                    else:
                        self.__newAction(self.menus[name], item)
                    # NOTE(review): for actions created by __newAction just
                    # above, the shortcut is set a second time here (harmless
                    # but redundant); the check mainly applies to reused ones.
                    if 'key' in item.keys():
                        # set shortcut
                        self.actions[action].setShortcut(_QtGui.QKeySequence(item['key']))
            else:
                continue
# Declarative UI spec consumed by Browser.__populateActions.
# Each top-level dict is either a 'toolbar' (with a dock 'position') or a
# 'menu' (with a 'text' label).  'actions' items carry:
#   name  -- action key ('__sep__' inserts a separator);
#   icon/text/tip/slot/init -- required when the action is first defined;
#   key   -- optional shortcut (string or Qt key combination).
# Menu items that give only 'name' (+ optional 'key') reuse an action that a
# toolbar defined earlier.
_commands = [
    {
        'name': 'browse',
        'type': 'toolbar',
        'position': _Qt.TopToolBarArea,
        'actions': [
            {
                'name': 'prev',
                'icon': 'backward.png',
                'text': 'Go back',
                'tip': 'Back to the parent view',
                'slot': '_dummy',
                'init': False
            },
            {
                'name': 'next',
                'icon': 'forward.png',
                'text': 'Go forward',
                'tip': 'Go to the child view that has been shown previously',
                'slot': '_dummy',
                'init': False
            },
            {
                'name': '__sep__'
            },
            {
                'name': 'edit',
                'icon': 'edit.png',
                'text': 'Edit...',
                'tip': 'Edit the selected entry',
                'slot': '_dummy',
                'init': False
            },
            {
                'name': 'add',
                'icon': 'plus.png',
                'text': 'Add...',
                'tip': 'Add a new entry to the current block',
                'slot': '_dummy',
                'init': False
            },
            {
                'name': 'remove',
                'icon': 'minus.png',
                'text': 'Remove',
                'tip': 'Remove the selected entry',
                'slot': '_dummy',
                'init': False
            }
        ]
    },
    {
        'name': 'operate',
        'type': 'toolbar',
        'position': _Qt.RightToolBarArea,
        'actions': [
            {
                'name': 'new',
                'icon': 'new.png',
                'text': 'New...',
                'tip': 'Create and open a new subject log',
                'slot': '_newSubject',
                'init': True
            },
            {
                'name': 'open',
                'icon': 'open.png',
                'text': 'Open...',
                'tip': 'Open another subject log',
                'slot': '_openSubject',
                'init': True
            },
            {
                'name': 'save',
                'icon': 'save.png',
                'text': 'Save',
                'tip': 'Save the current subject log',
                'slot': '_saveSubject',
                'init': False
            },
            {
                'name': 'close',
                'icon': 'exit.png',
                'text': 'Close',
                'tip': 'Close the current subject log',
                'slot': '_dummy',
                'init': False
            },
            {
                'name': '__sep__'
            },
            {
                'name': 'info',
                'icon': 'info.png',
                'text': 'Info...',
                'tip': 'Show information about this subject',
                'slot': '_dummy',
                'init': False
            }
        ]
    },
    {
        'name': 'file',
        'type': 'menu',
        'text': 'File',
        'actions': [
            {
                'name': 'new',
                'key': 'Ctrl+N'
            },
            {
                'name': 'open',
                'key': 'Ctrl+O'
            },
            {
                'name': 'close',
                'key': 'Ctrl+W'
            },
            {
                'name': '__sep__'
            },
            {
                'name': 'save',
                'key': 'Ctrl+S'
            },
            {
                'name': '__sep__'
            },
            {
                'name': 'info',
                'key': 'Ctrl+I'
            }
        ]
    },
    {
        'name': 'entry',
        'type': 'menu',
        'text': 'Entry',
        'actions': [
            {
                'name': 'add',
                'key': 'Ctrl+Shift+N'
            },
            {
                # NOTE(review): enum addition (_Qt.CTRL + _Qt.Key_*) works on
                # Qt5 bindings; PyQt6/PySide6 need `|` -- confirm target Qt.
                'name': 'remove',
                'key': _Qt.CTRL + _Qt.Key_Delete
            },
            {
                'name': '__sep__'
            },
            {
                'name': 'edit',
                'key': _Qt.CTRL + _Qt.Key_Return
            }
        ]
    },
    {
        'name': 'navigate',
        'type': 'menu',
        'text': 'Navigate',
        'actions': [
            {
                'name': 'prev',
                'key': 'Ctrl+['
            },
            {
                'name': 'next',
                'key': 'Ctrl+]'
            }
        ]
    }
]
|
#!/usr/bin/python
import sys
import json
from Bio import SeqIO
import os
import subprocess
from collections import defaultdict
target_to_seq = {}
# Make a dictionary for target ids and their sequences
targets_filename = "/mnt/gnpn/gnpn/projects/orphanpks/TargetMining/Antismash_gbids/targets.12.fa"
for record in SeqIO.parse(open(targets_filename, "rU"), "fasta"):
targetid = record.id
targetseq = record.seq
# print targetid, targetseq[:10]
target_to_seq[targetid] = targetseq
# Read antismash output file
# ACXX02000001|AdmT_ACC|37972|38377|31720|32586|cluster-1|transatpks-nrps|14512-116691|5386
antismash_filename = "/mnt/gnpn/gnpn/projects/orphanpks/TargetMining/Antismash_gbids/out.12.filtered.10kb"
antismash_file = open(antismash_filename).readlines()
gbid_to_target = defaultdict(list)
for line in antismash_file:
line = line.strip()
features = line.split("|")
gbid, targetid = features[:2]
print gbid, targetid
gbid_to_target[gbid].append((targetid))
for gbid in sorted(gbid_to_target.keys()):
targetids = gbid_to_target[gbid]
for targetid in targetids:
targetseq = target_to_seq[targetid]
print gbid, targetid, targetseq[:10]
gbdir1 = "/mnt/gnpn/gnpn/projects/orphanpks/TargetMining/Genbank/gbdir"
gbdir2 = "/mnt/gnpn/gnpn/projects/orphanpks/TargetMining/Genbank/assembly_gb"
path1 = os.path.join(gbdir1, gbid + ".gb")
path2 = os.path.join(gbdir2, gbid + ".gbff")
if os.path.exists(path1) is True:
gbfile = path1
elif os.path.exists(path2) is True:
gbfile = path2
else:
print "filename not correclty parsed"
print gbfile1
print gbfile2
queryfile = "targets_fasta/%s.fasta" % targetid
# print queryfile
# print gbfile
outdir = "/mnt/gnpn/gnpn/projects/orphanpks/TargetMining/Second_copy"
blast_evalue_cutoff = 1e-8
outfilename = os.path.join(outdir, targetid + "." +
str(blast_evalue_cutoff) +
"." + gbid + ".out")
# print outfilename
# EF-Tu.1e-08.NZ_KQ948231.out
# if targetid == "borI_Thr-tRNA-syn" and gbid == "KT362046":
# print queryfile
# print gbfile
# tblastn -query targets_fasta/EF-Tu.fasta -subject /mnt/gnpn/gnpn/projects/orphanpks/TargetMining/Genbank/assembly_gb/KB899005.gbff -out out.txt
subprocess.call(["tblastn", "-query", queryfile,
"-subject", gbfile,
"-out", outfilename,
"-evalue", str(blast_evalue_cutoff),
"-outfmt", "6 qseqid sseqid sstart send nident qlen slen evalue"])
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from utils import read_log, plot_hist, update_fontsize, autolabel, read_p100_log
from plot_sth import Bar
import plot_sth as Color
# Output directory for generated figures.
OUTPUT_PATH = '/media/sf_Shared_Data/tmp/sc18'
# Cluster sizes to evaluate.  NOTE: the second assignment overrides the
# first, so only [2, 4, 8] is actually used (earlier lists kept as toggles).
num_of_nodes = [2, 4, 8, 16]
num_of_nodes = [2, 4, 8]
#num_of_nodes = [8, 80, 81, 82, 83, 85]
#num_of_nodes = [16, 32, 64]
# Link bandwidth in bytes/s.  NOTE(review): comment says 10 Gbps but the
# value is 9 Gbit/s -- presumably derated for protocol overhead; confirm.
B = 9.0 * 1024 * 1024 * 1024.0 / 8 # 10 Gbps Ethernet
#B = 56 * 1024 * 1024 * 1024.0 / 8 # 56 Gbps IB
# Plot marker per node count.
markers = {2:'o',
           4:'x',
           8:'^'}
def time_of_allreduce(n, M, B=B):
    """
    n: number of nodes
    M: size of message
    B: bandwidth of link

    Returns the modeled all-reduce latency in microseconds.

    NOTE: the `if True:` block below always returns, so everything after it
    (the ts/tcompute ring-allreduce model) is dead code kept only for
    experimentation toggles.
    """
    # Model 1, TernGrad, NIPS2017
    #if True:
    #    ncost = 100 * 1e-6
    #    nwd = B
    #    return ncost * np.log2(n) + M / nwd * np.log2(n)
    # Model 2, Lower bound, E. Chan, et al., 2007
    if True:
        #alpha = 7.2*1e-6 #Yang 2017, SC17, Scaling Deep Learning on GPU and Knights Landing clusters
        #alpha = 6.25*1e-6*n # From the data gpuhome benchmark
        #alpha = 12*1e-6*n # From the data gpuhome benchmark
        # alpha/beta/gamma: per-message startup, per-byte transfer, and
        # per-byte reduction costs (empirically fitted constants).
        alpha = 45.25*1e-6#*np.log2(n) # From the data gpuhome benchmark
        beta = 1 / B *1.2
        gamma = 1.0 / (16.0 * 1e9 * 4) * 160
        # M is a parameter count; x4 converts to bytes (float32).
        M = 4*M
        t = 2*(n)*alpha + 2*(n-1)*M*beta/n + (n-1)*M*gamma/n
        return t * 1e6
    # ---- unreachable below this line (kept as alternative models) ----
    ts = 7.5/ (1000.0 * 1000)# startup time in second
    #seconds = (np.ceil(np.log2(n)) + n - 1) * ts + (2*n - 1 + n-1) * M / n * 1/B
    #seconds = (np.ceil(np.log2(n)) + n - 1) * ts + 2 * (n - 1) * 2*M/n * 1/B
    #tcompute = 1. / (2.2 * 1000 * 1000 * 1000)
    tcompute = 1. / (1 * 1000 * 1000 * 1000)
    #seconds = 2 * (n - 1) * ts + 2 * (n - 1) * M/n * 1/B + (n-1)*M/n * tcompute
    #C = 1024.0 * 1024 # segmented_size
    #if M > C * n:
    #    # ring_segmented allreduce
    #    seconds = (M / C + (n - 2)) * (ts + C / B + C * tcompute)
    #else:
    #    # ring allreduce, better than the above
    #seconds = (n - 1) * ts + 2 * (n - 1) * M/n * 1/B + (n-1)*M/n * tcompute
    seconds = 2*(n-1)*n*ts + 2 * (n - 1) * M/n * 1/B + (n-1)*M/n * tcompute
    #C = 512.0
    #seconds = (M / C + n-2) * (ts + C/B)
    return seconds * 1000 * 1000 # micro seconds
class Simulator():
    """Simulator of layer-wise gradient communication scheduling.

    Given per-layer backward-compute times (`computes`) and gradient sizes
    (`sizes`), models three strategies: WFBP (wait-free backprop, wfbp()),
    SyncEASGD (one merged all-reduce, synceasgd()) and MG-WFBP (merged-
    gradient WFBP, gmwfbp2()).  Internal times are microseconds; the public
    methods return milliseconds.  When `render` is True, bars are drawn with
    matplotlib via the Bar helper.
    """
    def __init__(self, name, computes, sizes, num_of_nodes, render=True):
        self.name = name
        self.computes = computes
        self.sizes = sizes
        self.num_of_nodes = num_of_nodes
        # Per-layer communication times; lazily computed from `sizes` unless
        # gmwfbp2() has stored a merged schedule here.
        self.comms = None
        self.title = name + ' (WFBP)'
        self.max_time = 0
        self.ax = None
        self.render = render
        # Indices of layers whose communication was merged by gmwfbp2().
        self.merged_layers = []
    def wfbp(self, with_optimal=False):
        """Simulate WFBP scheduling; returns total time in milliseconds.

        with_optimal=True replays the same schedule using the merged comms
        produced by gmwfbp2() (drawn in the 'mc' bar style).
        """
        start_time = 0.0
        comm_start_time = 0.0
        comm = 0.0
        if not self.comms:
            comms = [time_of_allreduce(self.num_of_nodes, s, B) for s in self.sizes]
        else:
            comms = self.comms
        max_time = max(np.sum(self.computes), np.sum(comms)+self.computes[0])
        if not with_optimal:
            self.max_time = max_time
        if not self.ax and self.render:
            fig, ax = plt.subplots(1, figsize=(30, 3))
            #ax.set_title(self.title, x=0.5, y=0.8)
            self.ax = ax
        comm_layer_id = ''
        for i in range(len(self.computes)):
            comp = self.computes[i]
            # Layers are labeled from the last layer down (backward order).
            layer_id = len(self.computes) - i
            if not with_optimal:
                if self.render:
                    bar = Bar(start_time, comp, self.max_time, self.ax, type='p', index=layer_id)
                    bar.render()
            # A layer's communication starts when both the previous
            # communication and this layer's computation have finished.
            if comm_start_time + comm > start_time + comp:
                comm_start_time = comm_start_time + comm
            else:
                comm_start_time = start_time + comp
            if comm == 0.0 and comm_layer_id != '':
                # Previous layer was merged (zero-time comm): extend label.
                comm_layer_id = str(comm_layer_id)+','+str((len(self.computes) - i))
            else:
                comm_layer_id = str(layer_id)
            comm = comms[i]
            type = 'wc'
            if with_optimal:
                type = 'mc'
            if self.render:
                bar_m = Bar(comm_start_time, comm, self.max_time, self.ax, type=type, index=comm_layer_id, is_optimal=with_optimal)
                bar_m.render()
            start_time += comp
        total_time = (comm_start_time + comm)/1000.0
        print('Total time: ', total_time, ' ms')
        if self.render:
            plt.subplots_adjust(left=0.06, right=1.)
        return total_time
    def synceasgd(self):
        """Simulate SyncEASGD: one all-reduce of all gradients after all
        computation; returns total time in milliseconds."""
        start_time = 0.0
        comm_start_time = 0.0
        comm = 0.0
        total_size = np.sum(self.sizes)
        comm = time_of_allreduce(self.num_of_nodes, total_size, B)
        total_comp = np.sum(self.computes)
        comm_start_time = total_comp
        index = ','.join([str(len(self.computes)-i) for i in range(0, len(self.computes))])
        if self.render:
            bar = Bar(np.sum(self.computes), comm, self.max_time, self.ax, type='sc', index=index)
            bar.render()
        total_time = (comm_start_time + comm)/1000.0
        print('Total time: ', total_time, ' ms')
        if self.render:
            pass
        return total_time
    def cal_comm_starts(self, comms, comps):
        """
        comms and comps have been aligned

        Returns the start time of each communication: comm i starts after
        both comm i-1 finishes and the first i computations finish.
        """
        start_comms = []
        start_comms.append(0.0)
        sum_comp = 0.0
        for i in range(1, len(comms)):
            comm = comms[i-1]
            comp = comps[i-1]
            #print(start_comms[i-1],comm, sum_comp,comp)
            start_comm = max(start_comms[i-1]+comm, sum_comp+comp)
            #print('start_comm: ', start_comm, ', comm: ', comm)
            start_comms.append(start_comm)
            sum_comp += comp
        return start_comms
    def merge(self, comms, sizes, i, p, merge_size, comps):
        """Merge layer i's communication into layer i+1 (in place): layer i's
        comm becomes 0, layer i+1 carries the merged time `p` and size
        `merge_size`.  Returns the recomputed comm start times."""
        comms[i] = 0# merge here
        comms[i+1] = p
        sizes[i+1] = merge_size
        start_comms = self.cal_comm_starts(comms, comps)
        #print('start_comms: ', start_comms)
        self.merged_layers.append(i)
        return start_comms
    def gmwfbp2(self):
        """MG-WFBP: greedily merge adjacent communications when merging
        reduces the finish time, then replay WFBP with the merged schedule.
        Also runs plain wfbp() and synceasgd() for comparison plots.
        Returns the MG-WFBP total time in milliseconds."""
        if not self.comms:
            comms = [time_of_allreduce(self.num_of_nodes, s, B) for s in self.sizes]
        else:
            comms = self.comms
        #comms = comms[0:-1]
        #print('comms: ', comms)
        # Align: comm of layer i overlaps with computation of layer i+1.
        comps = self.computes[1:]
        comps.append(0) # for last communication
        optimal_comms = list(comms)
        optimal_sizes = list(self.sizes)
        start_comms = self.cal_comm_starts(optimal_comms, comps)
        sum_comp = 0.0
        #print('start_comms: ', start_comms)
        #return
        for i in range(0, len(comms)-1):
            comp = comps[i]
            comm = optimal_comms[i]
            if start_comms[i] + comm > comp+sum_comp:
                # cannot be hidden, so we need to merge
                merge_size = optimal_sizes[i+1] + optimal_sizes[i]
                r = comm + optimal_comms[i+1]
                p = time_of_allreduce(self.num_of_nodes, merge_size, B)
                if start_comms[i] >= comp+sum_comp:
                    # don't care about computation
                    if p < r:
                        start_comms = self.merge(optimal_comms, optimal_sizes, i, p, merge_size, comps)
                        #optimal_comms[i] = 0# merge here
                        #optimal_comms[i+1] = p
                        #optimal_sizes[i+1] += merge_size
                        #start_comms = self.cal_comm_starts(optimal_comms, comps)
                else:
                    if comp+sum_comp+p < start_comms[i]+comm+optimal_comms[i+1]:
                        start_comms = self.merge(optimal_comms, optimal_sizes, i, p, merge_size, comps)
            else:
                pass # optimal, nothing to do
            sum_comp += comp
        optimal_comms.append(comms[-1])
        self.wfbp()
        self.synceasgd()
        self.comms = optimal_comms
        self.title = self.name+ ' (GM-WFBP)'
        ret = self.wfbp(with_optimal=True)
        #print('merged-layers: ', self.merged_layers)
        return ret
def read_allreduce_log(filename):
    """Parse an mpibench all-reduce log.

    Each data line is whitespace-separated with the byte count in the first
    column and the measured time in the last.  Lines starting with '#' or
    shorter than 10 characters are skipped.  Sizes are converted to element
    counts (bytes // 4, i.e. float32) and filtered to [2048, 2e4].

    Returns (sizes, comms, []) -- the empty list mirrors the original
    three-value interface.
    """
    sizes = []
    comms = []
    # Fixed: use a context manager so the handle is closed on error paths,
    # and floor-divide so py2/py3 agree on integer sizes.
    with open(filename, 'r') as f:
        for l in f.readlines():
            if l[0] == '#' or len(l)<10 :
                continue
            items = ' '.join(l.split()).split()
            comm = float(items[-1])
            # Bytes -> number of float32 parameters.
            size = int(items[0]) // 4
            # Keep only the mid-range message sizes used in the plots.
            if size < 2048 or size > 2e4:
                continue
            comms.append(comm)
            sizes.append(size)
    return sizes, comms, []
def predict(filename, n, color, marker, label, sizes=None, ax=None):
    """Plot measured vs. model-predicted all-reduce latency for n nodes.

    When `sizes` is None, measurements are loaded from `filename` and the
    measured curve is drawn; otherwise only the prediction is drawn over the
    given sizes.  Returns the sizes used.
    """
    #sizes, comms, comps, merged_comms = read_log(filename)
    if ax is None:
        fig, ax = plt.subplots(figsize=(5,4.5))
    if sizes is None:
        sizes, comms, comps = read_allreduce_log(filename)
        #plt.scatter(range(1, len(sizes)+1), sizes, c=color, label=label, marker=marker, s=40, facecolors='none', edgecolors=color)
        plt.plot(sizes, comms, c=color, marker=marker, label=label+' measured', linewidth=2)
        #plt.plot(sizes, comms, c=color, marker=marker, label=label, linewidth=2)
        #bandwidths = np.array(sizes)/np.array(comms)
        #plt.plot(sizes, bandwidths, c=color, marker=marker, label=label, linewidth=2)
    # Model curve from time_of_allreduce for the same message sizes.
    predicts = []
    for M in sizes:
        p = time_of_allreduce(n, M, B)
        predicts.append(p)
    #rerror = (np.array(predicts)-np.array(comms))/np.array(comms)
    #print('erro: ', np.mean(np.abs(rerror)))
    #plt.scatter(sizes, predicts, c='red', marker=markers[n])
    ax.plot(sizes, predicts, c=color, marker=marker, linestyle='--', label=label+' predict', markerfacecolor='white', linewidth=1)
    return sizes
def plot_all_communication_overheads():
    """Draw measured + predicted all-reduce latency curves for every node
    count in the module-level `num_of_nodes` and show the figure."""
    #labels = ['2-node', '4-node', '8-node', '16-node']
    fig, ax = plt.subplots(figsize=(5,4.5))
    labels = ['%d-node' % i for i in num_of_nodes]
    colors = ['r', 'g', 'b', 'black', 'y', 'c']
    markers = ['^', 'o', 'd', '*', 'x', 'v']
    sizes = None
    # Fixed size grid (parameter counts) shared by all curves.
    sizes = np.arange(128.0, 1e5, step=8192)
    for i, n in enumerate(num_of_nodes):
        test_file = '/media/sf_Shared_Data/gpuhome/repositories/mpibench/allreduce%d.log' % n
        #test_file = '/media/sf_Shared_Data/gpuhome/repositories/mpibench/t716/allreduce%d.log' % n # 1Gbps
        #test_file = '/media/sf_Shared_Data/gpuhome/repositories/mpibench/t716/ompi2.1log/allreduce%d.log' % n # 1Gbps
        #test_file = '/media/sf_Shared_Data/gpuhome/repositories/mpibench/t716/ompi3.0log/allreduce%d.log' % n # 1Gbps
        #sizes = predict(test_file, n, colors[i], markers[i], labels[i])
        predict(test_file, n, colors[i], markers[i], labels[i], sizes, ax)
    #plt.xlim(left=0)
    #plt.xlabel('Message size (bytes)')
    #ax.ticklabel_format(style='sci',axis='x')
    plt.xlabel('# of parameters')
    plt.ylabel(r'Latency ($\mu$s)')
    plt.ylim(bottom=0, top=plt.ylim()[1]+200)
    #plt.xscale("log", nonposy='clip')
    plt.legend(ncol=1, loc=2)
    update_fontsize(ax, fontsize=14)
    plt.subplots_adjust(left=0.18, bottom=0.13, top=0.91, right=0.92)
    #plt.savefig('%s/%s.pdf' % (OUTPUT_PATH, 'commtime'))
    plt.show()
def gmwfbp_simulate():
    """Run the MG-WFBP simulation for one network's measured layer log and
    save the timeline breakdown figure to OUTPUT_PATH."""
    name = 'GoogleNet'
    #name = 'ResNet'
    #name = 'VGG'
    #name = 'DenseNet'
    # Shadows the module-level list on purpose: a single cluster size here.
    num_of_nodes = 32
    test_file = '/media/sf_Shared_Data/gpuhome/repositories/dpBenchmark/tools/caffe/cnn/%s/tmp8comm.log' % name.lower()
    sizes, comms, computes, merged_comms = read_log(test_file)
    #computes = [c/4 for c in computes]
    #sizes = [1., 1., 1., 1.]
    #computes = [3., 3.5, 5., 6.]
    #sim = Simulator(name, computes[0:4], sizes[0:4], num_of_nodes)
    sim = Simulator(name, computes, sizes, num_of_nodes)
    #sim.wfbp()
    sim.gmwfbp2()
    plt.savefig('%s/breakdown%s.pdf' % (OUTPUT_PATH, name.lower()))
    #plt.show()
def gmwfbp_speedup():
    """Simulate WFBP / SyncEASGD / MG-WFBP scaling for one network and save
    the speedup-vs-nodes figure to OUTPUT_PATH.

    Speedup is throughput relative to a single node: b*N / (iteration time),
    where tf (forward) is assumed to be half of tb (backward).
    """
    #configs = ['GoogleNet', 64]
    # configs = [network name, per-node batch size]
    configs = ['ResNet', 32]
    #configs = ['DenseNet', 128]
    name = configs[0]
    b = configs[1]
    test_file = '/media/sf_Shared_Data/gpuhome/repositories/dpBenchmark/tools/caffe/cnn/%s/tmp8comm.log' % name.lower()
    sizes, comms, computes, merged_comms = read_log(test_file)
    device = 'k80'
    # Second assignment wins: P100 timings are used.
    device = 'p100'
    pfn = '/media/sf_Shared_Data/gpuhome/repositories/dpBenchmark/tools/caffe/cnn/%s/tmp8commp100%s.log' % (name.lower(), name.lower())
    val_sizes, computes = read_p100_log(pfn)
    print('computes: ', np.sum(computes))
    print('computes: ', computes)
    assert len(computes) == len(sizes)
    nnodes = [4, 8, 16, 32, 64]
    #nnodes = [2, 4, 8]
    wfbps = []
    gmwfbps = []
    synceasgds = []
    micomputes = np.array(computes)
    # tf: forward time (assumed tb/2); tb: backward time, in ms.
    tf = np.sum(micomputes) * 0.5 / 1000
    tb = np.sum(micomputes) / 1000
    total_size = np.sum(sizes)
    # Single-node throughput baseline (images/ms).
    single = b/(tf+tb)
    optimal = []
    colors = ['k', 'r', 'g', 'b']
    markers = ['s', '^', 'o', 'd']
    for num_of_nodes in nnodes:
        sim = Simulator(name, computes, sizes, num_of_nodes, render=False)
        wfbp = sim.wfbp()
        wfbps.append(b*num_of_nodes/(wfbp+tf)/single)
        gmwfbp = sim.gmwfbp2()
        gmwfbps.append(b*num_of_nodes/(gmwfbp+tf)/single)
        tc = time_of_allreduce(num_of_nodes, total_size, B)/1000
        print('#nodes:', num_of_nodes, ', tc: ', tc)
        synceasgd = tb + tf + tc
        synceasgds.append(b*num_of_nodes/synceasgd/single)
        optimal.append(num_of_nodes)
    print('tf: ', tf)
    print('tb: ', tb)
    print('total_size: ', total_size)
    print('wfbp: ', wfbps)
    print('gmwfbps: ', gmwfbps)
    print('synceasgds: ', synceasgds)
    print('compared to synceasgds: ', np.array(gmwfbps)/np.array(synceasgds))
    print('compared to wfbps: ', np.array(gmwfbps)/np.array(wfbps))
    fig, ax = plt.subplots(figsize=(5,4.5))
    ax.plot(nnodes, optimal, color='k', marker='s', label='Linear')
    ax.plot(nnodes, wfbps, color='r', marker='d', label='WFBP')
    ax.plot(nnodes, synceasgds, color='b', marker='o', label='SyncEASGD')
    ax.plot(nnodes, gmwfbps, color='g', marker='^', label='MG-WFBP')
    plt.legend(loc=2)
    plt.xlabel('# of nodes')
    plt.ylabel('Speedup')
    #plt.title('%s-Simulation'%name)
    #plt.yscale('log', basey=2)
    #plt.xscale('log', basey=2)
    plt.ylim(bottom=1,top=nnodes[-1]+1)
    plt.xlim(left=1, right=nnodes[-1]+1)
    plt.xticks(nnodes)
    plt.yticks(nnodes)
    plt.grid(color='#5e5c5c', linestyle='-.', linewidth=1)
    update_fontsize(ax, fontsize=14)
    plt.subplots_adjust(left=0.13, bottom=0.13, top=0.96, right=0.97)
    plt.savefig('%s/speedup%s.pdf' % (OUTPUT_PATH, name.lower()+device))
    #plt.show()
def plot_realdata_comm(datas, configs):
    """Stacked-bar plot of measured computation vs. communication time for
    WFBP / SyncEASGD / MG-WFBP across node counts; saves to OUTPUT_PATH.

    datas: [wfbps, synceasgds, gmwfbps] throughput lists, first element is
        the single-node baseline; configs: [network name, batch size].
    """
    def calculate_real_comms(data, bs):
        # Convert per-node-count throughputs to times; element 0 (single
        # node) gives the pure computation time, the rest add communication.
        times = [bs/((d/2)/2**(i-1)) for i, d in enumerate(data)]
        comp = times[0]
        comms = [t-times[0] for t in times[1:]]
        return comp, comms
    fig, ax = plt.subplots(figsize=(4.8,3.4))
    count = len(datas[0][1:])
    ind = np.arange(count)
    width = 0.25
    s = -int(count/2)
    print('s: ', s)
    margin = 0.05
    xticklabels = [str(2**(i+1)) for i in range(count)]
    s = (1 - (width*count+(count-1) *margin))/2+width
    ind = np.array([s+i+1 for i in range(count)])
    centerind = None
    labels=['WF.', 'S.E.', 'M.W.']
    for i, data in enumerate(datas):
        comp, comms= calculate_real_comms(data, configs[1])
        comps = [comp for j in comms]
        newind = ind+s*width+(s+1)*margin
        p1 = ax.bar(newind, comps, width, color=Color.comp_color,hatch='x', label='Comp.')
        p2 = ax.bar(newind, comms, width,
                    bottom=comps, color=Color.comm_color, label='Comm.')
        s += 1
        autolabel(p2, ax, labels[i], 0)
        print('comp: ', comp)
        print('comms: ', comms)
        print('')
    rects = ax.patches
    ax.text(10, 10, 'ehhlo', color='b')
    # NOTE(review): this rebinds `labels`, shadowing the method labels list
    # above -- the legend below uses the axes' own labels; confirm intended.
    handles, labels = ax.get_legend_handles_labels()
    #ax.legend([handles[0][0]], [labels[0][0]], ncol=2)
    print(labels)
    print(handles)
    ax.set_xlim(left=1+0.3)
    ax.set_ylim(top=ax.get_ylim()[1]*1.3)
    ax.set_xticks(ind+2*(width+margin))
    ax.set_xticklabels(xticklabels)
    ax.set_xlabel('# of nodes')
    ax.set_ylabel('Time [s]')
    update_fontsize(ax, 14)
    ax.legend((p1[0], p2[0]), (labels[0],labels[1] ), ncol=2, handletextpad=0.2, columnspacing =1.)
    fig.subplots_adjust(left=0.16, right=0.96, bottom=0.17, top=0.94)
    plt.savefig('%s/comm%sreal.pdf' % (OUTPUT_PATH, configs[0].lower()))
    #plt.show()
def realdata_speedup():
    """Plot measured (real-cluster) speedups of WFBP / SyncEASGD / MG-WFBP
    against linear scaling and save the figure to OUTPUT_PATH.

    The hard-coded lists are measured throughputs; element 0 is the
    single-node baseline used for normalization.
    """
    configs = ['GoogleNet', 64]
    wfbps = [81.68*2, 74.83*2*2, 74.91*2*4, 2*62.9*8]
    gmwfbps = [81.68*2, 79.02*2*2, 75.03*2*4, 2*75.68*8]
    synceasgds =[81.68*2, 62.57*2*2, 57.67*2*4, 2*55.58*8]
    device = 'k80'
    # The ResNet block below overrides the GoogleNet numbers above.
    configs = ['ResNet', 32]
    wfbps = [76.85, 75.55*2, 73.679*4, 58.2*8]
    gmwfbps = [76.85, 75.59*2, 73.8*4, 70.8251*8]
    synceasgds =[76.85, 60.0*2, 55.7*4, 50.8*8]
    datas = [wfbps, synceasgds, gmwfbps]
    #plot_realdata_comm(datas, configs)
    #return
    #configs = ['DenseNet', 128]
    name = configs[0]
    b = configs[1]
    nnodes = [2, 4, 8]
    fig, ax = plt.subplots(figsize=(5,4.5))
    optimal = nnodes
    # Normalize by the single-node measurement (element 0).
    wfbps = [i/wfbps[0] for i in wfbps[1:]]
    gmwfbps = [i/gmwfbps[0] for i in gmwfbps[1:]]
    synceasgds= [i/synceasgds[0] for i in synceasgds[1:]]
    print('compared to wfbp: ', np.array(gmwfbps)/np.array(wfbps))
    print('compared to synceasgds: ', np.array(gmwfbps)/np.array(synceasgds))
    ax.plot(nnodes, optimal, color='k', marker='s', label='Linear')
    ax.plot(nnodes, wfbps, color='r', marker='d', label='WFBP')
    ax.plot(nnodes, synceasgds, color='b', marker='o', label='SyncEASGD')
    ax.plot(nnodes, gmwfbps, color='g', marker='^', label='MG-WFBP')
    #plt.yscale('log', basey=2)
    #plt.xscale('log', basey=2)
    plt.legend(loc=2)
    plt.xlabel('# of nodes')
    plt.ylabel('Speedup')
    plt.xticks(nnodes)
    plt.yticks(nnodes)
    plt.ylim(bottom=1,top=nnodes[-1]+1)
    plt.xlim(left=1, right=nnodes[-1]+1)
    plt.grid(color='#5e5c5c', linestyle='-.', linewidth=1)
    #plt.title('%s-Realworld'%name)
    update_fontsize(ax, fontsize=14)
    plt.subplots_adjust(left=0.13, bottom=0.13, top=0.96, right=0.97)
    plt.savefig('%s/speedup%sreal.pdf' % (OUTPUT_PATH, name.lower()+device))
    #plt.show()
def parse_real_comm_cost():
    """Sum the measured merged-communication time per node count from the
    benchmark logs of one network and print the totals."""
    configs = ['GoogleNet', 'gm'] #SyncEASGD
    name = configs[0]
    t = configs[1]
    nnodes = [2, 4, 8]
    ncomms = []
    for n in nnodes:
        test_file = '/home/shshi/gpuhome/repositories/dpBenchmark/tools/caffe/cnn/%s/%s%dcomm.log' % (name.lower(), t, n)
        sizes, comms, computes, merged_comms = read_log(test_file)
        ncomms.append(np.sum(merged_comms))
    print('network: ', name, ', type: ', t)
    print('ncomms: ', ncomms)
def speedup_with_r_and_n(r, n):
    """Ideal speedup of n workers degraded by communication-to-compute ratio r."""
    denominator = 1.0 + r
    return n / denominator
def draw_ssgd_speedup():
    """Plot S-SGD speedup curves N/(1+r) for several cluster sizes N over a
    range of communication ratios r, then show the figure."""
    Ns = [8, 16, 32, 64]
    r = np.arange(0, 4, step=0.1)
    for N in Ns:
        s = N / (1+r)
        plt.plot(r, s)
    #plt.yscale('log', basey=2)
    plt.show()
if __name__ == '__main__':
    # Entry point: only the MG-WFBP speedup simulation is enabled; the other
    # experiments are kept commented out for quick switching.
    #test_file = '/media/sf_Shared_Data/gpuhome/repositories/dpBenchmark/tools/caffe/cnn/googlenet/tmp8ocomm.log'
    #read_log(test_file)
    #test_file = '../logdata/allreduce%d.log' % num_of_nodes
    #test_file = '/media/sf_Shared_Data/gpuhome/repositories/mpibench/allgather%d.log' % num_of_nodes
    #plot_all_communication_overheads()
    #gmwfbp_simulate()
    #realdata_speedup()
    #parse_real_comm_cost()
    gmwfbp_speedup()
    #draw_ssgd_speedup()
|
from rest_framework import serializers
from common.models.general import BusinessUnit
class BusinessUnitSerializer(serializers.ModelSerializer):
    """Serializes a BusinessUnit as a {label, value} pair (select-widget
    friendly): label <- Description, value <- BusinessUnitId.

    NOTE(review): StringRelatedField is read-only and normally targets
    relations -- confirm Description/BusinessUnitId are relations, otherwise
    ReadOnlyField(source=...) may be the intended field type.
    """
    label = serializers.StringRelatedField(source='Description')
    value = serializers.StringRelatedField(source='BusinessUnitId')
    class Meta:
        model = BusinessUnit
        fields = ('label', 'value')
|
import poplib
from email.parser import Parser
from decodeMailContent import print_info, get_att
def decodedict(dict1):
    """Return a shallow copy of *dict1*.

    Exists to decouple the result from the mutable dicts returned by
    print_info (which are reused between messages).  The original rebuilt
    the dict with a manual loop; dict() does the same in one call.
    """
    return dict(dict1)
def decodeBoolean(boolean1):
    """Return *boolean1* unchanged.

    Kept for symmetry with decodedict(); booleans are immutable, so no copy
    is needed.  The original's temporary variable and semicolons were
    removed -- behavior is identical.
    """
    return boolean1
def decodeMailContentDao(server):
    """Fetch every message from a POP3 `server` and return a 1-indexed list.

    Each element i (for POP3 message number i) is a dict with keys
    'headers', 'contents', 'dates', 'haveAttach', 'msg'; index 0 is an
    unused placeholder so list positions match message numbers.  Returns []
    when the mailbox is empty.
    """
    # server.list() returns the numbers/sizes of all messages.
    resp, mails, octets = server.list()
    #print(mails)
    # POP3 message numbers start at 1.
    indexs = len(mails)
    if indexs==0:
        return []
    allmails=list(range(indexs+1))
    for index in range(1,indexs+1):
        haveAttach=False
        resp, lines, octets = server.retr(index)
        msg_content = b'\r\n'.join(lines).decode('utf-8')
        msg = Parser().parsestr(msg_content)
        print("---------------------------日期")
        print(msg)
        # NOTE(review): msg.get('Date',) defaults to None -> 'None' string
        # for undated messages; confirm downstream tolerates that.
        dates=str(msg.get('Date',))
        headers,contents,num=print_info(msg)
        # More than one MIME part means the message carries an attachment.
        i=0
        for p in msg.walk():
            i=i+1
            if i>1:
                haveAttach=True;
        """
        为什么这么麻烦不直接把headers,contents封装成字典呢?
        因为print_info返回的是地址,所以牵一发动全身,所以重新创建个对象装
        """
        # (Translation of the note above: print_info returns references to
        # shared objects, so fresh copies are made to avoid aliasing.)
        headers=decodedict(headers)
        contents=decodedict(contents)
        haveAttach=decodeBoolean(haveAttach)
        key=['headers','contents','dates','haveAttach','msg']
        value=[headers,contents,dates,haveAttach,msg]
        allmails[index]=dict(zip(key,value))
        #print("---------第%d次循环----------------"%index)
    return allmails
#!/usr/bin/python3
import sys
def read_map_output(file):
    """Yield tab-split fields from each line of *file* (sys.stdin).

    Input format: key \t value
    Output format: (key, value)
    """
    for raw_line in file:
        fields = raw_line.strip().split("\t")
        yield fields
def tag_reducer():
    """Reduce mapper output: for each (number, place, tag_number) record on
    stdin, emit 'city<TAB>number<TAB>tag_number' where city is the first
    comma-separated component of place."""
    for number, place, tag_number in read_map_output(sys.stdin):
        city = place.split(', ')[0]
        print('\t'.join([city, number, tag_number]))

if __name__ == "__main__":
    tag_reducer()
|
from __future__ import with_statement
import os.path
import random
import pytest
from whoosh import redline as kv
from whoosh.compat import b, xrange
from whoosh.util import now, random_name
from whoosh.util.testing import TempDir
def test_bisect_regions():
    """bisect_regions returns the index of the region whose key range could
    contain the key (keys past the last range map to len(regions))."""
    regions = [kv.Region(0, 0, "b", "d", 0),
               kv.Region(0, 0, "f", "h", 0),
               kv.Region(0, 0, "j", "m", 0)]
    assert kv.bisect_regions(regions, "a") == 0
    assert kv.bisect_regions(regions, "b") == 0
    assert kv.bisect_regions(regions, "c") == 0
    assert kv.bisect_regions(regions, "d") == 0
    assert kv.bisect_regions(regions, "e") == 1
    assert kv.bisect_regions(regions, "f") == 1
    assert kv.bisect_regions(regions, "i") == 2
    assert kv.bisect_regions(regions, "j") == 2
    assert kv.bisect_regions(regions, "m") == 2
    assert kv.bisect_regions(regions, "n") == 3
    assert kv.bisect_regions(regions, "z") == 3
def test_segments():
    """segment_keys partitions a sorted key sequence into runs paired with
    the region covering them (None for keys in the gaps)."""
    r1 = kv.Region(0, 0, "b", "d", 0)
    r2 = kv.Region(0, 0, "f", "h", 0)
    r3 = kv.Region(0, 0, "j", "m", 0)
    regions = [r1, r2, r3]
    output = kv.segment_keys(regions, "abcdefghijklmnop")
    assert output == [
        ("a", None),
        ("bcd", r1),
        ("e", None),
        ("fgh", r2),
        ("i", None),
        ("jklm", r3),
        ("nop", None)
    ]
def test_write_read():
    """Round-trip: write_regions stores the items in one region (block size
    4096) and read_region yields them back unchanged."""
    items = [
        (b("alfa"), b("bravo")),
        (b("charlie"), b("delta")),
        (b("echo"), b("foxtrot")),
        (b("golf"), b("hotel")),
        (b("india"), b("juliet")),
        (b("kilo"), b("lima")),
        (b("mike"), b("november")),
        (b("oskar"), b("papa")),
        (b("quebec"), b("romeo")),
    ]
    with TempDir("kvwriteread") as dirpath:
        path = os.path.join(dirpath, "test")
        with open(path, "wb") as f:
            regions = list(kv.write_regions(f, items, 4096))
            assert len(regions) == 1
        with open(path, "rb") as f:
            readitems = list(kv.read_region(f, regions[0]))
            assert readitems == items
def test_merge_items():
    """merge_items merges two sorted (key, value) lists; items2 wins on key
    collisions and a None value deletes the key (e.g. 'e')."""
    items1 = [("c", "d"), ("e", "f"), ("g", "h"), ("i", "j"), ("o", "p")]
    items2 = [("_", ":"), ("a", "b"), ("e", None), ("i", "k"), ("m", "n")]
    target = [
        ("_", ":"), ("a", "b"), ("c", "d"), ("g", "h"), ("i", "k"), ("m", "n"),
        ("o", "p")
    ]
    output = list(kv.merge_items(items1, items2))
    assert output == target
def test_merge_random():
    """merge_items on random sorted inputs must equal a dict-merge (items2
    overriding items1) of the same data, re-sorted."""
    items1 = sorted((random_name(4), random_name(8)) for _ in xrange(500))
    items2 = sorted((random_name(4), random_name(8)) for _ in xrange(500))
    x1 = sorted(dict(items1 + items2).items())
    x2 = list(kv.merge_items(items1, items2))
    assert x1 == x2
|
class Coche():
    """A toy car with a fixed brand, colour and wheel count, plus a running state."""

    def __init__(self):
        self.marca = "Audi"
        self.color = "Rojo"
        self.ruedas = 4
        self.enmarcha = False

    def arrancar(self, arrancamos):
        """Set the running state to *arrancamos* and return a status message."""
        self.enmarcha = arrancamos
        return "El coche esta en marcha" if self.enmarcha else "El coche esta apagado"

    def __str__(self):
        return "Este auto esta en marca {}, de color {}, tiene {} ruedas".format(
            self.marca, self.color, self.ruedas)
# Demo: build a car, try to start it (off), then print its description.
mi_coche = Coche()
print(mi_coche.arrancar(False))
print(str(mi_coche))
from django.db import models
# Primary key (serial number or other ID) of Device is expected to be set
# before the device attempts to connect for the first time.
# Create your models here.
class Device(models.Model):
    """A connected device, keyed by its serial number (or other ID)."""

    # Serial number (or other ID) of the device; assigned before first connect.
    device_id = models.CharField(primary_key=True, max_length=20)
    # Connection state: 'online' or 'offline'
    status = models.CharField(max_length=7)
    fw_version = models.CharField(max_length=10)
    # private DES key
    key = models.CharField(max_length=8)

    def set_online(self) -> None:
        """Mark the device as connected (caller is responsible for saving)."""
        self.status = 'online'

    def set_offline(self) -> None:
        """Mark the device as disconnected (caller is responsible for saving)."""
        self.status = 'offline'
class Item(models.Model):
    """A single named value reported by a device at a point in time."""
    # Deleting a Device cascades to its items.
    device = models.ForeignKey('Device', on_delete=models.CASCADE)
    name = models.CharField(max_length=20)
    type = models.CharField(max_length=10)  # NOTE: shadows the builtin name `type` on the class
    value = models.CharField(max_length=30)
    time = models.DateTimeField()
|
# Single-character structural tokens recognised by the tokenizer.
JSON_TOKENS = [",", ":", "{", "}", "[", "]"]
def tokenize(string):
    """
    Decompose a string representation of json into a list of json tokens.

    Tokens include: {, }, [, ], :, ,, strings, bools, null, numerics

    :param string: String representation of json
    :return: List of tokens
    """
    tokens = []
    while string:
        # Literals first: true / false / null
        token, string = tokenize_bool_or_null(string)
        if token != -1:
            tokens.append(token)
            continue
        # Then numerics
        token, string = tokenize_number(string)
        if token is not None:
            tokens.append(token)
            continue
        head = string[0]
        if head.isspace():
            string = string[1:]
        elif head in JSON_TOKENS:
            tokens.append(head)
            string = string[1:]
        else:
            # Strings go last because quotes around them are optional
            token, string = tokenize_string(string)
            if token is not None:
                tokens.append(token)
    return tokens
def tokenize_string(string: str):
    """
    Attempt to parse the first part of 'string' into a str until we hit a
    terminal char. Handles both quoted strings and non-quoted.

    Fix: unquoted strings now also terminate at "}" and "]". Previously
    `{a: b}` produced the token "b}" and swallowed the closing-brace token.

    :param string: Input stream
    :return: Token, remainder of string
    :raises ValueError: if a quoted string is never closed
    """
    if not string:
        return None, string
    ret_val = ''
    is_quoted = string[0] in ("'", '"')
    if is_quoted:
        string = string[1:]  # remove the leading quote
    # Chars that end an unquoted token (quoted tokens end only at a quote)
    terminators = (":", ",", "}", "]")
    for char in string:
        if is_quoted and char in ("'", '"'):
            # +1 skips the closing quote in the remainder
            return ret_val, string[len(ret_val) + 1:]
        if not is_quoted and char in terminators:
            return ret_val, string[len(ret_val):]
        ret_val += char
    if not is_quoted:
        return ret_val, string[len(ret_val):]
    raise ValueError(f"Reached string end without closing quote: {string}")
def tokenize_number(string):
    """
    Attempt to interpret the 'string' as a number token.

    Fix: inputs like "-" or "." used to reach int()/float() and raise
    ValueError; they are now treated as "no number found".

    :param string: Input stream
    :return: Token (int or float) or None, remainder of string
    """
    # accumulate all chars that might represent a number into num
    num = ''
    for char in string:
        if char.isnumeric() or char in ("-", "."):
            num += char
        else:
            break
    if not num:  # No number found
        return None, string
    try:
        value = float(num) if "." in num else int(num)
    except ValueError:
        # Accumulated chars (e.g. "-", ".", "1.2.3") are not a valid number
        return None, string
    return value, string[len(num):]
def tokenize_bool_or_null(string):
    """
    Attempt to interpret the 'string' as a bool or None token.

    :param string: Input stream
    :return: Token (True/False/None) or -1 on failure, remainder of string
    """
    for literal, value in (("true", True), ("false", False), ("null", None)):
        if string.startswith(literal):
            return value, string[len(literal):]
    return -1, string  # -1 is the "no match" sentinel (None is a real token)
if __name__ == "__main__":
    # No CLI entry point yet; this module is meant to be imported.
    pass
|
import clilib.decorator.resource as resource
import clilib.decorator.verb as verb
class TestDecoratorVerb():
    """Tests for the @verb decorator registering methods on a @resource class."""

    def test_decorated_function_registers_as_a_verb(self):
        @resource
        class MyResource():
            @verb
            def get(self):
                print('Got something')

        # @resource is expected to collect decorated verbs into `_parsers`
        assert 'get' in MyResource._parsers

    def test_specify_verb_through_decorator(self):
        # TODO: not implemented yet
        pass
|
# game.py
# Rock/Paper/Scissors against the computer. The player name comes from the
# PLAYER_NAME environment variable (loaded from a .env file).
import random  # used to pick the computer's move
import os

from dotenv import load_dotenv

load_dotenv()

# Single source of truth for the legal moves (was previously duplicated
# between the validation `if` and a later list literal).
valid_choices = ["rock", "paper", "scissors"]
# Each move beats exactly the move it maps to.
beats = {"rock": "scissors", "scissors": "paper", "paper": "rock"}

print("Rock, Paper, Scissors, Shoot!")
print("----------------------")

# capture player name
player_name = os.getenv('PLAYER_NAME')
print("Hi ", player_name, ". Welcome to my game.")

# prompt user to enter a choice and capture their input
player_choice = input("Please choose one of 'rock', 'paper', 'scissors': ")
print("----------------------")
print("Player chose: ", player_choice)

# validate user input (fix: the success message was assigned but never shown)
if player_choice in valid_choices:
    print("Choice received successfully.")
else:
    print("OOPS, invalid input. Please try again.")
    exit()

computer_choice = random.choice(valid_choices)
print("Computer chose: ", computer_choice)

winner = ""  # stays empty on a tie
# evaluate the winner
if player_choice != computer_choice:
    winner = player_name if beats[player_choice] == computer_choice else 'the computer'

print("----------------------")
# if there is a winner say who won, otherwise declare a tie
if winner:
    print('The winner is', winner)
    print("----------------------")
    print("THIS IS THE END OF THE GAME. PLEASE PLAY AGAIN.")
else:
    print("The game is a tie! Please Play again.")
|
# Upload a new TFLite model version to Firebase ML and publish it.
# Fix: the original `import '.'firebase_admin` lines were invalid Python syntax.
import firebase_admin
from firebase_admin import ml
from firebase_admin import credentials

firebase_admin.initialize_app(
    credentials.Certificate('/path/to/your/service_account_key.json'),
    options={
        'storageBucket': 'your-storage-bucket',
    })

# NOTE(review): create_model() is expected to be defined elsewhere — confirm.
model = create_model()

# Package the local TFLite file as the model's GCS-backed source
source = ml.TFLiteGCSModelSource.from_tflite_model_file('model_v2.tflite')
model.model_format = ml.TFLiteFormat(model_source=source)
model.display_name = "model_v2"
model.tags = ["model_v2", "new_models"]
# Fix: `model.tags += "experimental"` extended the list with the string's
# individual characters; append the tag as a single element instead.
model.tags.append("experimental")

updated_model = ml.update_model(model)
ml.publish_model(updated_model.model_id)
|
# coding=utf-8
class AnsiColours:
    """Wrap text in ANSI colour escape codes for terminal output.

    Each public classmethod returns *text* prefixed with the colour's escape
    sequence and suffixed with the reset sequence.
    """

    colours = {
        'black': '\033[0;30m',
        'blue': '\033[0;34m',
        'cyan': '\033[0;36m',
        'green': '\033[0;32m',
        'red': '\033[0;31m',
        'purple': '\033[0;35m',
        'yellow': '\033[0;33m',
        'light_grey': '\033[0;37m',
        'colour_end': '\033[0m'
    }

    @classmethod
    def _colourize(cls, name, text):
        # Shared wrapper: colour prefix + text + reset suffix.
        return cls.colours[name] + text + cls.colours['colour_end']

    @classmethod
    def black(cls, text):
        return cls._colourize('black', text)

    @classmethod
    def blue(cls, text):
        return cls._colourize('blue', text)

    @classmethod
    def cyan(cls, text):
        return cls._colourize('cyan', text)

    @classmethod
    def green(cls, text):
        return cls._colourize('green', text)

    @classmethod
    def red(cls, text):
        return cls._colourize('red', text)

    @classmethod
    def purple(cls, text):
        return cls._colourize('purple', text)

    @classmethod
    def yellow(cls, text):
        return cls._colourize('yellow', text)

    @classmethod
    def light_grey(cls, text):
        return cls._colourize('light_grey', text)
|
from stag.parser.python_ast_parser import Parser
class Plugin:
    """Stag plugin that provides parsing for Python source files."""

    @property
    def name(self):
        """Unique plugin identifier."""
        return 'stag_python'

    def patterns(self):
        """Glob patterns for files this plugin handles."""
        # TODO: pyx? SConstruct/script?
        return ['*.py']

    def create_parser(self):
        """Build a fresh AST-based parser instance."""
        return Parser()
|
from django.contrib import admin
from .models import account_data
# Register your models here.
class UserData(admin.ModelAdmin):
    """Admin configuration for account_data rows."""
    list_display = ['username', 'key', 'value']  # columns shown in the changelist
    list_filter = ['username']  # sidebar filter
admin.site.register(account_data, UserData)
#! /usr/bin/python
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger(__name__)
# import funkcí z jiného adresáře
import os
import os.path
import pytest
path_to_script = os.path.dirname(os.path.abspath(__file__))
import unittest
import numpy as np
import sys
# Probe optional dependencies. skelet3d is exercised on a tiny volume so that
# a broken (not merely missing) install is also detected.
try:
    import skelet3d
    data3d = np.ones([3, 7, 9])
    data3d[:, 3, 3:6] = 0
    skelet3d.skelet3d(data3d)
    skelet3d_installed = True
# skelet3d
except:
    skelet3d_installed = False
    logger.warning("skelet3d is not working")
try:
    import larcc
    larcc_installed = True
except:
    larcc_installed = False
    logger.warning("larcc is not working")
import teigen.tree
from teigen.tree import TreeBuilder
# There is some problem with VTK. Code seams to be fine but it fails
# Generic Warning: In /tmp/vtk20150408-2435-1y7p97u/VTK-6.2.0/Common/Core/vtkObjectBase.cxx, line 93
# Trying to delete object with non-zero reference count.
# ERROR: In /tmp/vtk20150408-2435-1y7p97u/VTK-6.2.0/Common/Core/vtkObject.cxx, line 156
# vtkObject (0x11a26e760): Trying to delete object with non-zero reference count.
# Flag used below to skip every VTK-touching test.
VTK_MALLOC_PROBLEM = True
#
#
class TubeTreeTest(unittest.TestCase):
    """Integration tests for teigen's TreeBuilder back-ends (LAR, VTK, volume)."""

    def setUp(self):
        # Fix: the flag was stored as `interactivetTest` but read as
        # `interactiveTests`, raising AttributeError whenever consulted.
        self.interactiveTests = False
        # self.interactiveTests = True

    @pytest.mark.LAR
    @unittest.skipIf(not ("larcc" in sys.modules), "larcc is not installed")
    def test_vessel_tree_lar(self):
        """Build a tree with the LAR back-end from the sample YAML stats."""
        import teigen.tb_lar
        tvg = TreeBuilder(teigen.tb_lar.TBLar)
        yaml_path = os.path.join(path_to_script, "./hist_stats_test.yaml")
        tvg.importFromYaml(yaml_path)
        tvg.voxelsize_mm = [1, 1, 1]
        tvg.shape = [100, 100, 100]
        output = tvg.buildTree()  # noqa
        if self.interactiveTests:
            tvg.show()

    @unittest.skipIf(VTK_MALLOC_PROBLEM, "VTK malloc problem")
    def test_nothing(self):
        # Fix: lazy %-style logging args; passing the value as a bare second
        # positional argument is a formatting-args misuse.
        logger.debug("skelet3d_installed %s", skelet3d_installed)
        # import ipdb; ipdb.set_trace()
        # NOTE(review): deliberately failing assertion left in place (the test
        # is skipped via VTK_MALLOC_PROBLEM) — confirm it is still wanted.
        self.assertTrue(False)

    # @unittest.skip("test debug")
    @unittest.skipIf(VTK_MALLOC_PROBLEM, "VTK malloc problem")
    def test_vessel_tree_vtk(self):
        """Build a tree with the VTK back-end from the sample YAML stats."""
        tvg = TreeBuilder('vtk')
        yaml_path = os.path.join(path_to_script, "./hist_stats_test.yaml")
        tvg.importFromYaml(yaml_path)
        tvg.voxelsize_mm = [1, 1, 1]
        tvg.shape = [100, 100, 100]
        output = tvg.buildTree()  # noqa
        # tvg.show()
        # tvg.saveToFile("tree_output.vtk")

    @unittest.skipIf(VTK_MALLOC_PROBLEM, "VTK malloc problem")
    @unittest.skipIf(not ("skelet3d" in sys.modules), "skelet3d is not installed")
    @unittest.skipIf(not skelet3d_installed, "skelet3d is not installed")
    def test_vessel_tree_vtk_from_skeleton(self):
        """Skeletonize a small synthetic volume and rebuild it as a VTK tree file."""
        logger.debug("skelet3d_installed %s", skelet3d_installed)
        import skelet3d
        import skelet3d.skeleton_analyser
        fn_out = 'tree.vtk'
        if os.path.exists(fn_out):
            os.remove(fn_out)
        # Fix: np.int was removed from NumPy (deprecated 1.20, removed 1.24);
        # the builtin int is the documented replacement.
        volume_data = np.zeros([3, 7, 9], dtype=int)
        volume_data[:, :, 1:3] = 1
        volume_data[:, 5, 2:9] = 1
        volume_data[:, 0:7, 5] = 1
        skelet = skelet3d.skelet3d(volume_data)
        skan = skelet3d.skeleton_analyser.SkeletonAnalyser(skelet, volume_data=volume_data, voxelsize_mm=[1, 1, 1])
        stats = skan.skeleton_analysis()
        tvg = TreeBuilder('vtk')
        tvg.voxelsize_mm = [1, 1, 1]
        tvg.shape = [100, 100, 100]
        tvg.tree_data = stats
        output = tvg.buildTree()  # noqa
        tvg.saveToFile(fn_out)
        os.path.exists(fn_out)
        # TODO finish this test

    @unittest.skipIf(VTK_MALLOC_PROBLEM, "VTK malloc problem")
    def test_vessel_tree_vol(self):
        """Build a tree with the volume back-end from the sample YAML stats."""
        import teigen.tb_volume
        tvg = TreeBuilder(teigen.tb_volume.TBVolume)
        yaml_path = os.path.join(path_to_script, "./hist_stats_test.yaml")
        tvg.importFromYaml(yaml_path)
        tvg.voxelsize_mm = [1, 1, 1]
        tvg.shape = [100, 100, 100]
        output = tvg.buildTree()  # noqa
        # tvg.show()
        # if self.interactiveTests:
        #     tvg.show()

    @unittest.skipIf(VTK_MALLOC_PROBLEM, "VTK malloc problem")
    def test_import_new_vt_format(self):
        """Import a tree description in the new VT YAML format and build it."""
        tvg = TreeBuilder()
        yaml_path = os.path.join(path_to_script, "vt_biodur.yaml")
        tvg.importFromYaml(yaml_path)
        tvg.voxelsize_mm = [1, 1, 1]
        tvg.shape = [150, 150, 150]
        data3d = tvg.buildTree()  # noqa

    @unittest.skipIf(VTK_MALLOC_PROBLEM, "VTK malloc problem")
    def test_cylinders_generator(self):
        """Smoke-test the cylinder generator."""
        from teigen.generators.cylinders import CylinderGenerator
        cg = CylinderGenerator()
        cg.run()

    @unittest.skipIf(VTK_MALLOC_PROBLEM, "VTK malloc problem")
    def test_vtk_tree(self):
        """Build a VTK tree from randomly generated edge endpoints."""
        import numpy as np
        tree_data = {}
        element_number = 1
        area_size = 100
        radius = 5
        np.random.seed(0)
        # Random endpoints kept 2*radius away from the volume border
        pts1 = np.random.random([element_number, 3]) * (area_size - 4 * radius) + 2 * radius
        pts2 = np.random.random([element_number, 3]) * (area_size - 4 * radius) + 2 * radius
        for i in range(element_number):
            edge = {
                "nodeA_ZYX_mm": pts1[i],
                "nodeB_ZYX_mm": pts2[i],
                "radius_mm": radius
            }
            tree_data[i] = edge
        tvg = TreeBuilder('vtk')
        tvg.voxelsize_mm = [1, 1, 1]
        tvg.shape = [area_size, area_size, area_size]
        tvg.tree_data = tree_data
        output = tvg.buildTree()  # noqa
        # tvg.show()
        tvg.saveToFile("test_tree_output.vtk")

    @unittest.skipIf(VTK_MALLOC_PROBLEM, "VTK malloc problem")
    def test_tree_generator(self):
        """Build VTK and volume trees from the Voronoi ridges of random points."""
        import numpy as np
        tree_data = {}
        element_number = 6
        np.random.seed(0)
        pts = np.random.random([element_number, 3]) * 100
        # construct voronoi
        import scipy.spatial
        import itertools
        vor3 = scipy.spatial.Voronoi(pts)
        # for i, two_points in enumerate(vor3.ridge_points):
        for i, simplex in enumerate(vor3.ridge_vertices):
            simplex = np.asarray(simplex)
            # following line removes all ridges with outliers (vertex index -1)
            simplex = simplex[simplex > 0]
            if np.all(simplex >= 0):
                for two_points in itertools.combinations(simplex, 2):
                    edge = {
                        "nodeA_ZYX_mm": vor3.vertices[two_points[0]],
                        "nodeB_ZYX_mm": vor3.vertices[two_points[1]],
                        "radius_mm": 2
                    }
                    tree_data[i] = edge
            else:
                pass
        show_input_points = False
        if show_input_points:
            length = len(tree_data)
            for i in range(element_number):
                edge = {
                    "nodeA_ZYX_mm": pts[i - 1],
                    "nodeB_ZYX_mm": pts[i],
                    "radius_mm": 1
                }
                tree_data[i + length] = edge
        tvg = TreeBuilder('vtk')
        tvg.voxelsize_mm = [1, 1, 1]
        tvg.shape = [100, 100, 100]
        tvg.tree_data = tree_data
        output = tvg.buildTree()  # noqa
        # tvg.show()
        tvg.saveToFile("test_tree_output.vtk")
        tvgvol = TreeBuilder('vol')
        tvgvol.voxelsize_mm = [1, 1, 1]
        tvgvol.shape = [100, 100, 100]
        tvgvol.tree_data = tree_data
        outputvol = tvgvol.buildTree()  # noqa
        tvgvol.saveToFile("tree_volume.pklz")

    # @unittest.skipIf(VTK_MALLOC_PROBLEM, "VTK malloc problem")
    def test_io3d(self):
        """Write a small volume through io3d."""
        import io3d
        data3d = np.zeros([10, 10, 10])
        # NOTE(review): the slice `:3:5` selects only index 0 along that axis —
        # confirm whether `2:7, 3:5, :6` was intended.
        data3d[2:7, :3:5, :6] = 100
        datap = {
            "data3d": data3d,
            # "segmentation": np.zeros([10, 10, 10]),
            "voxelsize_mm": [1, 1, 1]
        }
        io3d.write(datap, "file1.pklz")

    # @unittest.skipIf(VTK_MALLOC_PROBLEM, "VTK malloc problem")
    def test_skimage_io_imsave(self):
        """Save one slice of a volume as PNG via skimage.io."""
        import skimage.io
        data3d = np.zeros([10, 10, 10])
        data3d[2:7, :3:5, :6] = 100
        skimage.io.imsave("skiamge.png", data3d[0])
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
#!/usr/bin/python2
import sys
from PyQt4 import QtGui, QtCore
from clusterAMMControl import AMMControl
from clusterSwitchControl import SwitchControl
from clusterBladeControl import BladeControl
from clusterBladeCenterPanel import BladeCenterPanel
from clusterRTOP import RtopPanel
from clusterMediaTray import ClusterMediaTray
#############################
#
# The input for this widget will be the pass in of "drilDownData" as specified in clusterTopoClusterLevel.py
# The current placeholder is, dynamicInfo
#
############################
class TopoBCenterLevel(QtGui.QWidget):
    """Blade-center-level topology view.

    Shows one blade center: a top action bar, a grid of 14 blade buttons and
    a grid of hardware-module buttons (AMM, switches, media tray). Input
    `data` is the drill-down structure produced by clusterTopoClusterLevel;
    presumably data[3] is the per-machine array — TODO confirm against caller.
    """

    def __init__(self, data):
        super(TopoBCenterLevel, self).__init__()
        self.font = QtGui.QFont()
        self.initUI(data)

    def slotBlade(self, dynamicData):
        """Open the blade control dialog for one blade's drill-down data."""
        self.blade = BladeControl(dynamicData)
        self.blade.exec_()

    def slotAMM(self):
        """Open the Advanced Management Module control dialog."""
        self.AMM = AMMControl()
        self.AMM.exec_()

    def slotSwitch(self):
        """Open the switch-module control dialog."""
        self.Switch = SwitchControl()
        self.Switch.exec_()

    def slotBCPanel(self):
        """Open the blade-center information panel."""
        self.bcControl = BladeCenterPanel()
        self.bcControl.exec_()

    def slotRtop(self):
        """Launch the RTOP monitor utility."""
        self.Rtop = RtopPanel()
        self.Rtop.exec_()

    def slotMediaTray(self, bladeAssign):
        """Open the media-tray assignment dialog for the given blade."""
        self.trayAssignment = ClusterMediaTray(bladeAssign)
        self.trayAssignment.exec_()

    def makeActionButton(self, name, tooltip, sizeX, sizeY):
        """Build a bold blue action-bar button (no click handler attached)."""
        button = QtGui.QPushButton(name)
        button.setToolTip(tooltip)
        button.setMinimumSize(sizeX, sizeY)
        self.font.setBold(True)
        button.setFont(self.font)
        color = button.palette()
        color.setColor(button.backgroundRole(), QtGui.QColor(0,102,187))
        color.setColor(QtGui.QPalette.ButtonText, QtGui.QColor(255,255,255))
        button.setPalette(color)
        return button

    def makeHardwareButton(self, label, state, sizeX, sizeY, drillDownData, statusInfoArray):
        """Build a state-coloured hardware button and wire its click handler.

        state: 0 = good (green), 1 = down (red), 2 = non-critical issues
        (yellow), 3 = hardware absent (disabled). statusInfoArray lines become
        the tooltip.
        """
        button = QtGui.QPushButton(label)
        button.setMinimumSize(sizeX, sizeY)
        self.font.setBold(True)
        button.setFont(self.font)
        # Make Tooltip to display status of hardware button
        # checkIfLast - check if last element being added to the string; removes the '\n' from last line
        statusString = ''
        checkIfLast = 0
        for i in statusInfoArray:
            if checkIfLast != len(statusInfoArray) - 1:
                statusString += i + '\n'
            else:
                statusString += i
            checkIfLast += 1
        button.setToolTip(statusString)
        buttonsColor = button.palette()
        if state == 0: #HARDWARE UP AND GOOD STATE
            buttonsColor.setColor(button.backgroundRole(), QtGui.QColor(0,102,0))
            buttonsColor.setColor(QtGui.QPalette.ButtonText, QtGui.QColor(255,255,255))
        elif state == 1: #HARDWARE DOWN OR BAD STATE
            buttonsColor.setColor(button.backgroundRole(), QtGui.QColor(221,0,0))
            buttonsColor.setColor(QtGui.QPalette.ButtonText, QtGui.QColor(255,255,255))
        elif state == 2: #HARDWARE HAS NON-CRITICAL ISSUES
            buttonsColor.setColor(button.backgroundRole(), QtGui.QColor(255,221,68))
            buttonsColor.setColor(QtGui.QPalette.ButtonText, QtGui.QColor(255,255,255))
        elif state == 3: #NO HARDWARE PRESENT STATE
            button.setEnabled(False)
        button.setPalette(buttonsColor)
        # What are we connecting the button to?
        if ('ADVANCED\nMANAGEMENT\nMODULE' == label):
            button.clicked.connect(self.slotAMM)
        elif ('SWITCH\nMODULE\n001' == label):
            button.clicked.connect(self.slotSwitch)
        elif ('SWITCH\nMODULE\n002' == label):
            button.clicked.connect(self.slotSwitch)
        # The media tray is a special case and the drillDown data passed for its call
        # is meant to set a parameter to the first blade in the bladecenter as default
        # for the media tray assignment, this will need to be swapped for the actual assignment of
        # the media tray, when the backend in implemented
        elif ('MEDIA\nTRAY' == label):
            # lambda binds this call's drillDownData argument (one binding per
            # makeHardwareButton call, so no late-binding issue here)
            button.clicked.connect(lambda: self.slotMediaTray(drillDownData[0][0]))
        else:
            button.clicked.connect(lambda: self.slotBlade(drillDownData))
        return button

    def initUI(self, data):
        """Assemble the window: action bar, 14-blade grid, hardware grid."""
        # This will be used to to set the window's layout
        self.layout = QtGui.QVBoxLayout()
        topBarLayout = QtGui.QHBoxLayout()
        topLeftLayout = QtGui.QHBoxLayout()
        topRightLayout = QtGui.QHBoxLayout()
        topLeftFrame = QtGui.QFrame()
        topRightFrame = QtGui.QFrame()
        topLeftFrame.setFrameShape(QtGui.QFrame.StyledPanel)
        topRightFrame.setFrameShape(QtGui.QFrame.StyledPanel)
        self.backButton = self.makeActionButton('BACK', 'Return to high-level view', 158, 60)
        self.bladeRebootButton = self.makeActionButton('REBOOT\nERROR BLADES', 'Perform automated shutdown\nand boot up procedures\nto all blades with errors\nin this blade center', 156, 60)
        self.bcPanelButton = self.makeActionButton('BLADE CENTER\nPANEL', 'Open information panel\nfor the blade center', 158, 60)
        self.rtopButton = self.makeActionButton('RTOP', 'Launch blade, LCCA,\nand management node\nmonitor utility', 158, 60)
        self.bcPanelButton.clicked.connect(self.slotBCPanel)
        self.rtopButton.clicked.connect(self.slotRtop)
        topLeftLayout.addWidget(self.backButton)
        topRightLayout.addWidget(self.bcPanelButton)
        topRightLayout.addWidget(self.bladeRebootButton)
        topRightLayout.addWidget(self.rtopButton)
        topLeftFrame.setLayout(topLeftLayout)
        topRightFrame.setLayout(topRightLayout)
        topBarLayout.addWidget(topLeftFrame)
        topBarLayout.addWidget(topRightFrame)
        # Make grid layout for the array buttons
        bladeGrid = QtGui.QGridLayout()
        hardwareGrid = QtGui.QGridLayout()
        # ----------------- THE DATA! ---------------------
        # machineDataArray - based on the data passed in
        #
        # Template, each entry - [ bladeNum, bladeState, [ cpuRate, memoryRate, rx, tx ] ]
        #dynamicInfo = [ [ '1', 0, [ 100, 10, '85KB', '100KB' ], ['ON', 'PING-ABLE', 'SSH-ABLE', 'NO ERRORS'] ],
        #                [ 'AMM', 1, [], ['ON', 'PING-ABLE', 'SSH-ABLE', 'NO ERRORS'] ],
        #                [ 'SWITCH\nMODULE\n001', 1, [], ['ON', 'PING-ABLE', 'SSH-ABLE', 'NO ERRORS'] ],
        #                [ 'SWITCH\nMODULE\n002', 1, [], ['ON', 'PING-ABLE', 'SSH-ABLE', 'NO ERRORS'] ],
        #                [ 'MEDIA\nTRAY', 1, [], ['ON', 'PING-ABLE', 'SSH-ABLE', 'NO ERRORS'] ] ]
        j = 0 # Check for the first 14 blades
        k = 0 # Incrementor
        machineDataArray = data[3]
        mediaTrayAssignment = data[3][0][0] # NEEDS TO BE UPDATED (PLACEHOLDER)
        for i in machineDataArray:
            if (j < 14):
                button = self.makeHardwareButton(i[0], i[1], 43, 300, i, i[3])
                bladeGrid.addWidget(button, 0, j)
                j = j + 1
            else:
                if ('ADVANCED' in i[0]):
                    button = self.makeHardwareButton(i[0], i[1], 169, 135, i, i[3])
                elif ('SWITCH' in i[0]):
                    button = self.makeHardwareButton(i[0], i[1], 169, 135, i, i[3])
                elif ('MEDIA' in i[0]):
                    # MUST CHANGE: Defaulting to giving the first blade in a blade center as the
                    # assignment of the media tray. When the back end is implemented, this value
                    # will need to be changed to whatever the media tray gives back for its assignment
                    button = self.makeHardwareButton(i[0], i[1], 169, 135, machineDataArray, i[3])
                # else: Throw an error
                # NOTE(review): if no label matches above, `button` still holds
                # the previous iteration's widget — confirm the input can never
                # contain an unrecognised label.
                hardwareGrid.addWidget(button, 0, k)
                k = k + 1
        # -------------Matryoshka Assembly!---------------------
        # Widgets: should each be contained in a layout
        # Layouts: each get added to the main layout
        self.layout.addLayout(topBarLayout)
        self.layout.addLayout(bladeGrid)
        self.layout.addLayout(hardwareGrid)
        # Finalize layout of window and show
        self.setLayout(self.layout)
        self.move(300, 150)
        self.setWindowTitle('BLADE CENTER VIEW')
        self.setGeometry(50, 50, 700, 500)
        self.show()
def main():
    # NOTE(review): TopoBCenterLevel.__init__ requires a `data` argument; this
    # call passes none and will raise TypeError — confirm the intended entry
    # point / sample data for standalone runs.
    app = QtGui.QApplication(sys.argv)
    ex = TopoBCenterLevel()
    sys.exit(app.exec_())
if __name__ == '__main__':
    main()
|
# Generated by Django 2.2 on 2019-12-13 14:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: set EposPayment verbose names and add the `is_passed` flag."""

    dependencies = [
        ('app', '0019_epospayment'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='epospayment',
            options={'verbose_name': 'Платеж физ. лица', 'verbose_name_plural': 'Платежи физ. лиц'},
        ),
        migrations.AddField(
            model_name='epospayment',
            name='is_passed',
            # Existing rows default to "not confirmed"
            field=models.BooleanField(default=False, verbose_name='Подтвержден'),
        ),
    ]
|
from text_query_handlers import AbstractHandler as ah
import re
from random import randint
import ConstantsAndUtils as cau
class ProxyCallHandler(ah.AbstractHandler):
    """Relay a message from the bot master to an arbitrary chat.

    Expects master input of the form::

        /message: <text>
        /id: <target chat id>
    """

    match_regex = "(/message: (.*)\n?/id: (.*))"

    def predicate(self, message):
        """Handle only well-formed proxy commands sent by the master."""
        # re.search is sufficient; findall scanned for every match just to
        # test existence.
        return re.search(self.match_regex, message.text) is not None \
            and message.from_user.id == cau.master_id

    def handle(self, bot, message):
        """Forward the embedded text to the chat id named in the command."""
        # Single regex scan instead of two; local renamed so it no longer
        # shadows the builtin `id`.
        match = re.search(self.match_regex, message.text)
        proxied_text = match.group(2)
        target_id = match.group(3)
        bot.send_message(target_id, proxied_text)
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 30 10:15:05 2019
@author: user
"""
"""
N값, Group 수정
N값 자동화함. Group 지정만,
"""
# In[] Group assignment (animal IDs per experimental condition)
highGroup = [0,2,3,4,5,6,8,9,10,11,59]  # 5%
# excluded 7: paw already swollen at baseline; movement ratio in the inter
# phase is highly anomalous (likely >3 SD). Animal 1 removed as well.
midleGroup = [20,21,22,23,24,25,26,57]  # 1%
restrictionGroup = [27,28,29,30,43,44,45]  # restriction 5%
lowGroup = [31,32,33,35,36,37,38]  # 0.25%; excluded 34: no overlapping at all
salineGroup = [12,13,14,15,16,17,18,19,47,48,52,53,56,58]  # control
ketoGroup = [39,40,41,42,46,49,50]
lidocaineGroup = [51,54,55]
capsaicinGroup = [60,61,62,64,65,82,83,104,105]
yohimbineGroup = [63,66,67,68,69,74]
pslGroup = [70,71,72,73,75,76,77,78,79,80,84,85,86,87,88,93,94]
shamGroup = [81,89,90,91,92,97]
adenosineGroup = [98,99,100,101,102,103,110,111,112,113,114,115]
CFAgroup = [106,107,108,109,116,117]
highGroup2 = [95,96]  # training only; late/recovery never analysed, no base movement data
chloroquineGroup = [118,119,120,121,122,123,124,125,126,127]
itSalineGroup = [128,129,130,134,135,138,139,140]
itClonidineGroup = [131,132,133,136,137]  # day 3 of animal 132 should be classified as i.t. saline
ipsaline_pslGroup = [141,142,143,144,145,146,147,148,149,150,152,155,156,158,159]
ipclonidineGroup = [151,153,154,157,160,161,162,163]
gabapentinGroup = [164,165,166,167,168,169,170,171,172,173,174,175,176,177,
                   178,179,180,181,182,183,184,185,186]
beevenomGroup = [187]

# Matched pairs of animal IDs
msset = [[70,72],[71,84],[75,85],[76,86],[79,88],[78,93],[80,94]]
msset2 = [[98,110],[99,111],[100,112],[101,113],[102,114],[103,115],
          [134,135],[136,137],[128,138],[130,139],[129,140],[144,147],[145,148],[146,149],
          [153,154],[152,155],[150,156],[151,157],[158,159],[161,160],[162,163],[167,168],
          [169,170],[172,173],[174,175],[177,178],[179,180]]  # independent baselines; base not skipped during training

# Collect every group into a single lookup table keyed by group name.
# Fix: removed the duplicated msGroup['ipclonidineGroup'] assignment.
msGroup = dict()
msGroup['highGroup'] = highGroup
msGroup['midleGroup'] = midleGroup
msGroup['restrictionGroup'] = restrictionGroup
msGroup['lowGroup'] = lowGroup
msGroup['salineGroup'] = salineGroup
msGroup['ketoGroup'] = ketoGroup
msGroup['lidocaineGroup'] = lidocaineGroup
msGroup['capsaicinGroup'] = capsaicinGroup
msGroup['yohimbineGroup'] = yohimbineGroup
msGroup['pslGroup'] = pslGroup
msGroup['shamGroup'] = shamGroup
msGroup['adenosineGroup'] = adenosineGroup
msGroup['highGroup2'] = highGroup2
msGroup['CFAgroup'] = CFAgroup
msGroup['chloroquineGroup'] = chloroquineGroup
msGroup['itSalineGroup'] = itSalineGroup
msGroup['itClonidineGroup'] = itClonidineGroup
msGroup['ipsaline_pslGroup'] = ipsaline_pslGroup
msGroup['ipclonidineGroup'] = ipclonidineGroup
msGroup['gabapentinGroup'] = gabapentinGroup
msGroup['beevenomGroup'] = beevenomGroup
msGroup['msset'] = msset
msGroup['msset2'] = msset2
import numpy as np
import pandas as pd
import os
import sys
msdir = 'D:\\mscore\\code_lab'; sys.path.append(msdir)
import msfilepath
import pickle
import hdf5storage
import matplotlib.pyplot as plt
# Count recordings by probing msfilepath until it reports the end flag; the
# final count becomes the total number of recordings N.
endsw=False; cnt=-1
while not(endsw):
    cnt += 1
    _, _, _, endsw = msfilepath.msfilepath1(cnt)
N = cnt; N2 = N
print('totnal N', N)
FPS = 4.3650966869  # imaging frame rate (frames per second)
runlist = range(N)  # default set of recording indices to process
# In
#import sys
#msdir = 'C:\\code_lab'; sys.path.append(msdir)
#from scipy.signal import find_peaks
def errorCorrection(msraw):
    """Repair out-of-range or NaN samples produced by turboreg registration.

    Each bad sample (|value| > 1e4 or NaN) is replaced in place with the value
    from the following row of the same column. Returns the (mutated) array and
    a flag: 1 if any bad sample was found, else 0. The caller re-runs this
    until the flag stays 0.
    """
    found_bad = 0
    n_rows, n_cols = msraw.shape
    for c in range(n_cols):
        for r in range(n_rows):
            value = msraw[r, c]
            if value > 10**4 or value < -10**4 or np.isnan(value):
                found_bad = 1
                print('at '+ str(r) + ' ' + str(c))
                print('turboreg error value are dectected... will process after correction')
                try:
                    msraw[r, c] = msraw[r + 1, c]
                    print(msraw[r + 1, c])
                except:
                    # Bad value in the last row: nothing below to copy from.
                    print('error, can not fullfil')
    return msraw, found_bad
def smoothListGaussian(array1, window):
    """Smooth a 1-D signal with a Gaussian-shaped moving window.

    Returns a plain list that is `window` samples shorter than the input;
    each output sample is the kernel-weighted mean of the corresponding
    window of the input.
    """
    window = round(window)
    degree = (window + 1) / 2
    # Build the (unnormalised) Gaussian kernel, then normalise it to sum 1.
    kernel = []
    for idx in range(window):
        offset = idx - degree + 1
        frac = offset / float(window)
        kernel.append(1 / (np.exp((4 * (frac)) ** 2)))
    weight = np.array(kernel) * np.array([1.0] * window)
    weight = weight / np.sum(weight)
    # Sliding-window weighted average (sum(weight) is 1 after normalisation).
    out_len = array1.shape[0] - window
    smoothed = [sum(np.array(array1[i:i + window]) * weight) / sum(weight)
                for i in range(out_len)]
    return smoothed
skipsw = False  # when True, skip recordings whose output file already exists
def mssignal_save(list1):
    """Preprocess raw fluorescence Excel files and save dF/F per session.

    For each recording index N in *list1*: load the raw traces, trim trailing
    NaN rows/columns, repair turboreg artifacts, split into sessions, apply
    Gaussian smoothing, compute the F0 baseline per neuron and write the
    resulting dF/F matrices to `signal_save.xlsx` (one sheet per session).

    Fix: `np.float` (removed from NumPy 1.24) replaced with the builtin float.

    Recordings >= 70 (except 74) use the "new format" with one sheet per
    session; older recordings store all sessions in one sheet plus a phase
    table in sheet 2 — presumably onset timestamps; TODO confirm.
    """
    newformat = list(range(70, N2))
    newformat.remove(74)
    for N in list1:
        if N not in newformat:
            # --- old format: one big sheet, split by phase timestamps ---
            print('signal preprosessing...', N)
            path, behav_data, raw_filepath, _ = msfilepath.msfilepath1(N)
            savename = path + '\\signal_save.xlsx'
            if os.path.exists(savename) and skipsw:
                print('이미 처리됨. skip', savename)
                continue
            loadpath = path + '\\' + raw_filepath
            df = pd.read_excel(loadpath)
            # Number of valid ROI columns: stop at the first NaN in row 0
            ROI = df.shape[1]
            for col in range(df.shape[1]):
                if np.isnan(df.iloc[0, col]):
                    ROI = col - 1
                    break
            print(str(N) + ' ' + raw_filepath + ' ROI ' + str(ROI - 1))  # excluding the time-axis column
            # Number of valid rows: stop at the first NaN in the time column
            timeend = df.shape[0]
            for row in range(df.shape[0]):
                if np.isnan(df.iloc[row, 0]):
                    timeend = row
                    break
            msraw = np.array(df.iloc[:timeend, :ROI])
            print(str(N) + ' max ' + str(np.max(np.max(msraw))) + ' min ' + str(np.min(np.min(msraw))))
            # Repeat correction until no turboreg artifacts remain
            while True:
                msraw, sw = errorCorrection(msraw)
                if sw == 0:
                    break
            # Split into sessions at the phase timestamps from sheet 2
            phaseInfo = pd.read_excel(loadpath, sheet_name=2, header=None)
            s = 0; array2 = list()
            for ix in range(phaseInfo.shape[0]):
                for frame in range(msraw.shape[0]):
                    if abs(msraw[frame, 0] - phaseInfo.iloc[ix, 0]) < 0.00001:
                        print(N, s, frame)
                        array2.append(np.array(msraw[s:frame, 1:]))
                        s = frame
                if ix == phaseInfo.shape[0] - 1:
                    array2.append(np.array(msraw[s:, 1:]))
        elif N in newformat:
            # --- new format: one sheet per session ---
            print('signal preprosessing...', N)
            path, behav_data, raw_filepath, _ = msfilepath.msfilepath1(N)
            savename = path + '\\signal_save.xlsx'
            if os.path.exists(savename) and skipsw:
                print('이미 처리됨. skip', savename)
                continue
            loadpath = path + '\\' + raw_filepath
            array0 = []; array2 = []; k = -1
            # Read sheets until pandas raises (no more sheets)
            while True:
                k += 1
                print('k', k)
                try:
                    df = pd.read_excel(loadpath, sheet_name=k, header=None)
                    array0.append(df)
                except:
                    break
            print(N, 'newformat으로 처리됩니다.', 'total session #', k)
            for se in range(k):
                ROI = array0[se].shape[1]
                for col in range(array0[se].shape[1]):
                    if np.isnan(array0[se].iloc[0, col]):
                        ROI = col - 1
                        print(N, 'NaN value로 인하여 data 수정합니다.')
                        break
                timeend = array0[se].shape[0]
                for row in range(array0[se].shape[0]):
                    if np.isnan(array0[se].iloc[row, 0]):
                        timeend = row
                        print(N, 'NaN value로 인하여 data 수정합니다.')
                        break
                array0[se] = np.array(array0[se].iloc[:timeend, :ROI])
                print(str(N) + ' max ' + str(np.max(np.max(array0[se]))) +
                      ' min ' + str(np.min(np.min(array0[se]))))
                msraw = np.array(array0[se])
                while True:
                    msraw, sw = errorCorrection(msraw)
                    if sw == 0:
                        break
                array0[se] = np.array(msraw)
                array2.append(np.array(array0[se][:, 1:]))
            print(str(N) + ' ' + raw_filepath + ' ROI ', array2[0].shape[1])
        # --- common pipeline: smoothing, F0 baseline, dF/F, save ---
        array3 = list()  # after gaussian filter
        for se in range(len(array2)):
            matrix = np.array(array2[se])
            tmp_matrix = list()
            for neuronNum in range(matrix.shape[1]):
                tmp_matrix.append(smoothListGaussian(matrix[:, neuronNum], 10))
            tmp_matrix = np.transpose(np.array(tmp_matrix))
            array3.append(tmp_matrix)
        array4 = list()
        for se in range(len(array3)):
            matrix = np.array(array3[se])
            matrix = np.array(list(matrix[:, :]), dtype=float)
            # F0 (baseline): mean of the lowest 30% of each neuron's smoothed trace
            f0_vector = list()
            for n in range(matrix.shape[1]):
                msmatrix = np.array(matrix[:, n])
                f0 = np.mean(np.sort(msmatrix)[0:int(round(msmatrix.shape[0] * 0.3))])
                f0_vector.append(f0)
                if False:  # debug plotting, disabled
                    plt.figure(n, figsize=(18, 9))
                    plt.title(n)
                    plt.plot(msmatrix)
                    aline = np.zeros(matrix[:, 0].shape[0]); aline[:] = f0
                    plt.plot(aline)
                    print(f0, np.median(msmatrix))
            # dF/F against the un-smoothed signal (array2, not array3) —
            # NOTE(review): presumably intentional; confirm.
            f0_vector = np.array(f0_vector)
            f_signal = np.zeros(matrix.shape)
            for frame in range(matrix.shape[0]):
                f_signal[frame, :] = (array2[se][frame, :] - f0_vector) / f0_vector
            array4.append(f_signal)
        with pd.ExcelWriter(savename) as writer:
            for se in range(len(array4)):
                msout = pd.DataFrame(array4[se], index=None, columns=None)
                msout.to_excel(writer, sheet_name='Sheet' + str(se + 1), index=False, header=False)
    return None
# In
def msMovementExtraction(list1):
    """Binarize each recording's behavioral motion trace against a
    per-session threshold and save it as 'MS_<behavior file>' CSVs.

    For every recording index N in list1 the Gaussian-smoothed frame
    difference ('msdiff_gauss') is loaded, a baseline is estimated from the
    modal bin of a 10-bin histogram of the trace, and the threshold is
    baseline + 0.15 unless a manual per-recording override applies below.
    Samples under the threshold are zeroed; diagnostic plots are saved as
    PNGs.  Relies on module-level globals: msfilepath, hdf5storage, np, pd,
    plt, os, skipsw.  Returns None.
    """
    # movement_thr_save = np.zeros((N2,5))
    for N in list1:
        path, behav_data, raw_filepath, _ = msfilepath.msfilepath1(N)
        # Derive the .avi.mat motion-file names from the behavior file names.
        behav_data_ms = list()
        for i in range(len(behav_data)):
            tmp = behav_data[i][0:3]
            behav_data_ms.append(tmp + '.avi.mat')
        for i in range(len(behav_data_ms)):
            loadpath = path + '\\' + behav_data_ms[i]
            savename = path + '\\' + 'MS_' + behav_data[i]
            # Skip sessions already processed when the skip switch is on.
            if os.path.exists(savename) and skipsw:
                print('이미 처리됨. skip', savename)
                continue
            df = hdf5storage.loadmat(loadpath)
            # Gaussian-smoothed inter-frame difference = motion-energy trace.
            diffplot = df['msdiff_gauss']
            diffplot = np.reshape(diffplot, (diffplot.shape[1]))
            msmatrix = np.array(diffplot)
            # Split the value range into 10 equal-width bins and locate the
            # most populated bin; its mean serves as the resting baseline.
            msmax = np.max(msmatrix); msmin = np.min(msmatrix); diff = (msmax - msmin)/10
            tmpmax = -np.inf; savemax = np.nan
            for j in range(10):
                c1 = (msmatrix >= (msmin + diff * j))
                c2 = (msmatrix < (msmin + diff * (j+1)))
                # print(np.sum(c1 * c2), j)
                if tmpmax < np.sum(c1 * c2):
                    tmpmax = np.sum(c1 * c2); savemax = j
            c1 = (msmatrix >= (msmin + diff * savemax))
            c2 = (msmatrix < (msmin + diff * (savemax+1)))
            mscut = np.mean(msmatrix[(c1 * c2)])
            thr = mscut + 0.15
            # Manually tuned per-recording (N) / per-session (i) overrides.
            # NOTE(review): N==25 and i==3 appears twice; the later 0.5 wins.
            if N == 10 and i == 0:
                thr = 1.5
            if N == 10 and i == 2:
                thr = 1.5
            if N == 10 and i == 4:
                thr = 1.5
            if N == 14 and i == 2:
                thr = mscut + 0.05
            if N == 14 and i == 3:
                thr = mscut + 0.05
            if N == 25 and i == 3:
                thr = 0.9
            if N == 26 and i == 2:
                thr = mscut + 0.20
            if N == 25 and i == 3:
                thr = 0.5
            if N == 42 and i == 2:
                thr = 1.8
            if N == 42 and i == 3:
                thr = 1.8
            if N == 43:
                thr = 0.76
            if N == 45:
                thr = 1
            if N == 57 and i == 1:
                thr = 1.25
            if N == 44 and i == 0:
                thr = 0.8
            if N == 73 and i == 0:
                thr = 1
            if N == 76 and i == 0:
                thr = 1
            if N == 83 and i == 1:
                thr = 1.1
            if N == 86 and i == 0:
                thr = 1
            if N == 87 and i == 2:
                thr = 0.93
            if N == 90 and i == 1:
                thr = 0.65
            if N == 91 and i == 0:
                thr = 0.55
            if N == 91 and i == 1:
                thr = 0.65
            if N == 97 and i == 0:
                thr = 0.53
            if N == 97 and i == 1:
                thr = 0.63
            if N == 97 and i == 2:
                thr = 0.8
            if N == 99 and i in [0,1]:
                thr = 1
            if N == 99 and i in [2]:
                thr = 1.2
            if N == 100 and i in [1]:
                thr = 0.9
            if N == 101 and i in [2]:
                thr = 1
            if N == 116 and i in [0]:
                thr = 0.9
            if N == 127 and i in [1]:
                thr = 1
            if N == 128 and i in [2]:
                thr = 1
            if N == 154 and i in [3]:
                thr = 1
            # Horizontal line at the chosen threshold (for the plot below).
            aline = np.zeros(diffplot.shape[0]); aline[:] = thr
            # movement_thr_save[SE,se] = thr
            # Save a diagnostic plot of the trace plus the chosen threshold.
            if True:
                plt.figure(i, figsize=(18, 9))
                ftitle = str(N) + '_' + str(i) + '_' + behav_data_ms[i] + '.png'
                plt.title(i)
                plt.plot(msmatrix)
                print(ftitle, diffplot.shape[0])
                plt.plot(aline)
                plt.axis([0, diffplot.shape[0], np.min(diffplot)-0.05, 2.5])
                savepath = 'D:\\mscore\\syncbackup\\paindecoder\\save\\msplot\\0728_behavior'
                if not os.path.exists(savepath):
                    os.mkdir(savepath)
                os.chdir(savepath)
                plt.savefig(ftitle)
                plt.close(i)
            # raw
            # Zero sub-threshold samples; supra-threshold values keep magnitude.
            msmatrix[msmatrix<thr] = 0
            savems = msmatrix
            msout = pd.DataFrame(savems ,index=None, columns=None)
            msout.to_csv(savename, index=False, header=False)
    return None
# In[]
#runlist = [77,123,120,106] + list(range(139,N))
# Recording indices to (re)process in this run; everything else is loaded
# from the per-recording pickle cache below.
runlist = range(187, N)
print('runlist', runlist, '<<<< 확인!!')
mssignal_save(runlist)
msMovementExtraction(runlist)
#N, FPS, signalss, bahavss, baseindex, movement, msGroup, basess = msRun('main')
# In[] signal & behavior import
# Per-recording containers for calcium signals and behavior traces
# (list-comprehension replaces the original side-effect comprehension).
signalss = [[] for _ in range(N2)]
bahavss = [[] for _ in range(N2)]
RESULT_SAVE_PATH = msdir + '\\raw_tmpsave\\'
if not os.path.exists(RESULT_SAVE_PATH):
    os.mkdir(RESULT_SAVE_PATH)
for SE in range(N2):
    print(SE, N)
    pickle_savepath = RESULT_SAVE_PATH + str(SE) + '_raw.pickle'
    # Use the cached pickle unless this recording is being reprocessed.
    if os.path.isfile(pickle_savepath) and SE not in runlist:
        with open(pickle_savepath, 'rb') as f:
            msdata_load = pickle.load(f)
        signals = msdata_load['signals']
        behavs = msdata_load['behavs']
    else:
        path, behav_data, raw_filepath, _ = msfilepath.msfilepath1(SE)
        # loadpath = path + '\\events_save.xlsx'
        loadpath2 = path + '\\signal_save.xlsx'
        signals = list(); behavs = list()  # events = list();
        os.chdir(path)
        df2 = None
        for se in range(5):
            try:
                df2 = pd.read_excel(loadpath2, header=None, sheet_name=se)
                df3 = np.array(pd.read_csv('MS_' + behav_data[se]))
                signals.append(np.array(df2))
                behavs.append(np.array(df3))
            except:
                if se < 3:
                    # Sessions 0-2 are mandatory; abort the whole run.
                    print('se 3 이하 session은 필수입니다.')
                    import sys
                    # Bug fix: the original evaluated 'sys.exit' without
                    # calling it, so execution silently continued.
                    sys.exit()
                print(SE, se, 'session 없습니다. 예외 group으로 판단, 이전 session을 복사하여 채웁니다.')
                # Missing optional session: duplicate the previous one.
                signals.append(np.array(df2))
                behavs.append(np.array(df3))
        tmp1 = { 'signals' : signals, 'behavs' : behavs}
        with open(pickle_savepath, 'wb') as f:
            pickle.dump(tmp1, f, pickle.HIGHEST_PROTOCOL)
        print(pickle_savepath, '저장되었습니다.')
    signalss[SE] = signals
    bahavss[SE] = behavs
# In
# In QC
# Quality control: a jump larger than `thr` in dF/F0 between consecutive
# frames is treated as an artifact and replaced.
thr = 10
for SE in range(N2):
    print(SE)
    signals = signalss[SE]
    # Per-ROI correction counter; an ROI reaching 20 corrections is dropped.
    rois = np.zeros(signals[0].shape[1])
    for se in range(5):
        wsw = True
        # Repeat until a full pass over the session makes no more corrections.
        while wsw:
            wsw = False
            signal = np.array(signalss[SE][se])
            for n in range(signal.shape[1]):
                msplot = np.zeros(signal.shape[0]-1)
                for frame in range(signal.shape[0]-1):
                    msplot[frame] = np.abs(signal[frame+1,n] - signal[frame,n])
                    if msplot[frame] > thr and rois[n] < 20:
                        wsw = True
                        rois[n] += 1
                        print(SE, se, n, msplot[frame], frame+1)
                        signalss[SE][se][frame+1,n] = float(signal[frame,n]) # abrupt change treated as noise; replaced with the previous frame's intensity
    for se in range(5):
        signal = np.array(signalss[SE][se])
        # Remove every ROI that hit the correction cap.
        signalss[SE][se] = np.delete(signal, np.where(rois==20)[0], 1)
        print('ROI delete', SE, se, np.where(rois==20)[0])
        # print(signalss[SE][se][frame+1,n], signal[frame,n])
# In ROI sanity check: flag ROIs whose dF/F0 never exceeds 0.3 in any session.
# NOTE(review): iterates range(N) while the QC pass above uses range(N2) —
# confirm which count is intended.
for SE in range(N):
    signals = signalss[SE]
    ROIsw = np.zeros(np.array(signals[0]).shape[1])
    for n in range(np.array(signals[0]).shape[1]):
        sw = 0
        for se in range(5):
            signal = np.array(signals[se])
            if np.max(signal[:,n]) > 0.3: # 0.3 has no special meaning; empirically every real ROI exceeds it at least once
                ROIsw[n] = 1
                break
    if np.sum(ROIsw) != np.array(signals[0]).shape[1]:
        print("signal이 없는 ROI가 존재함")
# In
from scipy.stats.stats import pearsonr
def msbehav_syn(behav, signal):
    """Resample a behavior event trace onto the imaging time axis.

    Every behavior frame with a positive value is mapped, by rounded linear
    scaling, onto its corresponding imaging frame and counted there.
    Returns a float array with one accumulator bin per imaging frame.
    """
    behav_arr = np.array(behav)
    sig_arr = np.array(signal)
    n_imaging = sig_arr.shape[0]
    ratio = n_imaging / behav_arr.shape[0]
    counts = np.zeros(n_imaging)
    for b_idx in range(behav_arr.shape[0]):
        target = int(round(b_idx * ratio))
        # Events rounding exactly one past the last imaging frame are dropped.
        if behav_arr[b_idx] > 0 and target != n_imaging:
            counts[target] += 1
    return counts
# Syn-constant estimation: for each recording (SE) and session (se), find the
# frame shift that maximizes the Pearson correlation between the mean
# calcium signal and the resampled behavior trace.
synsave = np.zeros((N,5))
SE = 6; se = 1
for SE in range(N):
    signals = signalss[SE]
    behavs = bahavss[SE]
    for se in range(5):
        signal = np.array(signals[se])
        meansignal = np.mean(signal,1)
        behav = np.array(behavs[se])
        behav_syn = msbehav_syn(behav, signal)
        xaxis = list(); yaxis = list()
        # Only align sessions with appreciable movement (plus one forced case).
        if np.mean(behav) > 0.01 or (SE == 36 and se == 3):
            synlist = np.arange(-300,301,1)
            if (SE == 36 and se == 3) or (SE == 1 and se == 2) or (SE == 38 and se == 2) or (SE == 42 and se == 1): # manual exceptions: narrower search window
                synlist = np.arange(-50,50,1)
            for syn in synlist:
                syn = int(round(syn))
                if syn >= 0:
                    singal_syn = meansignal[syn:]
                    sz = singal_syn.shape[0]
                    behav_syn2 = behav_syn[:sz]
                elif syn <0:
                    singal_syn = meansignal[:syn]
                    behav_syn2 = behav_syn[-syn:]
                # Skip shifts that truncate behavior events, except for the
                # manually whitelisted sessions below.
                msexcept = not((SE == 40 and se == 1) or (SE == 6 and se == 1) or (SE == 8 and se == 3) \
                or (SE == 10 and se == 1) or (SE == 10 and se == 3) or (SE == 11 and se == 1) \
                or (SE == 15 and se == 2) or (SE == 19 and se == 4) or (SE == 21 and se == 1) \
                or (SE == 22 and se == 0) or (SE == 32 and se == 4) or (SE == 34 and se == 0) \
                or (SE == 35 and se == 1) or (SE == 36 and se == 0) or (SE == 37 and se == 0) \
                or (SE == 37 and se == 1) or (SE == 37 and se == 4) or (SE == 38 and se == 2) \
                or (SE == 39 and se == 4) or (SE == 40 and se == 4) or (SE == 41 and se == 1) \
                or (SE == 42 and se == 0) or (SE == 41 and se == 1) or (SE == 42 and se == 0) \
                or (SE == 42 and se == 1))
                if np.sum(behav_syn2) < np.sum(behav_syn) and msexcept:
                    continue
                if not np.sum(behav_syn2) == 0:
                    r = pearsonr(singal_syn, behav_syn2)[0]
                elif np.sum(behav_syn2) == 0:
                    r = 0
                xaxis.append(syn)
                yaxis.append(r)
            # Bug fix: the original tested '< 0', which can never be true, so
            # NaN correlations were never reported; '> 0' actually warns.
            if np.sum(np.isnan(yaxis)) > 0:
                print(SE,se, 'nan 있어요')
#            plt.plot(xaxis,yaxis)
            maxsyn = xaxis[np.argmax(yaxis)]
        else:
            maxsyn = 0
        synsave[SE,se] = maxsyn
# Manual overrides for sessions where the automatic alignment fails.
synsave[12,4] = 0
synsave[18,4] = 0
synsave[43,3] = 0
synsave[43,4] = 0
#synsave[39,3] = 0
#SE = 1; se = 1
#SE = 8; se = 4
# Sessions known to be out of sync; they get shifted later using synsave.
fixlist = [[1,1],[8,4]]
print('다음 session은 syn가 안맞으므로 수정합니다.')
print(fixlist)
# In
def downsampling(msssignal, wanted_size):
    """Downsample a 1-D signal to wanted_size bins by block averaging.

    Bin edges come from rounding frame*ratio, so bin widths may differ by
    one sample when the ratio is not an integer.
    """
    ratio = msssignal.shape[0] / wanted_size
    out = np.full(wanted_size, np.nan)
    for idx in range(wanted_size):
        start = int(round(idx * ratio))
        stop = int(round(idx * ratio + ratio))
        out[idx] = np.mean(msssignal[start:stop])
    return np.array(out)
# Resample each behavior trace onto its session's imaging frame count and
# apply the manual shift from synsave to the sessions listed in fixlist.
behavss2 = list()
for SE in range(N):
    behavss2.append([])
    for se in range(5):
        msbehav = np.array(bahavss[SE][se])
        behav_syn = downsampling(msbehav, signalss[SE][se].shape[0])
        if [SE, se] in fixlist:
            # Shift the trace by the stored offset (positive = delay).
            # NOTE(review): if synsave[SE,se] == 0 the shifted trace stays
            # all-zero — confirm fixlist entries always carry a nonzero offset.
            fix = np.zeros(behav_syn.shape[0])
            s = int(synsave[SE,se])
            if s > 0:
                fix[s:] = behav_syn[:-s]
            elif s < 0:
                s = -s
                fix[:-s] = behav_syn[s:]
            plt.plot(np.mean(signalss[SE][se], axis=1))
            plt.plot(fix)
        else:
            fix = behav_syn
        behavss2[SE].append(fix)
if True: # save diagnostic figures (signal + movement per session)
    savepath = 'D:\\mscore\\syncbackup\\paindecoder\\save\\msplot\\0709'
    print('signal, movement 시각화는', savepath, '에 저장됩니다.')
    os.chdir(savepath)
    for SE in runlist:
        print('save msplot', SE)
        signals = signalss[SE]
        behavs = behavss2[SE]
        for se in range(5):
            behav = np.array(behavs[se])
            signal = np.array(signals[se])
            plt.figure(SE, figsize=(18, 9))
            # Panel 1: every ROI trace overlaid.
            plt.subplot(411)
            for n in range(signal.shape[1]):
                msplot = signal[:,n]
                plt.plot(msplot)
            mstitle = 'msplot_' + str(SE) + '_' + str(se) + '.png'
            plt.title(mstitle)
            # Panel 2: seconds axis (scale-bar plot itself is commented out).
            scalebar = np.ones(int(round(signal.shape[0]/FPS)))
            plt.subplot(412)
#            plt.plot(scalebar)
            plt.xticks(np.arange(0, scalebar.shape[0]+1, 5.0))
            # Panel 3: median trace across ROIs with a zero reference line.
            plt.subplot(413)
            msplot = np.median(signal,1)
            plt.plot(msplot)
            plt.plot(np.zeros(msplot.shape[0]))
            plt.xticks(np.arange(0, msplot.shape[0]+1, 50.0))
            # Panel 4: behavior trace (the mean trace is computed but unused).
            plt.subplot(414)
            msplot = np.mean(signal,1)
            plt.plot(behav)
            plt.xticks(np.arange(0, behav.shape[0]+1, 500.0))
            #
            plt.savefig(mstitle)
            plt.close(SE)
# Bundle everything needed downstream and pickle it for the analysis stage.
savepath = 'D:\\mscore\\syncbackup\\google_syn\\mspickle.pickle'
print('savepath', savepath)
msdata = {
        'FPS' : FPS,
        'N' : N,
        'bahavss' : bahavss, # raw behavior traces
        'behavss2' : behavss2, # behavior traces after frame alignment
        'msGroup' : msGroup,
        'msdir' : msdir,
        'signalss' : signalss
        }
with open(savepath, 'wb') as f:
    pickle.dump(msdata, f, pickle.HIGHEST_PROTOCOL)
print('mspickle.pickle 저장되었습니다.')
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-11-06 15:43
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration: reconciles the 0068 and 0071 branches of the
    'dataentry' app's migration graph; intentionally has no operations."""

    dependencies = [
        ('dataentry', '0068_auto_20181024_1729'),
        ('dataentry', '0071_merge_20181101_1434'),
    ]
    operations = [
    ]
|
# (c) Copyright IBM Corp. 2010, 2021. All Rights Reserved.
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
import sys
import json
import pytest
from requests.exceptions import HTTPError
from fn_cisco_asa.lib.cisco_asa_client import CiscoASAClient
from resilient_circuits.util import get_function_definition
from resilient_lib import IntegrationError, RequestsCommon
if sys.version_info.major == 2:
from mock import patch
else:
from unittest.mock import patch
# Package name used when looking up function definitions.
PACKAGE_NAME = "fn_cisco_asa"
# Simulated app.config [fn_cisco_asa] section: the known firewalls and the
# network object groups the integration is allowed to modify.
MOCKED_GLOBAL_OPTS = {
    "firewalls": "firewall_1,firewall_2",
    "network_object_lists": "BLACKLIST_IN, BLACKLIST_OUT"
}
# Simulated per-firewall config section with dummy credentials.
MOCKED_FIREWALL_OPTS = {
    "host": "1.1.1.1",
    "username": "username",
    "password": "password"
}
def generate_response(content, status):
    """Build a minimal stand-in for requests.Response.

    `content` is stored as-is (callers pass a JSON string) and `status`
    becomes status_code; json() decodes the stored content on demand, and
    raise_for_status() mirrors requests.Response.raise_for_status.
    """
    class SimulatedResponse:
        def __init__(self, body, code):
            self.status_code = code
            self.content = body
            self.reason = "test"
            self.text = json.dumps(body)
            self.url = "test"

        def json(self):
            # Decode the stored payload, as requests does lazily.
            return json.loads(self.content)

        def raise_for_status(self):
            """Same behavior as requests.Response.raise_for_status:
            raises :class:`HTTPError` for 4xx/5xx status codes."""
            if isinstance(self.reason, bytes):
                # Try utf-8 first because some servers localize their reason
                # strings; fall back to iso-8859-1 otherwise (requests PR #3538).
                try:
                    decoded_reason = self.reason.decode('utf-8')
                except UnicodeDecodeError:
                    decoded_reason = self.reason.decode('iso-8859-1')
            else:
                decoded_reason = self.reason
            message = ''
            if 400 <= self.status_code < 500:
                message = u'%s Client Error: %s for url: %s' % (self.status_code, decoded_reason, self.url)
            elif 500 <= self.status_code < 600:
                message = u'%s Server Error: %s for url: %s' % (self.status_code, decoded_reason, self.url)
            if message:
                raise HTTPError(message, response=self)

    return SimulatedResponse(content, status)
class TestCiscoASAClient(object):
    """ Tests for the Cisco ASA Client functions"""

    @pytest.mark.parametrize(
        "func_name", [
            ('cisco_asa_get_network_objects'),
            ('cisco_asa_get_network_object_details'),
            ('cisco_asa_add_artifact_to_network_object_group'),
            ('cisco_asa_remove_network_object_from_network_object_group'),
        ])
    def test_function_definition(self, func_name):
        """ Test that the package provides customization_data that defines the function """
        func = get_function_definition(PACKAGE_NAME, func_name)
        assert func is not None

    @patch('resilient_lib.RequestsCommon.execute_call_v2')
    def test_get_network_object_group(self, get_mock):
        """ Test get_network_object_group: the mocked HTTP layer returns a
        canned ASA REST payload and the client must hand it back decoded."""
        print("Test get_network_object_group\n")
        # Canned ASA response for a network object group lookup.
        sim_content = {"kind":"object#NetworkObjGroup",
                       "selfLink":"https://192.168.1.162/api/objects/networkobjectgroups/BLACKLIST_IN",
                       "name":"BLACKLIST_IN",
                       "members":[
                           {
                               "kind":"IPv4Address",
                               "value":"192.168.10.1"
                           },
                           {
                               "kind":"IPv4Address",
                               "value":"192.168.10.2"
                           },
                           {
                               "kind":"IPv4Address",
                               "value":"192.168.10.3"
                           }
                       ],
                       "description":"",
                       "objectId":"BLACKLIST_IN"
                       }
        rc = RequestsCommon(MOCKED_FIREWALL_OPTS, MOCKED_FIREWALL_OPTS)
        asa_client = CiscoASAClient("firewall_1", MOCKED_GLOBAL_OPTS, MOCKED_FIREWALL_OPTS, rc)
        # execute_call_v2 is patched to return the simulated response.
        get_mock.return_value = generate_response(json.dumps(sim_content), 200)
        response = asa_client.get_network_object_group("BLACKLIST_IN")
        assert response == sim_content

    @patch('resilient_lib.RequestsCommon.execute_call_v2')
    def test_get_network_object_details(self, get_mock):
        """ Test get_network_object_details: same pattern for a single
        network object lookup."""
        print("Test get_network_object_details\n")
        # Canned ASA response for a single network object.
        sim_content = {"host":{
                           "kind":"IPv6FQDN",
                           "value":"www.fqdnipv6.com"
                       },
                       "kind":"object#NetworkObj",
                       "name":"TESTfqdnipv6",
                       "objectId":"TESTfqdnipv6",
                       "selfLink":"https://192.168.1.162/api/objects/networkobjects/TESTfqdnipv6"
                       }
        rc = RequestsCommon(MOCKED_FIREWALL_OPTS, MOCKED_FIREWALL_OPTS)
        asa_client = CiscoASAClient("firewall_1", MOCKED_GLOBAL_OPTS, MOCKED_FIREWALL_OPTS, rc)
        get_mock.return_value = generate_response(json.dumps(sim_content), 200)
        response = asa_client.get_network_object("TESTfqdnipv6")
        assert response == sim_content
|
import pygame
from resources import get_sheet
# This file is not used in the game
class AnimatedSprite(pygame.sprite.Sprite):
    """Extended sprite class to animate more easily.

    Each keyword argument maps an animation name to the argument tuple of
    resources.get_sheet(); the resulting frame lists are stored per key and
    stepped through with next().
    """
    def __init__(self, **keyargs):
        super(AnimatedSprite, self).__init__()
        self.animations = dict()
        for key, value in keyargs.items():
            self.animations[key] = get_sheet(*value)
        print(self.animations.keys())
        # Start on the first declared animation, first frame.
        self.key = list(self.animations.keys())[0]
        self.frame = 0
        self.image = self.animations[self.key][self.frame]
        self.rect = self.image.get_rect()

    def set_key(self, key):
        """Switch to another animation; unknown keys are ignored.

        NOTE(review): the frame index is not reset here, so switching to a
        shorter animation can leave self.frame out of range until next()
        wraps it — confirm whether callers rely on continuing mid-sequence.
        """
        if key in self.animations.keys():
            self.key = key
        return

    def next(self, loop=True):
        """Advance to the next frame.

        Returns True when the end of the current animation has been reached
        (restarting at frame 0 when loop is True), False otherwise.
        Bug fix: the original fell through and returned None (falsy) when
        loop was False and the last frame was reached; it now consistently
        returns True in that case.
        """
        if (self.frame+1) >= len(self.animations[self.key]):
            if loop:
                self.frame = 0
                self.update_sprite()
            return True
        else:
            self.frame += 1
            self.update_sprite()
            return False

    def update_sprite(self):
        """Refresh image/rect for the current key/frame, preserving the
        sprite's center position."""
        center = self.rect.center
        self.image = self.animations[self.key][self.frame]
        self.rect = self.image.get_rect()
        self.rect.center = center
|
from django.db import models
from django.contrib.auth.models import UserManager, User
from django.utils import timezone
from django.db.models.signals import post_save
# Create your models here.
class UserProfile(models.Model):
    """Profile row created automatically for every new auth User."""
    user = models.OneToOneField(User)
    # Set once at profile creation; defaults to the current time.
    date_joined = models.DateTimeField(('date_joined'), default=timezone.now)
    objects = UserManager()

    def create_user_profile(sender, instance, created, **kwargs):
        # post_save signal handler (note: receives the signal's sender as
        # its first argument, not self).
        if created:
            UserProfile.objects.create(user=instance)

    def __str__(self):
        return self.user.username

# Bug fix: 'create_user_profile' is a class attribute, so the bare name did
# not exist at module level and this line raised NameError at import time;
# reference the handler through the class instead.
post_save.connect(UserProfile.create_user_profile, sender=User)
class Category(models.Model):
    """Spark category; referenced by Spark.category."""
    name = models.CharField(max_length=128, unique=True)

    def __str__(self):
        return self.name
class Spark(models.Model):
    """A user-submitted event/initiative ('spark').

    NOTE(review): slug is unique but not auto-populated here — presumably
    generated from name in a form or save() override elsewhere; confirm.
    """
    name = models.CharField(max_length=128, unique=True)
    # Unique but intentionally not indexed; may be left blank on input.
    slug = models.SlugField(db_index=False, blank=True, unique=True)
    url = models.URLField()
    start_date = models.DateField(blank=False)
    # Open-ended sparks leave end_date empty.
    end_date = models.DateField(blank=True,null=True)
    description = models.TextField(blank = True)
    # default=1 assumes a Category with pk=1 exists — confirm fixture/migration.
    category = models.ForeignKey(Category, default=1)
    author = models.ForeignKey(User, null=False)

    def __str__(self):
        return self.name
|
import re
import requests
# Desktop User-Agent so Baidu serves the regular search results page.
header_info = {
    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36"
}
response = requests.get(url='https://www.baidu.com/s?wd=云班课',
                        headers=header_info)
body_str = response.text
# Result titles are embedded in the page as "title":"...","url" JSON pairs.
titls = re.findall(r'"title":"(.+?)","url"', body_str)
print(titls)
# Hot-search entries: caption text plus a 3-digit count (optional 万 suffix).
# Bug fix: raw strings for the regexes — '\s' in a plain string is an
# invalid escape sequence (SyntaxWarning on modern Python).
contents = re.findall(r'opr-toplist1-subtitle">\s+(.+?)\s+</a>[\s\S]+?([4-9][0-9][0-9]万?)</td>', body_str)
print(contents)
#__all__ = ["code","elements"]
#from .elements import CodeElement, ValueElement, Function, Subroutine, TypeExecutable
#from .elements import CustomType, Module, DocGroup, DocElement
#from .code import CodeParser
|
# Correlation analysis between flight parameters and vibration data
from FileUtils.functionUtils import findFolder, readTime, readData, loadFile, printFiles, printColumns, matchData
import scipy.stats as scs
import os
def FPCorrCalculator(vibName, flightTime):
    '''
    Compute Pearson's correlations between one vibration channel (vibName)
    and every flight parameter recorded for one flight (flightTime).

    Returns (corr_list, col_list):
        corr_list: correlation coefficients
        col_list:  parameter names matching corr_list
    (corr_list[i] is the Pearson correlation between col_list[i] and
    vibName); both return values are plain lists.
    '''
    # Locate the data package holding the vibration channel for this flight.
    # Hoisted: the original called findFolder(vibName) three times here.
    vib_folders = findFolder(vibName)
    vibFolder = [f for f in vib_folders if flightTime in f][0]
    vibTime = readTime(vibFolder)            # time axis of the vibration package
    vibData = readData(vibFolder, vibName)   # vibration samples
    # zeroCor_191207: parameters known (from flight 191207) to have no
    # correlation with vibration responses; skipped below.
    zeroCor_191207 = loadFile("Savedfile/zeroCor_191207")
    print("Vib Fetching complete!")
    corr_list = []  # correlation coefficients, in discovery order
    col_list = []   # matching parameter names
    # fileList_ = printFiles++()
    # fileFT = [fileList_[i] for i in range(len(fileList_)) if flightTime in fileList_[i]]
    # Hoisted: the directory listing is invariant across loop iterations.
    filenameList = printFiles(os.getcwd())
    for i in range(7):  # NOTE(review): hard-coded package count — confirm it matches the data layout
        filenameStr = filenameList[i]
        # Only the flight-parameter packages are of interest.
        if (filenameStr.endswith("CAOWEN-664002-32.txt") or filenameStr.endswith("CAOWEN-664003-32.txt") or
                filenameStr.endswith("CAOWEN-ANA003-32.txt") or filenameStr.endswith("FCS-664002-16.txt")):
            print(filenameStr + " Fetching Started!")
            time_i = readTime(filenameStr)         # time axis of this package
            columns_i = printColumns(filenameStr)  # parameter names in this package
            print("In total of " + str(len(columns_i)) + " columns")
            # columns_i[0] is skipped (presumably the time column — confirm).
            for j in range(1, len(columns_i)):
                print("Start " + str(j) + "/" + str(len(columns_i)))
                if (columns_i[j] in zeroCor_191207):
                    # Known zero-correlation parameter: skip.
                    print(columns_i[j] + " Break!")
                elif ("Accel" in columns_i[j]):
                    # Accelerometer channels are vibration data themselves: skip.
                    print(columns_i[j] + " Break!")
                else:
                    jCol = readData(filenameStr, columns_i[j])
                    # Resample the parameter onto the vibration time axis.
                    jCol_matched = matchData(vibTime, time_i, jCol)
                    corrJ = scs.pearsonr(jCol_matched, vibData)[0]
                    corr_list.append(corrJ)
                    col_list.append(columns_i[j])
                    print("Correlation between " + columns_i[j] + " and " + str(vibName) + " is " + str(
                        corrJ))
            print(filenameStr + " Fetching complete!")
    return (corr_list, col_list)
|
import motor.motor_asyncio
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import asyncio
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class Gsshop():
    """Scrapes product cards (title, price, thumbnail, URL) from six GSSHOP
    category pages with headless Chrome and stores them in the local
    MongoDB collection shop_db.gsshop via motor.

    NOTE(review): uses the find_element_by_xpath API that was removed in
    Selenium 4 — confirm the pinned selenium version.
    """
    def __init__(self):
        host = "localhost"
        port = 27017
        # Category URL template; the format slot completes the sectid.
        self.url = "https://www.gsshop.com/shop/sect/sectL.gs?sectid=1378{}"
        # Suffixes of the six category ids to crawl.
        self.category_id = [773,781,794,813,825,783]
        self.client = motor.motor_asyncio.AsyncIOMotorClient(host, port)

    async def extract_data(self, i):
        """Pull one product card's fields out of list element `i` and insert
        them as a MongoDB document; failures are logged and swallowed."""
        try:
            el_img = i.find_element_by_xpath('a[@class="prd-item"]/div[@class="prd-img"]/img')
            el_title = i.find_element_by_xpath('a[@class="prd-item"]/dl[@class="prd-info"]/dt[@class="prd-name"]')
            el_price = i.find_element_by_xpath('a[@class="prd-item"]/dl[@class="prd-info"]/dd[@class="price-info"]/span/span/strong')
            el_url = i.find_element_by_xpath('a[@class="prd-item"]')
            # await asyncio.sleep(0.1)
            await self.client["shop_db"]["gsshop"].insert_one({
                "title": el_title.text.strip(),
                "price": el_price.text,
                # Thumbnail is stored inline as a base64 screenshot.
                "img": el_img.screenshot_as_base64,
                "url": el_url.get_property("href")
            })
        except Exception as e:
            print("Error >>", i.text, e)

    async def get_elements(self, url):
        """Open the category's 'popular' pages in headless Chrome and extract
        every product card found on them."""
        try:
            options = Options()
            options.headless = True
            for i in range(1, 4):
                # Fresh driver per page; pages beyond the first are reached
                # via the #0_popular_<n> fragment plus a refresh.
                driver = webdriver.Chrome(executable_path="./chromedriver.exe",
                                          options=options)
                driver.implicitly_wait(2)
                driver.get(url)
                if i != 1:
                    driver.get(url+"#0_popular_{}".format(str(i)))
                    driver.refresh()
                elements = WebDriverWait(driver, 5).until(EC.presence_of_all_elements_located((By.XPATH, '//*[@class="prd-list type-4items"]/ul/li')))
                # print(url, len(elements))
                res = await asyncio.gather(*[self.extract_data(i) for i in elements])
                driver.close()
                # NOTE(review): this return appears to fire on the first
                # iteration, so pages 2-3 are never visited — confirm whether
                # it belongs outside the loop.
                return res
        except Exception as e:
            print("Error >>", url, e)

    async def main(self):
        """Crawl every configured category concurrently."""
        return await asyncio.gather(*[self.get_elements(self.url.format(str(i))) for i in self.category_id])
|
# Find Three Largest Numbers
# O(n)
# n = len(array)
def findThreeLargestNumbers(array):
    """Return the three largest values of array as [third, second, largest].

    Runs in O(n) with O(1) extra space; slots start at -inf, so inputs with
    fewer than three elements are padded with -inf.
    """
    top = [float('-inf')] * 3
    for num in array:
        if num > top[2]:
            # New maximum: shift everything down one slot.
            top[0], top[1], top[2] = top[1], top[2], num
        elif num > top[1]:
            top[0], top[1] = top[1], num
        elif num > top[0]:
            top[0] = num
    return top
|
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils.six import python_2_unicode_compatible
@python_2_unicode_compatible
class Category(models.Model):
    """Blog post category; one per post (see Post.category)."""
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Tag(models.Model):
    """Free-form tag; posts may carry any number of them (see Post.tag)."""
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Post(models.Model):
    """Blog post, ordered newest-first (Meta.ordering)."""
    # Title
    title = models.CharField(max_length=70)
    # Body text
    body = models.TextField()
    # Creation / last-modified timestamps (set by callers, not auto_now)
    created_time = models.DateTimeField()
    modified_time = models.DateTimeField()
    # Optional excerpt
    excerpt = models.CharField(max_length=200,blank=True)
    # Category and tags, declared above
    category = models.ForeignKey(Category)
    tag = models.ManyToManyField(Tag,blank=True)
    # Author
    author = models.ForeignKey(User)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        # Canonical URL for this post (used by templates/admin).
        return reverse('blog:detail',kwargs={'pk':self.pk})

    class Meta:
        ordering = ['-created_time']
# Create your models here.
|
# Demo of dict iteration and f-string number formatting.
print("Hello World")
# Registered-voter counts per county.
counties_dict = {"Arapahoe": 422829, "Denver": 463353, "Jefferson": 432438}
for county, voters in counties_dict.items():
    print(f"{county} county has {voters:,} registered voters.")
my_votes = 3330
total_votes = 15590
# Computed here for reference; the f-strings below recompute it inline.
percentage_votes = (my_votes / total_votes) * 100
message = (f"You received {my_votes:,} votes. "
           f"the total number of votes in the election was {total_votes:,}. "
           f"You had {my_votes/total_votes*100:.2f}% of the votes"
           )
print(message)
print(f"I received {my_votes/total_votes*100}% of the total votes. ")
from .c_dataloader import CDataLoader
from .c_dataloader_sklearn import *
from .c_dataloader_svmlight import CDataLoaderSvmLight
from .c_dataloader_imgclients import CDataLoaderImgClients
from .c_dataloader_imgfolders import CDataLoaderImgFolders
from .c_dataloader_mnist import CDataLoaderMNIST
from .c_dataloader_lfw import CDataLoaderLFW
from .c_dataloader_cifar import CDataLoaderCIFAR10, CDataLoaderCIFAR100
from .c_dataloader_icubworld import CDataLoaderICubWorld28
try:
import torch
import torchvision
except ImportError:
pass # pytorch is an extra component
else:
from .c_dataloader_pytorch import CDataLoaderPyTorch
from .c_dataloader_torchvision import CDataLoaderTorchDataset
|
#coding:utf-8
import unittest
import os
from report import HTMLTestRunner_TT
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import time
# base_dir = str(os.path.dirname(os.path.dirname(__file__)))
# base_dir = str(os.path.dirname(os.path.realpath(__file__)))
# cur_path = base_dir.replace("\\","/")
# Absolute directory of this script; root for the case/ and report/ folders.
cur_path = str(os.path.dirname(os.path.realpath(__file__)))
# print(base_dir)
print(cur_path)
def add_case(caseName="case", rule="test*.py"):
    """Discover and return every unittest case under <cur_path>/<caseName>.

    caseName: directory (relative to this script) holding the test files;
              created if it does not exist yet.
    rule:     glob pattern used for test-file discovery.
    Returns the unittest test suite produced by discover().
    """
    # Directory that holds the test cases.
    case_path = os.path.join(cur_path, caseName)
    if not os.path.exists(case_path):
        os.mkdir(case_path)
    print("test case path:%s" % case_path)
    # Bug fix: the 'rule' parameter was ignored — the pattern was hard-coded
    # to "test*.py".  Pass it through (the default keeps the old behavior).
    discover = unittest.defaultTestLoader.discover(case_path,
                                                   pattern=rule,
                                                   top_level_dir=None)
    print(discover)
    return discover
def send_email():
    """E-mail the HTML test report (inline HTML body + file attachment)
    through the 163 enterprise SMTP service.

    NOTE(review): credentials are hard-coded in source and port is 0 —
    move them to configuration.  Also, the 'form' header key below is
    likely a typo for 'From' (left unchanged: runtime string).
    """
    # --1. mail-server parameters --
    # Enterprise mail service; a personal account would use smtp.163.com.
    smtpsever = "smtp.qiye.163.com"
    # Port (0 lets smtplib pick the library default in the SSL fallback).
    port = 0
    # Account
    sender = "tjiang@grandstream.cn"
    # Password
    psw = "grandstream@jia1"
    # Recipients (a list when there is more than one)
    receiver = ["772831364@qq.com","1026437653@qq.com"]
    # --2. message content
    fire_path = cur_path+"\\report\\result.html"
    with open(fire_path,'rb') as fp:
        mail_body = fp.read()
    # fp.close()
    msg = MIMEMultipart()
    subject = "这个是主题666"
    msg['form'] = sender
    # receiver is a list, but the header value must be a single string
    msg['to'] = (";").join(receiver)
    print((";").join(receiver))
    msg['subject'] = subject
    # The report is sent twice: as the HTML body...
    body = MIMEText(mail_body,"html","utf-8")
    # ...and as a file attachment.
    att = MIMEText(mail_body,'base64','utf-8')
    att["Content-Type"] = 'application/octet-stream'
    att['Content-Disposition'] = 'attachment;filename="result.html"'
    msg.attach(body)
    msg.attach(att)
    print("test email is send")
    # --3. send: try plain SMTP first, fall back to SSL on any failure
    try:
        smtp = smtplib.SMTP()
        # Connect to the server
        smtp.connect(smtpsever)
        # Log in
        smtp.login(sender,psw)
    except:
        smtp = smtplib.SMTP_SSL(smtpsever,port)
        smtp.login(sender,psw)
    smtp.sendmail(sender,receiver,msg.as_string())
    smtp.quit()
# Generate the HTML report
def run_case(all_case, reportName="report"):
    """Run the given test suite and write an HTML report.

    all_case:   unittest suite (typically from add_case()).
    reportName: report directory name, created under cur_path if missing.
    The report is always written to <reportName>/result.html.
    """
    report_path = os.path.join(cur_path, reportName)
    # Create the report folder if it does not exist yet.
    if not os.path.exists(report_path):
        os.mkdir(report_path)
    # Bug fix: os.path.join was called with a single pre-concatenated
    # argument; join the parts properly (also OS-portable).
    report_abspath = os.path.join(report_path, "result.html")
    print(report_path)
    print("report path: %s" % report_abspath)
    # Context manager guarantees the report file is closed even if the
    # runner raises (the original leaked the handle on error).
    with open(report_abspath, "wb") as fp:
        runner = HTMLTestRunner_TT.HTMLTestRunner(stream=fp,
                                                  title="这是我的自动化测试报告",
                                                  description="用例执行情况",
                                                  TT_name="jiangtian")
        runner.run(all_case)
# def get_report_file(report_file):
# #获取最新的测试报告
if __name__ == "__main__":
    # Entry point: discover the cases, run them into an HTML report, then
    # e-mail the result.  The commented lines are the historic inline runner.
    # report_path = base_dir+"/result.html"
    # fp = open(report_path,"wb")
    # runnrer = unittest.TextTestRunner()
    # run all cases
    # runnrer = HTMLTestRunner_TT.HTMLTestRunner(stream=fp,
    #                                            title="这是我的自动化测试报告",
    #                                            description="用例执行情况",
    #                                            TT_name="jiangtian")
    # runnrer.run(add_case())
    # fp.close()
    # Load the test cases
    all_case = add_case()
    # Run them and write the HTML report
    run_case(all_case)
    # Location of the freshly generated report, then mail it
    report_path = os.path.join(cur_path,"report")
    send_email()
# !/usr/bin/env python2
import rospy
from autominy_msgs.msg import Speed, SpeedCommand, NormalizedSteeringCommand, SteeringAngle, NormalizedSpeedCommand
from nav_msgs.msg import Odometry
import math
import tf
class PDController:
    """Steers the car toward a fixed global orientation (0 or pi radians)
    by publishing normalized steering and constant speed commands based on
    localization feedback."""

    def __init__(self):
        # Subscribers
        self.sub_speed = rospy.Subscriber("/sensors/speed", Speed, self.callback_speed, queue_size=100)
        self.sub_steering = rospy.Subscriber("/sensors/steering", SteeringAngle, self.callback_steering, queue_size=100)
        self.sub_localize = rospy.Subscriber("/communication/gps/15", Odometry, self.callback_localize, queue_size=100)
        # self.sub_odometry = rospy.Subscriber("/sensors/localization/filtered_map", Odometry, self.callback_odometry, queue_size=100)
        # Publishers
        self.pub_speed = rospy.Publisher("/actuators/speed", SpeedCommand, queue_size=10)
        self.pub_steer = rospy.Publisher("/actuators/steering_normalized", NormalizedSteeringCommand, queue_size=10)
        # initial parameters
        self.x_orientation = 0.0
        self.y_orientation = 0.0
        self.z_orientation = 0.0
        self.loc_array = []          # quaternion components as built below
        self.theta = 0.0             # current heading estimate (rad)
        self.speed = 0.0             # latest measured speed
        self.steering_angle = 0.0    # latest measured/commanded steering

    '''--------------- Callbacks ---------------'''
    def callback_steering(self, data):
        # Cache the measured steering angle.
        self.steering_angle = data.value

    def callback_speed(self, data):
        # Cache the measured speed.
        self.speed = data.value

    def callback_localize(self, data):
        # Update the heading estimate from the GPS pose.
        # NOTE(review): loc_array is assembled as [w, x, y, z], but
        # tf.transformations.euler_from_quaternion expects [x, y, z, w],
        # and index [0] selects roll rather than yaw — confirm this
        # ordering is intentional for this localization source.
        quaternion = data.pose.pose.orientation
        self.x_orientation = quaternion.x
        self.y_orientation = quaternion.y
        self.z_orientation = quaternion.z
        self.loc_array = [quaternion.w, self.x_orientation, self.y_orientation, self.z_orientation]
        self.theta = tf.transformations.euler_from_quaternion(self.loc_array)[0]

    '''--------------- Execution ---------------'''
    def drive(self, desired_orientation):
        """Map the heading error for the requested orientation (0 or 180
        degrees) onto a normalized steering value in roughly [-1, 1]."""
        theta = self.theta
        print("Theta is: ", theta)
        print("-" * 50)
        if desired_orientation == 0:
            # Will return a value between 0 and 1 for a steering angle
            self.steering_angle = (theta / math.pi)
        elif desired_orientation == 180:
            # Aim for +/-pi: steer by the shorter angular distance to pi.
            if theta < 0:
                self.steering_angle = (math.pi + theta) / math.pi
            else:
                self.steering_angle = (math.pi - theta) / -math.pi
        # To make sure that a steering input is being published
        else:
            self.steering_angle = theta / math.pi
        return self.steering_angle

    def execute(self, desired_orientation, speed):
        """Publish steering/speed commands at 100 Hz until shutdown."""
        # Sleep a few seconds to initialize node
        rospy.sleep(2.0)
        rate = rospy.Rate(100)
        while not rospy.is_shutdown():
            steering_angle = self.drive(desired_orientation)
            self.pub_steer.publish(NormalizedSteeringCommand(value=steering_angle))
            self.pub_speed.publish(value=speed)
            rate.sleep()
        print("STOPPED")
        rospy.spin()
def main(desired_orientation, speed):
    """Initialize the ROS node and run the controller until interrupted.

    desired_orientation: target heading selector (0 or 180 degrees).
    speed:               constant speed command to publish.
    On ROSInterruptException the car is commanded to stop.
    """
    pdc = None
    try:
        rospy.init_node("pd_controller")
        pdc = PDController()
        pdc.execute(desired_orientation, speed)
    except rospy.ROSInterruptException:
        print("=== Failed ===")
        # Bug fix: 'pdc' was unbound (NameError) when init_node() itself
        # raised; only publish the stop command if the controller exists.
        if pdc is not None:
            pdc.pub_speed.publish(value=0.0)
if __name__ == '__main__':
    print(40 * "-")
    print(" Starting... ")
    pdc = PDController()
    print(" ")
    # Bug fix: on Python 3, input() returns a str, so the orientation
    # comparisons in drive() (== 0 / == 180) could never match; convert
    # the entered value to int.
    desired_angle = int(input("Enter desired orientation. For zero type 0, for Pi type 180: "))
    main(desired_angle, 0.2)
    # Command a full stop and neutral steering before exiting.
    pdc.pub_speed.publish(value=0.0)
    pdc.pub_steer.publish(value=0.0)
    print(" Done ")
    print(40 * "-")
|
from Expresiones.Arreglos import Arreglos
class TablaSimbolos:
    """Scoped symbol table: each instance holds a dict of symbols plus a
    link ('anterior') to the enclosing scope, forming a lookup chain."""

    def __init__(self,nombre, anterior = None):
        self.tabla = {}            # symbol id -> symbol object, this scope only
        self.anterior = anterior   # enclosing scope (None at global scope)
        self.nombre = nombre       # scope name, for diagnostics

    def BuscarIdentificador(self, id):
        """Look id up through the scope chain (lower-cased key); None if absent."""
        tablaActual = self
        while tablaActual != None:
            if id.lower() in tablaActual.tabla :
                return tablaActual.tabla[id.lower()]
            else:
                tablaActual = tablaActual.anterior
        return None

    def actualizarValor(self, id, valor):
        """Set the value of the nearest symbol named id; no-op if not found.

        NOTE(review): keys here use the raw id while BuscarIdentificador
        lower-cases it — confirm callers normalize case consistently.
        """
        tablaActual = self
        while tablaActual != None:
            if id in tablaActual.tabla :
                tablaActual.tabla[id].setValor(valor)
                return None
            else:
                tablaActual = tablaActual.anterior
        return None

    def actualizarSimbolo(self, simbolo):
        """Replace the nearest symbol with the same id; if no scope holds it,
        add the symbol to the current (local) scope."""
        tablaActual = self
        while tablaActual != None:
            if simbolo.getID() in tablaActual.tabla :
                tablaActual.tabla[simbolo.getID()] = simbolo
                return None
            else:
                tablaActual = tablaActual.anterior
        self.tabla[simbolo.getID()] = simbolo
        return None

    def actualizarValorPosicion(self, valor, posicion, id):
        """Assign valor at 1-based index posicion of the list stored under id.

        NOTE(review): if id is found but its value is not a list, the loop
        neither returns nor advances — potential infinite loop; confirm.
        """
        tablaActual = self
        while tablaActual != None:
            if id in tablaActual.tabla :
                if isinstance(tablaActual.tabla[id].valor, list):
                    tablaActual.tabla[id].valor[posicion-1] = valor
                    return "ok"
            else:
                tablaActual = tablaActual.anterior
        return None

    def actualiarValorPosicionMatriz(self,valor,pos1,pos2,id,tree):
        """Assign valor at [pos1][pos2] (1-based) of the matrix under id.

        NOTE(review): same non-list infinite-loop concern as
        actualizarValorPosicion; confirm.
        """
        tablaActual = self
        while tablaActual != None:
            if id in tablaActual.tabla :
                if isinstance(tablaActual.tabla[id].valor, list):
                    #tab = tablaActual.tabla[id].valor
                    #nuevo = []
                    #if tree.getCont() > 0:
                    tablaActual.tabla[id].valor[pos1-1][pos2-1] = valor
                    '''else:
                        nuevo = self.retornarResultado(None,tablaActual,tab,nuevo)
                        nuevo[pos1-1][pos2-1] = valor
                        tablaActual.tabla[id].valor = nuevo
                        tree.aumentar()'''
                    return "ok"
            else:
                tablaActual = tablaActual.anterior
        return None

    def actualiarValorPosicionDimension3(self,valor,pos1,pos2,pos3,id,tree):
        """Assign valor at [pos1][pos2][pos3] (1-based) of the 3-D structure
        stored under id, after evaluating embedded Arreglos via convertir().

        NOTE(review): convertir() is called on `valor` (not the stored
        value) and flattens nested lists, which looks inconsistent with the
        3-level indexing below — confirm intended behavior.
        """
        tablaActual = self
        while tablaActual != None:
            if id in tablaActual.tabla :
                if isinstance(tablaActual.tabla[id].valor, list):
                    nuevo = self.convertir(tree,tablaActual,valor,[])
                    segundaPasada = self.convertir(tree,tablaActual,nuevo,[])
                    segundaPasada[pos1-1][pos2-1][pos3-1] = valor
                    tablaActual.tabla[id].valor = segundaPasada
                    return "ok"
            else:
                tablaActual = tablaActual.anterior
        return None

    def convertir(self,tree,table,item,lista):
        """Recursively walk item, executing Arreglos expressions and
        appending every scalar result to lista (flattening nested lists)."""
        if isinstance(item, list):
            for i in item:
                self.convertir(tree,table,i,lista)
        elif isinstance(item, Arreglos):
            valor = item.ejecutar(tree,table)
            lista.append(valor)
        else:
            lista.append(item)
        return lista

    def addSimboloLocal(self, simbolo):
        """Insert/overwrite the symbol in the current scope only."""
        self.tabla[simbolo.getID()] = simbolo
        return None

    def actualizarSimboloGlobal(self,simbolo):
        """Insert/overwrite the symbol in the outermost (global) scope,
        using a lower-cased key."""
        tablaActual = self
        while tablaActual.anterior != None:
            tablaActual = tablaActual.anterior
        i = simbolo.getID()
        tablaActual.tabla[i.lower()] = simbolo
        return None
|
#!/usr/bin/env python3
import json
import subprocess
import sys
# Instruction metadata dump (XML converted to JSON).
# NOTE(review): hard-coded personal path — parameterize before sharing.
data = json.load(open('/home/marxin/Downloads/instructions.json'))
# asm mnemonic (lower-case) -> '@summary' text; filled from `data` below.
descriptions = {}
def get_description(insn, table=None):
    """Return the summary text for mnemonic *insn*, or None if unknown.

    table: mapping of mnemonic -> summary; defaults to the module-level
    ``descriptions`` dict (backward compatible with existing callers).

    AT&T syntax appends an operand-size suffix to many mnemonics, so if the
    exact name is missing we retry with one trailing size letter stripped.
    """
    if table is None:
        table = descriptions
    if insn in table:
        return table[insn]
    for suffix in ('b', 's', 'l', 'q'):
        if insn.endswith(suffix) and insn[:-1] in table:
            return table[insn[:-1]]
    return None
# Populate `descriptions` from the instruction metadata dump.
for extension in data['root']['extension']:
    if 'instruction' not in extension:
        continue
    insns = extension['instruction']
    # A lone instruction is serialized as a dict instead of a one-element list.
    if isinstance(insns, dict):
        insns = [insns]
    for instruction in insns:
        asm = instruction['@asm']
        summary = instruction.get('@summary')
        # Strip a leading "{...} " prefix from the asm template.
        if asm.startswith('{') and ' ' in asm:
            asm = asm[asm.find(' ') + 1:]
        # Keep only bare single-token mnemonics that actually have a summary.
        if ' ' not in asm and summary:
            descriptions[asm.lower()] = summary
# Disassemble the target binary and count occurrences of each mnemonic.
histogram = {}
total = 0
proc = subprocess.run(f'objdump --no-addresses -dw --no-show-raw-insn {sys.argv[1]}',
                      shell=True, encoding='utf8', stdout=subprocess.PIPE)
lines = proc.stdout.splitlines()
for line in lines:
    # objdump emits instruction lines tab-indented; everything else is headers.
    if not line.startswith('\t'):
        continue
    insn = line.strip().split(' ')[0]
    histogram[insn] = histogram.get(insn, 0) + 1
    total += 1
# Report the histogram (most frequent first) plus description coverage.
instructions = len(histogram)
covered = sum(1 for mnemonic in histogram if get_description(mnemonic))
for insn, count in sorted(histogram.items(), key=lambda item: item[1], reverse=True):
    summary = get_description(insn) or ''
    print(f'{insn:12s} {count:10d} {100.0 * count / total:.2f}% // {summary}')
print(f'Covered type of instructions: {covered}, total types: {instructions}')
|
import sys
import string
import pprint
# Python 2/3 compatibility: alias xrange to range when running on Python 3.
try:
    xrange
except NameError:
    xrange = range

pp = pprint.PrettyPrinter()
def clean_words(words):
    """Lowercase each word in *words* and strip ASCII punctuation.

    Fixes two defects in the original: it iterated ``self.idx2word`` (there
    is no ``self`` in a module-level function — guaranteed NameError) instead
    of the ``words`` parameter, and it used the Python-2-only
    ``str.translate(None, chars)`` signature.
    """
    table = str.maketrans('', '', string.punctuation)
    return [word.lower().translate(table) for word in words]
def load_data(fileName):
    """Parse a file of repeating line triples into parallel lists.

    Each record is three consecutive lines: space-separated ints (state),
    floats (Q action values), floats (Q object values). Returns the three
    lists in that order. A trailing newline is expected (the final empty
    split element is never consumed).
    """
    with open(fileName, "r") as handle:
        rows = handle.read().split('\n')
    states, action_values, object_values = [], [], []
    idx = 0
    while idx < (len(rows) - 1):
        states.append([int(token) for token in rows[idx].split(' ')])
        action_values.append([float(token) for token in rows[idx + 1].split(' ')])
        object_values.append([float(token) for token in rows[idx + 2].split(' ')])
        idx += 3
    return states, action_values, object_values
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import sys
#import ritesh1
import pandas as pd
import matplotlib.pyplot as plt
# IPL datasets used throughout the GUI:
#   data  — one row per match (matches.csv)
#   data1 — one row per delivery/ball (deliveries.csv)
data=pd.read_csv('matches.csv')
data1=pd.read_csv("deliveries.csv")
# Fill missing dismissal info with the sentinel 'notout' so the filters and
# value_counts() calls below treat "not dismissed" uniformly.
data1.dismissal_kind.fillna('notout',inplace=True)
data1.player_dismissed.fillna('notout',inplace=True)
class Demo2(QMainWindow):
    """Main window for IPL match/delivery statistics.

    Builds a menu bar whose actions each render one matplotlib chart from
    the module-level DataFrames ``data`` (matches) and ``data1``
    (deliveries). Charts open in separate matplotlib windows.
    """

    def __init__(self):
        """Build the menu bar, create all actions and wire them to plot methods."""
        super().__init__()
        self.setWindowTitle("GET STATS OF ALL SEASONS HERE ")
        self.setWindowIcon(QIcon("ipl17.jpg"))
        self.setGeometry(7,40,1930, 950)
        newfont = QFont("Times",18,QFont.Bold)
        self.count = 0
        # One top-level menu per analysis category.
        mainMenu = self.menuBar()
        operationsMenu1 = mainMenu.addMenu("BATTING ANALYSIS")
        operationsMenu2 = mainMenu.addMenu("BOWLING ANALYSIS")
        operationsMenu3 = mainMenu.addMenu("FIELDING ANALYSIS")
        operationsMenu4 = mainMenu.addMenu("IMPACT OF TOSS")
        operationsMenu5 = mainMenu.addMenu("TOP SPORTSPERSON")
        operationsMenu6 = mainMenu.addMenu("UMPIRES")
        operationsMenu7 = mainMenu.addMenu("RESULTS")
        operationsMenu8 = mainMenu.addMenu("TEAMS WIN MARGIN")
        operationsMenu9 = mainMenu.addMenu("TEAM STATS")
        operationsMenu10 = mainMenu.addMenu("VENUE STATS")
        # Menu actions (one per chart).
        action1 = QAction("TEAM WIN BY 10 WICKETS",self)
        action1.setShortcut("Ctrl+N")
        action2 = QAction("TEAM WIN BY MORE THAN 100 RUNS",self)
        action2.setShortcut("Ctrl+O")
        action3 = QAction("TEAM WIN BY JUST 1 WICKET",self)
        action3.setShortcut("Ctrl+P")
        action4 = QAction("TEAM WIN BY JUST 1 RUN",self)
        action4.setShortcut("Ctrl+Q")
        action5 = QAction("IMPACT OF TOSS WINNING",self)
        action6 = QAction("IMPACT OF TOSS DECISION(FIELD)",self)
        action7 = QAction("IMPACT OF TOSS DECISION(BAT)",self)
        action8 = QAction("TOP 20 MAN OF THE MATCH",self)
        action9 = QAction("TOP 20 FIELDERS",self)
        action10 = QAction("TOP 20 WICKET TAKING BOWLERS",self)
        action11 = QAction("NO OF MATCHES IN EACH SEASON",self)
        action12 = QAction("NO OF WINS BY EACH TEAM",self)
        action13 = QAction("NO OF MATCHES AT EACH VENUE",self)
        action14 = QAction("VENUES NOT HAVING NORMAL RESULT",self)
        action15 = QAction("VENUES WHERE DL IS APPLIED MOST",self)
        action16 = QAction("UMPIRE1",self)
        action17 = QAction("UMPIRE2",self)
        action18 = QAction("TYPES OF MATCH RESULTS",self)
        action19 = QAction("DL APPLIED",self)
        action20 = QAction("TYPES OF TOSS DECISION",self)
        action21 = QAction("TOTAL RUNS SCORED BY TEAMS",self)
        action22 = QAction("TOTAL RUNS SCORED IN DIFFERENT OVERS",self)
        action23 = QAction("TOTAL RUNS SCORED BY TOP 20 BATSMAN",self)
        action24 = QAction("TOTAL RUNS SCORED IN CHASING",self)
        action25 = QAction("TOTAL CENTURIES SCORED BY BATSMAN",self)
        action26 = QAction("DIFFERENT TYPES OF DISMISSALS",self)
        action27 = QAction("TOTAL WICKETS BETWEEN DIFF OVERS",self)
        action28 = QAction("TOTAL RUNS PER BALL(<1) CONCEDED BY BOWLERS ",self)
        action29 = QAction("TOTAL WICKETS TAKEN BY DIFFERENT BOWLERS",self)
        action30 = QAction("MAX WICKETS TAKEN BY DIFFERENT BOWLERS IN DEATH OVERS",self)
        action31 = QAction("MIN RUNS PER BALL GIVEN BY DIFFERENT BOWLERS IN DEATH OVERS",self)
        action32 = QAction("BEST RUN OUTS BY DIFFERENT FIELDERS",self)
        action33 = QAction("BEST CATCHES BY DIFFERENT FIELDERS",self)
        action34 = QAction("BEST WICKET KEEPERS",self)
        action35 = QAction("TOP 20 HIGHEST RUN SCORERS",self)
        # Attach each action to its category menu.
        operationsMenu8.addAction(action1)
        operationsMenu8.addAction(action2)
        operationsMenu8.addAction(action3)
        operationsMenu8.addAction(action4)
        operationsMenu4.addAction(action5)
        operationsMenu4.addAction(action6)
        operationsMenu4.addAction(action7)
        operationsMenu5.addAction(action8)
        operationsMenu5.addAction(action9)
        operationsMenu5.addAction(action10)
        operationsMenu5.addAction(action35)
        operationsMenu9.addAction(action11)
        operationsMenu9.addAction(action12)
        operationsMenu10.addAction(action13)
        operationsMenu10.addAction(action14)
        operationsMenu10.addAction(action15)
        operationsMenu6.addAction(action16)
        operationsMenu6.addAction(action17)
        operationsMenu7.addAction(action18)
        operationsMenu7.addAction(action19)
        operationsMenu7.addAction(action20)
        operationsMenu1.addAction(action21)
        operationsMenu1.addAction(action22)
        operationsMenu1.addAction(action23)
        operationsMenu1.addAction(action24)
        operationsMenu1.addAction(action25)
        operationsMenu2.addAction(action26)
        operationsMenu2.addAction(action27)
        operationsMenu2.addAction(action28)
        operationsMenu2.addAction(action29)
        operationsMenu2.addAction(action30)
        operationsMenu2.addAction(action31)
        operationsMenu3.addAction(action32)
        operationsMenu3.addAction(action33)
        operationsMenu3.addAction(action34)
        # Wire actions to their plotting slots.
        # NOTE(review): action10 and action29 both call self.bowlers; the
        # bowlerwickets() method below is never connected — confirm intent.
        action1.triggered.connect(self.MARGIN_WICKETS)
        action2.triggered.connect(self.MARGIN_RUNS)
        action3.triggered.connect(self.close_wickets)
        action4.triggered.connect(self.close_runs)
        action5.triggered.connect(self.tosswin)
        action6.triggered.connect(self.tossfield)
        action7.triggered.connect(self.tossbat)
        action8.triggered.connect(self.man)
        action9.triggered.connect(self.fielder)
        action10.triggered.connect(self.bowlers)
        action11.triggered.connect(self.number_matches)
        action12.triggered.connect(self.number_wins)
        action13.triggered.connect(self.venues)
        action14.triggered.connect(self.notnormal)
        action15.triggered.connect(self.dlvenues)
        action16.triggered.connect(self.umpire1)
        action17.triggered.connect(self.umpire2)
        action18.triggered.connect(self.resulttype)
        action19.triggered.connect(self.dlapplied)
        action20.triggered.connect(self.tossdecision)
        action21.triggered.connect(self.teamruns)
        action22.triggered.connect(self.overruns)
        action23.triggered.connect(self.batsmanruns)
        action24.triggered.connect(self.chasingruns)
        action25.triggered.connect(self.centbatsman)
        action26.triggered.connect(self.dismissals)
        action27.triggered.connect(self.overwickets)
        action28.triggered.connect(self.bowlerruns)
        action29.triggered.connect(self.bowlers)
        action30.triggered.connect(self.maxwickets)
        action31.triggered.connect(self.minruns)
        action32.triggered.connect(self.runouts)
        action33.triggered.connect(self.catches)
        action34.triggered.connect(self.keepers)
        action35.triggered.connect(self.batsmanruns)
        self.initUI()
        self.show()

    def initUI(self):
        """Lay out the player-selection combo box, background and buttons."""
        newfont = QFont("Times",18,QFont.Bold)
        label1 = QLabel('CHOSE PLAYER NAME', self)
        label1.setGeometry(QRect(100, 200, 500, 81))
        label1.setFont(newfont)
        # Combo of all batsmen appearing in the deliveries data.
        self.combo1 = QComboBox(self)
        values = data1.batsman.unique()
        values.sort()
        self.combo1.addItem("SELECT ANY PLAYER")
        self.combo1.addItems(values)
        self.combo1.setFont(newfont)
        self.combo1.setGeometry(QRect(100, 300, 500, 81))
        # Scaled background image via the window palette.
        image = QImage("ipl19.jpg")
        sImage = image.scaled(QSize(1900, 1000))
        palette = QPalette()
        palette.setBrush(10, QBrush(sImage))
        self.setPalette(palette)
        btn1 = QPushButton('RUNS SCORED BY CHOSEN BATSMAN AGAINST VARIOUS TEAMS', self)
        btn1.setGeometry(QRect(100, 500, 1050, 81))
        btn1.setFont(newfont)
        btn2 = QPushButton('RUNS SCORED BY CHOSEN BATSMAN AGAINST VARIOUS BOWLERS', self)
        btn2.setFont(newfont)
        btn2.setGeometry(QRect(100, 600, 1050, 81))
        btn1.clicked.connect(self.selectedbat1)
        btn2.clicked.connect(self.selectedbat2)
        self.show()
        # Dead experimental layout code kept from a previous iteration.
        '''centralWidget=QWidget(self)
        centralWidget2 = QWidget(self)
        self.combo1 = QComboBox(centralWidget)
        values = data1.batsman.unique()
        values.sort()
        self.combo1.addItem("SELECT ANY PLAYER")
        self.combo1.addItems(values)
        self.combo1.setFont(newfont)
        self.combo1.currentTextChanged.connect(self.MARGIN_WICKETS)
        self.combo1.setGeometry(QRect(100, 300, 500, 81))
        self.line = QLineEdit(centralWidget2)
        self.line.setEchoMode(QLineEdit.PasswordEchoOnEdit)
        self.line.setFont(newfont)
        self.result = QLabel('Result ')
        self.result.setFont(newfont)
        self.setCentralWidget(centralWidget)
        self.show()'''

    def MARGIN_WICKETS(self):
        """Bar chart: matches each team won by a full 10 wickets."""
        details = (data[data["win_by_wickets"] == 10]["winner"].value_counts())
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.35,top=0.94)
        plt.xlabel('Team Names',fontsize=20)
        plt.ylabel('No Of Matches',fontsize=20)
        plt.title("No Of Matches Won By 10 Wickets",fontsize=22)
        plt.show()

    def MARGIN_RUNS(self):
        """Bar chart: matches each team won by 100+ runs."""
        details = (data[data["win_by_runs"] >= 100]["winner"].value_counts())
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.35,top=0.94)
        plt.xlabel('Team Names',fontsize=20)
        plt.ylabel('No Of Matches',fontsize=20)
        plt.title("No Of Matches Won By More Than 100 Runs",fontsize=22)
        plt.show()

    def close_runs(self):
        """Bar chart: matches won by exactly 1 run."""
        details = (data[data["win_by_runs"] == 1]["winner"].value_counts())
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.35,top=0.94)
        plt.xlabel('Team Names',fontsize=20)
        plt.ylabel('No Of Matches',fontsize=20)
        plt.title("No Of Matches Won By Only 1 Run Left",fontsize=22)
        plt.show()

    def close_wickets(self):
        """Bar chart: matches won with exactly 1 wicket remaining."""
        details = (data[data["win_by_wickets"] == 1]["winner"].value_counts())
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.35,top=0.94)
        plt.xlabel('Team Names',fontsize=20)
        plt.ylabel('No Of Matches',fontsize=20)
        plt.title("No Of Matches Won By Only 1 Wicket Left",fontsize=22)
        plt.show()

    def tosswin(self):
        """Bar chart: wins by the team that also won the toss."""
        details = data[data["toss_winner"] == data["winner"]]["winner"].value_counts()
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel('Team Names',fontsize=20)
        plt.ylabel('No Of Matches Won',fontsize=20)
        plt.title("No Of Matches Won After Winning Toss",fontsize=22)
        plt.show()

    def tossfield(self):
        """Bar chart: toss winners who chose to field and then won."""
        try:
            details=(data[(data["toss_decision"]=="field")&(data["toss_winner"]==data["winner"])]["winner"].value_counts())
            details.plot(kind="bar",figsize=(16,8),fontsize=12)
            plt.subplots_adjust(bottom=0.35)
            plt.xlabel('Team Names',fontsize=20)
            plt.ylabel('No Of Matches Won',fontsize=20)
            #plt.set_xticks(xlabel)
            plt.title("No Of Matches Won After Chosing Fielding",fontsize=22)
            plt.show()
        except BaseException as ex:
            print(ex)

    def tossbat(self):
        """Bar chart: toss winners who chose to bat and then won."""
        details=(data[(data["toss_decision"]=="bat")&(data["toss_winner"]==data["winner"])]["winner"].value_counts())
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel('Team Names',fontsize=20)
        plt.ylabel('No Of Matches Won',fontsize=20)
        plt.title("No Of Matches Won After Chosing Batting",fontsize=22)
        plt.show()

    def man(self):
        """Bar chart: top 20 player-of-the-match award winners."""
        details=(data["player_of_match"].value_counts()[:20])
        #print(details)
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel('Player Names',fontsize=20)
        plt.ylabel('No Of Man Of Match Awards',fontsize=20)
        plt.title("No Of Man Of Match Awards Won By Top 20 Players",fontsize=22)
        plt.show()

    def fielder(self):
        """Bar chart: top 20 fielders by dismissals credited to them."""
        try:
            details = data1[(data1["fielder"].isnull() == False)]["fielder"].value_counts()[:20]
            # print(details)
            details.plot(kind="bar",figsize=(14,8),fontsize=20)
            plt.subplots_adjust(bottom=0.25)
            plt.xlabel('Player Names',fontsize=20)
            plt.ylabel('No Of Catches',fontsize=20)
            plt.title("Top 20 Fielders",fontsize=22)
            plt.show()
        except BaseException as ex:
            print(ex)

    def bowlers(self):
        """Bar chart: top 20 wicket takers (excluding run outs / retired hurt)."""
        details = data1[(data1["player_dismissed"] != "notout") & (data1["dismissal_kind"] != "run out")& (data1["dismissal_kind"] != "retired hurt")]["bowler"].value_counts()[:20]
        # print(details)
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel('Player Names',fontsize=20)
        plt.ylabel('No Of Wickets',fontsize=20)
        plt.title("Top 20 Bowlers",fontsize=22)
        plt.show()

    def number_wins(self):
        """Bar chart: total wins per team."""
        details = data["winner"].value_counts()
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel('Team Names',fontsize=20)
        plt.ylabel('No Of Matches Won',fontsize=20)
        plt.title("No Of Matches Won By Teams",fontsize=22)
        plt.show()

    def number_matches(self):
        """Bar chart: matches played per season."""
        details = data["season"].value_counts()
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel('Season',fontsize=20)
        plt.ylabel('No Of Matches ',fontsize=20)
        plt.title("No Of Matches In Each Season",fontsize=22)
        plt.show()

    def venues(self):
        """Bar chart: matches hosted per venue."""
        details = (data["venue"].value_counts())
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel("Venue Name",fontsize=20)
        plt.ylabel("No Of Matches",fontsize=20)
        plt.title("No Of Matches At Each Venue",fontsize=22)
        plt.show()

    def dlvenues(self):
        """Bar chart: venues where Duckworth-Lewis was applied."""
        details = data[data["dl_applied"] == 1]["venue"].value_counts()
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel("Venue Name",fontsize=20)
        plt.ylabel("No Of Times DL Applied",fontsize=20)
        plt.title("No Of Times DL Applied At A Venue",fontsize=22)
        plt.show()

    def notnormal(self):
        """Bar chart: venues with non-normal match results (ties etc.)."""
        details = (data[data["result"] != "normal"]["venue"].value_counts())
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel("Venue Name",fontsize=20)
        plt.ylabel("No Of Result Is Not Normal",fontsize=20)
        plt.title("No Of Times Result Is Not Normal At A Venue",fontsize=22)
        plt.show()

    def umpire1(self):
        """Bar chart: appearances in the umpire1 column."""
        details = (data["umpire1"].value_counts())
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel("Umpire Name",fontsize=20)
        plt.ylabel("No Of Times Umpired",fontsize=20)
        plt.title("No Of Times A Umpire Has Done Umpiring In IPL",fontsize=22)
        plt.show()

    def umpire2(self):
        """Bar chart: appearances in the umpire2 column."""
        details = (data["umpire2"].value_counts())
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel("Umpire Name",fontsize=20)
        plt.ylabel("No Of Times Umpired",fontsize=20)
        plt.title("No Of Times A Umpire Has Done Umpiring In IPL",fontsize=22)
        plt.show()

    def resulttype(self):
        """Bar chart: frequency of each result type."""
        details = (data["result"].value_counts())
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel("Result Type",fontsize=20)
        plt.ylabel("No Of Matches ",fontsize=20)
        plt.title("No Of Times This Result Happened",fontsize=22)
        plt.show()

    def dlapplied(self):
        """Bar chart: DL applied (1) vs not (0)."""
        details = (data["dl_applied"].value_counts())
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel("DL Applied(1) or Not(0)",fontsize=20)
        plt.ylabel("No Of Matches ",fontsize=20)
        plt.title("No Of Times DL Is Applied",fontsize=22)
        plt.show()

    def tossdecision(self):
        """Bar chart: bat vs field toss decisions."""
        details = (data["toss_decision"].value_counts())
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel("Toss Decision",fontsize=20)
        plt.ylabel("No Of Matches ",fontsize=20)
        plt.title("Which Is More Prefered(Bat Or Field)",fontsize=22)
        plt.show()

    def teamruns(self):
        """Bar chart: total runs scored by each batting team."""
        details = data1.groupby(data1.batting_team).total_runs.sum()
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel("Team Name",fontsize=20)
        plt.ylabel(" Total No Of Runs ",fontsize=20)
        plt.title("Total No Of Runs By All Teams",fontsize=22)
        plt.show()

    def overruns(self):
        """Pie chart: run distribution across over numbers."""
        details = data1.groupby([data1.over]).total_runs.sum()
        details.plot(kind="pie",figsize=(14,8))
        plt.title("Total Runs In Different Overs",fontsize=22)
        plt.show()

    def batsmanruns(self):
        """Bar chart: top 20 run scorers overall."""
        details = data1.groupby(data1.batsman).batsman_runs.sum().sort_values(ascending=False).head(20)
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel("Batsman Name",fontsize=20)
        plt.ylabel(" Total No Of Runs ",fontsize=20)
        plt.title("Total No Of Runs By Top 20 Batsman",fontsize=22)
        plt.show()

    def chasingruns(self):
        """Bar chart: top 20 run scorers in the second innings (chasing)."""
        df = data1.loc[data1.inning == 2, :]
        details = df.groupby(data1.batsman).batsman_runs.sum().sort_values(ascending=False).head(20)
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel("Batsman Name",fontsize=20)
        plt.ylabel(" Total No Of Runs ",fontsize=20)
        plt.title("Total No Of Runs By Top 20 Chasing Batsman",fontsize=22)
        plt.show()

    def centbatsman(self):
        """Bar chart: centuries (100+ in a match) per batsman, top 20."""
        # Per-match per-batsman totals, then keep only scores of 100+.
        runs_series = data1.groupby([data1.match_id, data1.batsman]).total_runs.sum()
        runs_df = pd.DataFrame(runs_series)
        runs_df = runs_df.reset_index()
        condition = runs_df.total_runs >= 100
        player_100df = runs_df.loc[condition, :]
        details = player_100df.groupby(player_100df.batsman).total_runs.count().sort_values(ascending=False).head(20)
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel("Batsman Name",fontsize=20)
        plt.ylabel(" Total No Of Centuries ",fontsize=20)
        plt.title("Total No Of Centuries By Top 20 Batsman",fontsize=22)
        plt.show()

    def dismissals(self):
        """Bar chart: counts of each dismissal kind."""
        details = data1[data1["dismissal_kind"] != "notout"]["dismissal_kind"].value_counts()
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel("Type Of Dismissal",fontsize=20)
        plt.ylabel("No Of Matches ",fontsize=20)
        plt.title(" Count Of Dismissal Types",fontsize=22)
        plt.show()

    def overwickets(self):
        """Pie chart: bowler-credited wickets per over number."""
        condition = (data1.dismissal_kind != 'run out') & (data1.dismissal_kind != 'retired hurt') & (data1.dismissal_kind != 'notout')
        df = data1.loc[condition, :]
        details = df.groupby([df.over]).over.count()
        details.plot(kind="pie",figsize=(14,8),fontsize=12)
        plt.xlabel("Over Number",fontsize=20)
        plt.ylabel("No Of Wickets",fontsize=20)
        plt.title(" Count Of Wickets In Different Overs",fontsize=22)
        plt.show()

    def bowlerruns(self):
        """Bar chart: lowest mean runs conceded per ball.

        NOTE(review): head(11) is taken but the title says "Top 10"."""
        details = data1.groupby(data1.bowler).total_runs.mean().sort_values().head(11)
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel("Bowler Name",fontsize=20)
        plt.ylabel("No Of Runs Per Ball",fontsize=20)
        plt.title("Top 10 Economic Bowlers",fontsize=22)
        plt.show()

    def bowlerwickets(self):
        """Bar chart: top 20 wicket takers.

        NOTE(review): duplicates bowlers() and is never connected to a menu
        action — confirm whether it can be removed."""
        details = data1[(data1["player_dismissed"] != "notout") & (data1["dismissal_kind"] != "run out") & (data1["dismissal_kind"] != "retired hurt")]["bowler"].value_counts()[:20]
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel('Player Names',fontsize=20)
        plt.ylabel('No Of Wickets',fontsize=20)
        plt.title('Top 20 Bowlers With Most Number Of Wickets',fontsize=22)
        plt.show()

    def maxwickets(self):
        """Bar chart: top 20 wicket takers in death overs (over > 15)."""
        details = data1[(data1["over"] > 15) & (data1["player_dismissed"] != "notout") & (data1["dismissal_kind"] != "run out") & (data1["dismissal_kind"] != "retired hurt")]["bowler"].value_counts()[:20]
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel('Players Name',fontsize=20)
        plt.ylabel('No Of Wickets',fontsize=20)
        plt.title("Top 20 Bowlers With Most Number Of Wickets In Death Overs::Death Overs Specialist",fontsize=22)
        plt.show()

    def minruns(self):
        """Bar chart: best death-over economy among bowlers with 90+ balls."""
        del_series = data1.groupby(data1.bowler).total_runs.count()
        del_df = pd.DataFrame(del_series).reset_index()
        del_df.columns = ['bowler', 'delivery']
        # Ignore bowlers with too few deliveries to have a meaningful economy.
        bowlers = del_df.loc[del_df.delivery >= 90, :].bowler
        condition = (data1.over > 15) & (data1.bowler.isin(bowlers))
        df = data1.loc[condition, :]
        details = df.groupby(df.bowler).total_runs.mean().sort_values().head(10)
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel('Bowlers Name',fontsize=20)
        plt.ylabel('Runs Per Ball',fontsize=20)
        # NOTE(review): the second title() call overwrites the first.
        plt.title('Best Economy In Death Overs',fontsize=20)
        plt.title('Death Overs Specialist',fontsize=22)
        plt.show()

    def runouts(self):
        """Bar chart: top 20 fielders by run outs effected."""
        condition = data1.dismissal_kind == 'run out'
        df = data1.loc[condition, :]
        details = df.groupby(df.fielder).batsman.count().sort_values(ascending=False).head(20)
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel('Fielders Name',fontsize=20)
        plt.ylabel('No Of Run Outs',fontsize=20)
        plt.title("Top 20 Fielders With Most Number Of Run Outs",fontsize=22)
        plt.show()

    def catches(self):
        """Bar chart: top 20 fielders by catches (incl. caught and bowled)."""
        condition = (data1.dismissal_kind == 'caught and bowled') | (data1.dismissal_kind == 'caught')
        df = data1.loc[condition, :]
        details = df.groupby(df.fielder).batsman.count().sort_values(ascending=False).head(20)
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel('Fielders Name',fontsize=20)
        plt.ylabel('No Of Catches',fontsize=20)
        plt.title("Top 20 Fielders With Most Number Of Catches",fontsize=22)
        plt.show()

    def keepers(self):
        """Bar chart: top 20 keepers by stumpings."""
        condition = data1.dismissal_kind == 'stumped'
        df = data1.loc[condition, :]
        details = df.groupby(df.fielder).batsman.count().sort_values(ascending=False).head(20)
        details.plot(kind="bar",figsize=(14,8),fontsize=12)
        plt.subplots_adjust(bottom=0.25)
        plt.xlabel('Fielders Name',fontsize=20)
        plt.ylabel('No Of Catches And Stumpout By Keepers',fontsize=20)
        plt.title("Top 20 Keepers",fontsize=22)
        plt.show()

    def selectedbat1(self):
        """Plot the chosen batsman's runs against each opposing team.

        NOTE(review): after the "no player selected" warning, execution
        still falls through to the plotting loop — likely missing a return.
        The hard-coded range(0,461) assumes exactly 461 distinct batsmen."""
        try:
            value = self.combo1.currentText()
            if value=='SELECT ANY PLAYER':
                QMessageBox.about(self, "Error", "No Player Selected!!!")
                QMessageBox.setBaseSize(self,QSize(800, 120))
            else:
                pass
            values = data1.batsman.unique()
            values.sort()
            for i in range(0,461):
                if value == values[i]:
                    df = data1.loc[data1.batsman == values[i], :]
                    details=df.groupby(df.bowling_team).batsman_runs.sum().sort_values(ascending=False)
                    #print(details)
                    details.plot(kind="bar",figsize=(14,8),fontsize=12)
                    plt.subplots_adjust(bottom=0.25)
                    plt.xlabel('Team Name',fontsize=20)
                    plt.ylabel('No Of Runs',fontsize=20)
                    plt.title(values[i] +"'s Runs Against Various Teams",fontsize=22)
                    plt.show()
                else:
                    pass
            #details.to_csv('againstteams.csv')
        except BaseException as ex:
            print(ex)

    def selectedbat2(self):
        """Plot the chosen batsman's runs against his top 20 bowlers.

        NOTE(review): same fall-through and hard-coded range caveats as
        selectedbat1."""
        try:
            value = self.combo1.currentText()
            if value == 'SELECT ANY PLAYER':
                QMessageBox.about(self, "Error", "No Player Selected!!!")
                QMessageBox.setBaseSize(self, QSize(800, 120))
            else:
                pass
            values = data1.batsman.unique()
            values.sort()
            for i in range(0,461):
                if value == values[i]:
                    df = data1.loc[data1.batsman == values[i], :]
                    details=df.groupby(df.bowler).batsman_runs.sum().sort_values(ascending=False).head(20)
                    #print(details)
                    details.plot(kind="bar",figsize=(14,8),fontsize=12)
                    plt.subplots_adjust(bottom=0.25)
                    plt.xlabel('Bowler Name',fontsize=20)
                    plt.ylabel('No Of Runs',fontsize=20)
                    plt.title(values[i] +"'s Top Scores Against Top 20 Bowlers",fontsize=22)
                    plt.show()
                else:
                    pass
            #details.to_csv('againstteams.csv')
        except BaseException as ex:
            print(ex)
if __name__ == '__main__':
    # Start the Qt event loop; charts open in separate matplotlib windows.
    app = QApplication(sys.argv)
    obj = Demo2()
    sys.exit(app.exec_())
import sympy as sm
import numpy as np
import torch
import time
#_generate_matrix______________________________________________
def sympy_coo_matrix(I, J, V, size):
    """Build a sympy matrix from COO triplets (rows I, cols J, values V).

    The triplets are sorted by row, converted to CSR row pointers, and fed
    to sympy's internal _csrtodok. ``size`` is the (rows, cols) shape.
    NOTE(review): rows past the last nonzero entry are assumed covered by
    the final pointer — confirm behavior when trailing rows are empty.
    """
    # sort I, J, V such that I is monotonic increasing
    from sympy import Matrix
    I, J, V = list(I), list(J), Matrix(V)
    I_idx = sorted(range(len(I)),key=I.__getitem__)
    I = [int(I[i]) for i in I_idx]
    J = [int(J[i]) for i in I_idx]
    V = Matrix([V[i] for i in I_idx])
    # Build the CSR row-pointer array: one cut per row start, repeated for
    # any empty rows in between.
    I_cuts = [0]
    x = 0
    for i in range(len(I)):
        if x < I[i]:
            for n in range(I[i]-x):
                I_cuts += [i]
            x = I[i]
    I_cuts += [len(I)]
    # csr in sympy (private helper; API may change between sympy versions)
    from sympy.matrices.sparsetools import _csrtodok
    M = _csrtodok([V, J, I_cuts, size])
    return M
#______________________________________________________________
#_Matrix_format_convertions:___________________________________
def sympy_mat_to_str(M):
    """Serialize a 2-D matrix into one bracketed string, entry by entry.

    Every entry and every row is followed by ', ' (including the last),
    matching the historical output format of this helper.
    """
    rows = []
    for r in range(M.shape[0]):
        cells = ''.join(str(M[r, c]) + ', ' for c in range(M.shape[1]))
        rows.append('[' + cells + ']')
    return '[' + ''.join(row + ', ' for row in rows) + ']'
def sympy_mat_to_list_of_str(M):
    """Return matrix M as a nested list of stringified entries (row-major)."""
    return [[str(M[r, c]) for c in range(M.shape[1])]
            for r in range(M.shape[0])]
def to_sympy_mat(mat):
    """Build a sympy Matrix from a nested list or a parseable string.

    A string expression is parsed with sympy; a scalar result is wrapped so
    a vector comes out as an n*1 matrix. Any other input type raises
    UnboundLocalError at the return (unchanged historical behavior).
    """
    if type(mat) == list:
        result = sm.Matrix(mat)
    elif type(mat) == str:
        from sympy.parsing.sympy_parser import parse_expr
        parsed = parse_expr(mat)
        if type(parsed) not in [list, tuple]:
            parsed = [parsed]
        result = sm.Matrix(parsed)
    return result
#______________________________________________________________
#_evaluate_matrix______________________________________________
def eval_mat_list(mat_list, x, y, z, package='numpy', device='cpu'):
    """Evaluate a matrix of expression strings element-wise at x, y, z.

    mat_list: nested list of strings in variables x, y, z (may use exp/sqrt).
    x, y, z: coordinate arrays (numpy arrays or torch tensors); output shape
        is (rows, cols) + x.shape.
    package: 'numpy' or 'torch'; device applies to torch only.

    Leftover timing scaffolding (t0..t3, t_expr/t_eval and their commented
    prints) has been removed; results are unchanged.
    """
    shape = (len(mat_list), len(mat_list[0])) + x.shape
    if package == 'numpy':
        res = np.empty(shape, dtype=float)
        exp, sqrt = np.exp, np.sqrt
    elif package == 'torch':
        res = torch.empty(shape, dtype=torch.float32, device=device)
        exp, sqrt = torch.exp, torch.sqrt
        import re

        def add_tensor(s):
            # Bare numeric literals must become tensors for torch arithmetic.
            if not re.findall('[a-z]|[A-Z]', s):
                return 'torch.tensor(' + s + ', dtype=torch.float32, device=device)'
            return s
    # Evaluate element by element; eval() sees the local x, y, z, exp, sqrt.
    # NOTE: eval trusts the expression strings — only use generated input.
    for i in range(len(mat_list)):
        for j in range(len(mat_list[0])):
            if package == 'torch':
                # Wrap constant arguments of exp()/sqrt() as tensors
                # (regex cannot handle nested calls).
                str2eval = re.sub(r'(exp|sqrt)\((.*?)\)',
                                  lambda m: m.group(1) + '(' + add_tensor(m.group(2)) + ')',
                                  mat_list[i][j])
            else:
                str2eval = mat_list[i][j]
            res[i, j] = eval(str2eval)
    return res
def torchify_str(expr_str):
    """Rewrite math calls in *expr_str* for torch evaluation.

    exp/sqrt/sin/cos become torch.* calls, and an argument containing none
    of x, y, z is wrapped as a torch tensor literal (the produced string
    expects ``device`` to be in scope when eval'd).
    NOTE: nested calls such as exp(sqrt(...)) are not handled.
    """
    import re

    def wrap_constant(arg):
        if 'x' not in arg and 'y' not in arg and 'z' not in arg:
            return 'torch.tensor(' + arg + ', dtype=torch.float32, device=device)'
        return arg

    return re.sub(r'(exp|sqrt|sin|cos)\((.*?)\)',
                  lambda m: 'torch.' + m.group(1) + '(' + wrap_constant(m.group(2)) + ')',
                  expr_str)
def eval_sympy_mat(M, x, y, z, deriv=0, package='numpy', device='cpu'):
    """Evaluate sympy matrix M (and optionally its derivatives) at x, y, z.

    deriv: order of the element-by-element derivative. An int is promoted
    to a tuple; 0 keeps M itself, 1 appends d/dx, d/dy, d/dz blocks
    (vectors only), and a tuple may request both. An axis name
    'x'/'y'/'z' differentiates every entry along that single axis.
    Returns whatever eval_mat_list produces for the chosen package.
    """
    import sympy as sm
    t0 = time.time()
    if type(deriv) == int: deriv = (deriv,)
    if deriv in ['x', 'y', 'z']:
        # Single named axis: differentiate every entry along it.
        Mp = M.applyfunc(lambda f: sm.diff(f,deriv))
    else:
        Mp = sm.Matrix()
        if 0 in deriv:
            Mp = Mp.row_join(M)
        if 1 in deriv:
            assert 1 in M.shape, "can only handle vectors for derivative ..."
            # Work on a column vector so the derivative blocks join as columns.
            if M.shape[0] == 1:
                M, Mp = M.T, Mp.T
            dx_M = M.applyfunc(lambda f: sm.diff(f,'x'))
            dy_M = M.applyfunc(lambda f: sm.diff(f,'y'))
            dz_M = M.applyfunc(lambda f: sm.diff(f,'z'))
            Mp = Mp.row_join(dx_M).row_join(dy_M).row_join(dz_M)
    # convert to list of strings, then evaluate numerically
    t1 = time.time()
    mat_list = sympy_mat_to_list_of_str(Mp)
    t2 = time.time()
    res = eval_mat_list(mat_list, x, y, z, package=package, device=device)
    t3 = time.time()
    # print("time for derivation: ", t1 - t0)
    # print("time for mat to str: ", t2 - t1)
    # print("time for evaluation: ", t3 - t2)
    ## remove one dim from vector M
    # if squeezeM:
    #     if len(mat_list[0]) == 1:
    #         if package == 'numpy': res = np.squeeze(res, axis=1)
    #         elif package == 'torch': res = res.squeeze(1)
    #     elif len(mat_list) == 1: # doesn't remove another dim even for 1x1 mat
    #         if package == 'numpy': res = np.squeeze(res, axis=0)
    #         elif package == 'torch': res = res.squeeze(0)
    return res
#______________________________________________________________
# polynomials__________________________________________________
def poly_to_list_of_dict(poly_list, to_str=True, to_torch=False):
    """Map each sympy Poly to a {monomial-exponents: coefficient} dict.

    to_str: stringify coefficients; to_torch additionally rewrites them
    with torchify_str (only effective together with to_str).
    """
    if to_str:
        if to_torch:
            convert = lambda coeff: torchify_str(str(coeff))
        else:
            convert = lambda coeff: str(coeff)
    else:
        convert = lambda coeff: coeff
    result = []
    for poly in poly_list:
        converted = [convert(coeff) for coeff in poly.coeffs()]
        result.append(dict(zip(poly.monoms(), converted)))
    return result
def multiply_polyes_sm(p1, p2):
    """Cross-multiply two lists of {monomial: coefficient} dicts term-wise.

    For every (B, A) pair the coefficient maps are merged over the union of
    monomials, with a missing coefficient treated as 1.
    """
    products = []
    for B in p2:
        for A in p1:
            monomials = set(A).union(B)
            products.append({m: A.get(m, 1) * B.get(m, 1) for m in monomials})
    return products
def multiply_polyes_str(p1, p2):
    """Cross-multiply string-coefficient polynomials (lists of dicts).

    Each product coefficient is the string '(a)*(b)'. A monomial missing
    from one factor contributes the string '1'; the original defaulted to
    the int 1, which made ``'(' + 1`` raise TypeError for any non-identical
    key sets.
    """
    return [{x: '(' + A.get(x, '1') + ')*(' + B.get(x, '1') + ')'
             for x in set(A).union(B)}
            for B in p2 for A in p1]
def flatten_poly(poly):
    """Flatten a list of {order: coeff} dicts into parallel flat lists.

    Returns a dict with 'orders', 'coeffs', 'poly_indces' (the owning
    polynomial index of each term) and 'n_polyes'. A raw list of sympy
    Polys is first converted via poly_to_list_of_dict.
    """
    if type(poly[0]) == list:
        poly = poly_to_list_of_dict(poly)
    idx, orders, coeffs = [], [], []
    for poly_index, term_dict in enumerate(poly):
        for order, coeff in term_dict.items():
            idx.append(poly_index)
            orders.append(order)
            coeffs.append(coeff)
    return {'orders': orders, 'coeffs': coeffs,
            'poly_indces': idx, 'n_polyes': len(poly)}
def eval_poly_r_ref_torch(poly, r_ref):
    """Evaluate flattened-poly coefficient strings at reference coordinates.

    poly: list of {order: coeff-string} dicts (flattened here via
        flatten_poly); coefficient strings may reference x_ref/y_ref/z_ref.
    r_ref: tensor whose last dimension holds (x, y, z) reference coords.
    Returns the flattened-poly dict with 'orders', 'coeffs' and
    'poly_indces' converted to torch tensors.

    The per-term debug print statements of the original have been removed;
    the returned values are unchanged.
    """
    poly = flatten_poly(poly)
    x_ref, y_ref, z_ref = r_ref.split(1, dim=-1)
    x_ref, y_ref, z_ref = x_ref.squeeze(-1), y_ref.squeeze(-1), z_ref.squeeze(-1)
    import torch
    orders = torch.tensor(poly['orders'])
    n_terms = len(poly['poly_indces'])
    r_shape = r_ref.shape[:-1]
    coeffs = torch.empty((n_terms,) + r_shape, dtype=torch.float32)
    for i_poly in range(n_terms):
        # eval() sees the local x_ref/y_ref/z_ref bound above.
        coeffs[i_poly] = eval(poly['coeffs'][i_poly])
    idx = torch.tensor(poly['poly_indces'])
    return {'orders': orders, 'coeffs': coeffs,
            'poly_indces': idx, 'n_polyes': poly['n_polyes']}
#______________________________________________________________
#_transformations____________________________________________
def transform_expr(expr, trans_dict):  # expr can also be a sympy matrix
    """Apply rotation / translation / differentiation to a sympy expression
    in the symbols x, y, z, as requested by ``trans_dict``.

    trans_dict keys (each optional, ignored when None):
        'rotation': 3x3 matrix; coordinates are substituted with the
            inverse-rotated ones.
        'translation': length-3 sequence T; substitutes (x-T[0], ...).
        'derivative': a string, either containing the axis letters to
            differentiate by (e.g. 'xxy') or exactly 'laplace'.

    Bug fixes: the old test ``('x' or 'y' or 'z') in s`` only ever checked
    for 'x', and ``count()`` was called with sympy Symbols (a TypeError on
    strings) instead of the axis characters.
    """
    x, y, z = sm.symbols('x, y, z')
    xo, yo, zo = sm.symbols('xo, yo, zo')
    if 'rotation' in trans_dict and trans_dict['rotation'] is not None:
        R = sm.Matrix(trans_dict['rotation'])
        R = R.inv()
        V = sm.Matrix([x, y, z])
        xn, yn, zn = R * V
        # Substitute through intermediate symbols; sympy mixes up the
        # notation if the rotated coordinates are substituted directly.
        expr = expr.subs({x: xo, y: yo, z: zo})
        expr = expr.subs({xo: xn, yo: yn, zo: zn})
    if 'translation' in trans_dict and trans_dict['translation'] is not None:
        T = trans_dict['translation']
        xp, yp, zp = x - T[0], y - T[1], z - T[2]
        expr = expr.subs({x: xp, y: yp, z: zp})
    if 'derivative' in trans_dict and trans_dict['derivative'] is not None:
        deriv = trans_dict['derivative']
        if any(axis in deriv for axis in ('x', 'y', 'z')):
            cdx = deriv.count('x')
            cdy = deriv.count('y')
            cdz = deriv.count('z')
            expr = expr.diff('x', cdx, 'y', cdy, 'z', cdz)
        if deriv.strip().lower() == 'laplace':
            expr = expr.diff('x', 2) + expr.diff('y', 2) + expr.diff('z', 2)
    return expr
def transform_expr_pure_sm(expr, trans_dict):
    """Symbolic variant of transform_expr: instead of numeric values, the
    rotation/translation stay as free sympy symbols in the result.

    The *contents* of trans_dict['rotation'] / ['translation'] are ignored;
    the entries only toggle whether the corresponding symbolic substitution
    is applied.  Returns {'expr': transformed expression,
    'listOfSymbolStrings': names of the free symbols introduced}.

    Bug fix: the old derivative test ``('x' or 'y' or 'z') in s`` only ever
    checked for 'x'; derivatives in y or z alone were silently skipped.
    """
    if trans_dict is None: return {'expr':expr, 'listOfSymbolStrings':[]}
    x, y, z = sm.symbols('x, y, z')
    x_o, y_o, z_o = sm.symbols('x_o, y_o, z_o')
    sybs = []
    if 'rotation' in trans_dict and trans_dict['rotation'] is not None and trans_dict['rotation']!=False:
        R, R_sybs = rotation_mat_sm().values()
        V = sm.Matrix([x, y, z])
        x_n, y_n, z_n = R * V
        # Substitute through intermediate symbols; sympy mixes up the
        # notation if the rotated coordinates are substituted directly.
        expr = expr.subs({x:x_o, y:y_o, z:z_o})
        expr = expr.subs({x_o:x_n, y_o:y_n, z_o:z_n})
        sybs += R_sybs
    if 'translation' in trans_dict and trans_dict['translation'] is not None and trans_dict['translation']!=False:
        x_ref, y_ref, z_ref = sm.symbols('x_ref, y_ref, z_ref')
        x_n, y_n, z_n = x-x_ref, y-y_ref, z-z_ref
        expr = expr.subs({x:x_o, y:y_o, z:z_o})
        expr = expr.subs({x_o:x_n, y_o:y_n, z_o:z_n})
        sybs += ['x_ref', 'y_ref', 'z_ref']
    if 'derivative' in trans_dict and trans_dict['derivative'] is not None and trans_dict['derivative']!=False:
        if any(axis in trans_dict['derivative'] for axis in ('x', 'y', 'z')):
            cdx = trans_dict['derivative'].count('x')
            cdy = trans_dict['derivative'].count('y')
            cdz = trans_dict['derivative'].count('z')
            expr = expr.diff('x', cdx, 'y', cdy, 'z', cdz)
        if trans_dict['derivative'].strip().lower() == 'laplace':
            expr = expr.diff('x', 2) + expr.diff('y', 2) + expr.diff('z', 2)
    return {'expr':expr, 'listOfSymbolStrings':sybs}
def rotation_mat_sm():
    """Build the symbolic axis-angle rotation matrix.

    Free symbols: rotation angle ``omega`` and rotation-axis components
    ``v_x, v_y, v_z`` (the axis is assumed normalized: the constraint
    v_x**2 + v_y**2 + v_z**2 == 1 is substituted into the matrix).

    Returns {'mat': sympy Matrix, 'listOfSymbolStrings': symbol names}.
    """
    from sympy import symbols
    from sympy.vector import AxisOrienter, CoordSys3D
    omega = symbols('omega')
    v_x, v_y, v_z = symbols('v_x, v_y, v_z')
    # Original frame 'xyz', rotated frame 'XYZ'.
    base = CoordSys3D('xyz')
    orienter = AxisOrienter(omega, v_x * base.i + v_y * base.j + v_z * base.k)
    rotated = base.orient_new('XYZ', (orienter, ))
    mat = base.rotation_matrix(rotated).subs(v_x**2 + v_y**2 + v_z**2, 1)
    return {'mat': mat, 'listOfSymbolStrings': ['omega', 'v_x', 'v_y', 'v_z']}
#______________________________________________________________
def test():
    # Smoke test: print a few to_sympy_mat conversions (defined elsewhere
    # in this module) — a constant, a quadratic monomial basis and the
    # linear basis.
    print(to_sympy_mat('1.0'))
    print(to_sympy_mat('x*x, sqrt(3)*x*y, sqrt(3)*x*z, y*y, sqrt(3)*y*z, z*z'))
    print(to_sympy_mat('x, y, z'))
if __name__ == '__main__':
    # Run the smoke test only when executed as a script.
    test()
|
# Creates an image representing all the DNA code downloaded from 23andme.
#
# Run with 'python3 dna_image.py <name_of_23andme_text_file>'
#
# An image will pop up with the results, and it will be saved as 'dna.png' in
# the current working directory. Note that this image contains all the data in
# your DNA so it should be shared with care. All the DNA data could be
# extracted from the image for processing in other ways.
#
# If not already installed, you will need both 'numpy' and 'pillow' installed.
# Easiest way is with PIP, commands are 'pip3 install numpy' and
# 'pip3 install pillow'.
from PIL import Image
import numpy as np
import math
import sys
# Converts the DNA dump from 23andme in the file named `filename` to a Python
# array of arrays of genetic code.
#
# For example:
#
# # This is a comment
# 'rs548049170 1 69869 TT'
# 'rs9283150 1 565508 AA'
#
# turns into
#
# [['rs548049170', '1', '69869', 'TT'],
# ['rs9283150', '1', '565508', 'AA']]
def get_dna_data(filename):
    """Parse a 23andme raw-data dump into a list of records.

    Each non-comment line becomes a list of its tab-separated fields,
    e.g. ['rs548049170', '1', '69869', 'TT'].  Lines starting with '#'
    are skipped.
    """
    with open(filename) as dna_file:
        return [
            row.rstrip('\n').split('\t')
            for row in dna_file
            if not row.startswith('#')
        ]
# Converts raw gene data into just the genetic letters (A, T, C, or G) in the
# left or right side of the chromosome.
#
# `data` is the array of arrays from get_dna_data()
# `name` is the name of the chromosome such as '1', '2', 'X', 'Y', or 'MT'
# `left` is a boolean on whether to retrieve the left or right chromosome
# ``
def get_chromosome_letters(data, name, left):
    """Extract the genetic letters (A/T/C/G) of one strand of a chromosome.

    data: records from get_dna_data()
    name: chromosome name such as '1', '2', 'X', 'Y' or 'MT'
    left: True for the left strand, False for the right
    """
    side = 0 if left else 1
    letters = []
    for record in data:
        if record[1] != name:
            continue
        genotype = record[3]
        # A single-letter genotype means there is no right-side match;
        # substitute '-' so downstream processing stays uniform.
        if not left and len(genotype) == 1:
            letters.append('-')
        else:
            letters.append(genotype[side])
    return letters
# Converts a genetic letter to a color for the image to be drawn in RGB format.
#
# A -> Red
# T -> Green
# C -> Blue
# G -> Yellow
# Missing or no match data -> Black
def letter_to_color(letter):
    """Map a genetic letter to an RGB color triple.

    A -> red, T -> green, C -> blue, G -> yellow; anything else
    (missing or no-match data) -> black.
    """
    palette = {
        'A': [255, 0, 0],
        'T': [0, 255, 0],
        'C': [0, 0, 255],
        'G': [255, 255, 0],
    }
    return palette.get(letter, [0, 0, 0])
# Converts all the genetic letters corresponding to a chromosome into a square
# array of colors for converting to an image later.
def letters_to_colors(letters):
    """Lay the letters' colors out on the smallest square grid that fits
    them, filled row-major; unused trailing cells stay black."""
    size = math.ceil(math.sqrt(len(letters)))
    colors = np.zeros((size, size, 3), dtype=np.uint8)
    for index, letter in enumerate(letters):
        row, col = divmod(index, size)
        colors[row, col] = letter_to_color(letter)
    return colors
# Combines the left and right sides of a chromosome into one image.
def combined_chromosome_colors(data, name):
    """Render both strands of chromosome ``name`` side by side, separated
    by a one-pixel black column."""
    halves = [
        letters_to_colors(get_chromosome_letters(data, name, side))
        for side in (True, False)
    ]
    separator = np.zeros((halves[0].shape[0], 1, 3), dtype=np.uint8)
    return np.concatenate((halves[0], separator, halves[1]), axis=1)
# Combines two images of possibly differing sizes by stacking one top of the
# other vertically and expanding with black to fill in the gaps.
#
# For example:
#
# ABC
# ABC JK DEF
# DEF + LM = GHI
# GHI JK-
# LM-
def combine_images(image_one, image_two):
    """Stack image_two below image_one on a shared canvas.

    The canvas is as wide as the wider input; the narrower image is padded
    with black on the right.
    """
    height = image_one.shape[0] + image_two.shape[0]
    width = max(image_one.shape[1], image_two.shape[1])
    canvas = np.zeros((height, width, 3), dtype=np.uint8)
    # Top-left corner holds image_one, the rows below hold image_two.
    canvas[:image_one.shape[0], :image_one.shape[1]] = image_one
    canvas[image_one.shape[0]:, :image_two.shape[1]] = image_two
    return canvas
# Converts all the DNA data into one 3 dimensional array of colors organized by
# chromosomes (1, 2, ..., 22, X, Y, MT).
def chromosome_colors(data):
    """Build one image with every chromosome (1..22, X, Y, MT) stacked
    vertically, in that order."""
    names = [str(c) for c in range(1, 23)] + ['X', 'Y', 'MT']
    colors = None
    for name in names:
        print("Processing chromosome... %s" % name)
        chunk = combined_chromosome_colors(data, name)
        colors = chunk if colors is None else combine_images(colors, chunk)
    return colors
# Hooks everything together.
#
# Get DNA from file -> convert to an image array -> draw it.
def main():
    # Read the 23andme dump named on the command line, render every
    # chromosome, save the result as dna.png in the current directory and
    # pop it up in the default viewer.
    image = Image.fromarray(chromosome_colors(get_dna_data(sys.argv[1])), 'RGB')
    image.save('dna.png')
    image.show()
if __name__ == '__main__':
    main()
|
#encoding=utf-8
from __future__ import unicode_literals
import sys
sys.path.append("../")
import Terry_toolkit as tkit
# Ad-hoc smoke test for the Terry_toolkit Db wrapper.
# t= tkit.Db()
tdb=tkit.Db()
# tdb.add(key="niubi",value={'gender': "male", 'age': 28, 'name': 'john'})
# tdb.add(key="niub2",value="wqwq")
# j=tdb.get(key="niub2")
# print(type(j))
# print(j)
# Select the 'ss' collection and dump what .all() returns.
tdb.col('ss')
print(dir(tdb.col.all()))
print(tdb.col.all())
|
from django.conf.urls import url
from cooks import views
urlpatterns = [
    # NOTE(review): r'^' is unanchored and matches every path; add '$'
    # (r'^$') if more routes are ever appended to this list.
    url(r'^', views.cooks, name='phoenix-cooks'),
] |
#!/usr/bin/python
# This script reads in a fixed form .f file and converts it to a free form
# .f90 file
import sys
import re
import argparse
class FortranLine:
    """One line of fixed-form FORTRAN source plus its free-form conversion.

    Constructing an instance immediately runs analyze()/convert(); the
    result is left in self.converted_line.  The class relies on several
    module-level globals maintained by the driver script below:
    prevIndent, baseIndent, incrementalIndent, continuationIndent,
    filetype, filename, DoLoopList, GoToList, MismatchedLabelList,
    ContinueStack and outfilen.
    """
    def convert(self):
        """Translate the analyzed fixed-form line into free-form f90 text.

        Strips column-based structure, rewrites labeled DO/CONTINUE pairs
        as do/end do, modernizes comparison operators and declarations and
        re-indents the result.  Sets self.converted_line.
        """
        line = self.line
        # If the line is short, replace with a newline. If the line is not short, save the data
        if len(line) > 6 and not self.isCpp:
            self.code = line[6:]
        elif self.isCpp:
            self.code = line
        else:
            self.code = '\n'
        # Remove trailing whitespace, but keep the \n
        self.code = self.code.rstrip() + '\n'
        ###############################################
        # Deal with GO TO (no stripping of whitepaces)
        if (not (self.isComment) and (self.code.lower().find('go to'))>0):
            #print("DEBUG: Found a GO TO: ", self.code.lower());
            m = re.match('(.*go to)(\s+)(\d+)',self.code.lower())
            if m:
                #print("DEBUG: Saving GO TO label: '", m.group(3),"'")
                GoToList.append(m.group(3))
        ###############################################
        ###############################################
        # Check for and remove do loop labels
        if (not self.isComment) and self.code.lstrip(' ').lower().startswith('do'):
            m = re.match('(.*do)\s(\d+)\s(.+)',self.code.lower())
            if m:
                self.code = m.group(1) + " " + m.group(3) + "\n"
                DoLoopList.append(m.group(2))
        if ' , ' in self.code:
            self.code = self.code.replace(' , ',', ')
        # Normalize spacing around plain '=' (but leave ==, <=, >=, => alone).
        if (not (self.isComment or self.isNewComment or self.isCpp)) and '=' in self.code:
            m = re.match('(.*)(?:==|<=|>=|=>)(.*)',self.code)
            if not m:
                m = re.match('(.*\S)=(\S.*)',self.code)
                if m:
                    self.code = m.group(1) + " = " + m.group(2) + "\n"
        ###############################################
        # replace all real*8 with real(8)
        if 'real*8' in self.code:
            self.code = self.code.replace('real*8','real(8)')
        # add ' :: ' to all variable definitions
        if self.code.lstrip(' ').lower().startswith(('real','integer','logical','character')) \
                and not '::' in self.code:
            m = re.match('(.*real)\s+(\D+.*)',self.code.lower())
            if m:
                self.code = m.group(1) + " :: " + m.group(2)
            m = re.match('(.*real\(.+\))\s+(\D+.*)',self.code.lower())
            if m:
                self.code = m.group(1) + " :: " + m.group(2)
            m = re.match('(.*integer)\s+(\D+.*)',self.code.lower())
            if m:
                self.code = m.group(1) + " :: " + m.group(2)
            m = re.match('(.*integer\(.+\))\s+(\D+.*)',self.code.lower())
            if m:
                self.code = m.group(1) + " :: " + m.group(2)
            m = re.match('(.*logical)\s+(\D+.*)',self.code.lower())
            if m:
                self.code = m.group(1) + " :: " + m.group(2)
            m = re.match('(.*character)\s+(\D+.*)',self.code.lower())
            if m:
                self.code = m.group(1) + " :: " + m.group(2)
            m = re.match('(.*character\(.+\))\s+(\D+.*)',self.code.lower())
            if m:
                self.code = m.group(1) + " :: " + m.group(2)
            m = re.match('(.*character\*\d+)\s+(\D+.*)',self.code.lower())
            if m:
                self.code = m.group(1) + " :: " + m.group(2)
        # replace 'elseif' with 'else if'
        if 'elseif' in self.code.lower() and not self.isCpp:
            self.code = self.code.lower().replace('elseif','else if')
        if 'endif' in self.code.lower() and not self.isCpp:
            self.code = self.code.lower().replace('endif','end if')
        if 'enddo' in self.code.lower():
            self.code = self.code.lower().replace('enddo','end do')
        ###############################################
        # replace all continue lines with End Do (no stripping of whitepaces)
        # only if expected
        if not((self.isComment) or (self.isNewComment)):
            if 'continue' in self.code.lower():
                #print("DEBUG: Continue found with label ",self.label)
                if (len(DoLoopList) > 0):
                    if(DoLoopList[-1]==(self.label.strip())):
                        #print("DEBUG: Matched '", DoLoopList[-1],"' with '",self.label,"'")
                        GoToSet = set(GoToList)
                        if (self.label.strip() in GoToSet):
                            # Save the label because it is used by GO TO
                            #print("DEBUG: This is also used by GO TO '", self.label.strip(),"'")
                            gotoindex = GoToList.index(self.label.strip())
                            del GoToList[gotoindex]
                            # Save the label and CONTINUE statement in the CONTINUE stack
                            ContinueStack.append(self.label + " " + self.code)
                            # Drop the label as it is linked to a DO
                            self.code = self.code.lower().replace("continue","end do")
                            self.label = ''
                        else:
                            # Drop the label as it is linked to a DO
                            self.code = self.code.lower().replace("continue","end do")
                            self.label = ''
                        del DoLoopList[-1]
                    else:
                        # Preserve CONTINUE without a label or not a CONTINUE for a GO TO
                        print("WARNING: Unexpected label for CONTINUE: '", self.label.strip(), "' expected '",DoLoopList[-1],"'")
                        GoToSet = set(GoToList)
                        if (self.label.strip() in GoToSet):
                            # Matched to GO TO Label
                            #print("DEBUG: This is actually used by GO TO '", self.label.strip(),"'")
                            gotoindex = GoToList.index(self.label.strip())
                            del GoToList[gotoindex]
                            self.label = self.label + " "
                        else:
                            # Not a Label for a GO TO
                            if(len(self.label.strip())>0):
                                MismatchedLabelList.append(self.label.strip())
                            self.label = self.label + " "
                else:
                    # Label without a prior DO
                    #print("DEBUG: Label without prior DO '", self.label.strip(),"'")
                    if (len(GoToList) > 0):
                        GoToSet = set(GoToList)
                        if (self.label.strip() in GoToSet):
                            # Matched to GO TO Label
                            #print("DEBUG: Preserving GO TO label '", self.label.strip(),"'")
                            gotoindex = GoToList.index(self.label.strip())
                            del GoToList[gotoindex]
                            self.label = self.label + " "
                        else:
                            # Standalone Label or future GO TO label
                            print ("WARNING: Preserving label not linked to DO or previous GO TO: '", self.label.strip(),"'")
                            if(len(self.label.strip())>0):
                                MismatchedLabelList.append(self.label.strip())
                            self.label = self.label + " "
                    else:
                        print ("WARNING: Preserving label not linked to DO or previous GO TO: '", self.label.strip(),"'")
                        self.label = self.label + " "
        else:
            if 'continue' in self.code.lower():
                #print("DEBUG: Comment based Continue found with label ",self.label)
                pass
        ###############################################
        ###############################################
        # handle format statement with label
        if 'format' in self.code.lower():
            self.label = self.label + " "
        ################################################
        ###############################################
        # handle return statement with label
        if 'return' in self.code.lower():
            self.label = self.label + " "
        ################################################
        # replace all .gt., .lt., etc with >, <, etc
        if ".gt." in self.code.lower():
            self.code = self.code.lower().replace(".gt."," > ")
        if ".lt." in self.code.lower():
            self.code = self.code.lower().replace(".lt."," < ")
        if ".eq." in self.code.lower():
            self.code = self.code.lower().replace(".eq."," == ")
        if ".ge." in self.code.lower():
            self.code = self.code.lower().replace(".ge."," >= ")
        if ".le." in self.code.lower():
            self.code = self.code.lower().replace(".le."," <= ")
        if ".ne." in self.code.lower():
            self.code = self.code.lower().replace(".ne."," /= ")
        # Assemble the converted line from comment / label / code parts.
        if self.isComment and self.line[1:].isspace():
            self.converted_line = '\n'
        elif self.isComment:
            self.converted_line = '!' + line[1:]
        elif self.isNewComment:
            self.converted_line = line
        elif self.isCpp:
            self.converted_line = self.code
        elif not self.label.isspace():
            self.converted_line = self.label + self.code
        else:
            self.converted_line = self.code
        # Pull the filetype
        global filetype
        ###############################################
        # Ignore Module|Subroutine|Program|Function if it is a comment
        # Names must start with a non-digit but can contain non-digits
        if not((self.isComment) or (self.isNewComment)):
            if ( self.code.lower().lstrip(' ').startswith('module')):
                #print("DEBUG: Match 1:", self.code.lower())
                # Modules do not have "("
                m = re.match('(module)\s(\D\w+)',self.code.lower().strip(' '))
                if m:
                    filetype.append(m.group(1))
                    filename.append(m.group(2))
                    #print("DEBUG: MODULE used", self.code.lower(), filetype, filename)
            elif ( self.code.lower().lstrip(' ').startswith(('subroutine','program','function'))):
                #print("DEBUG: Match 2:", self.code.lower())
                m = re.match('(subroutine|program|function)\s(\D\w+)\(.*',self.code.lower().strip(' '))
                if m:
                    filetype.append(m.group(1))
                    filename.append(m.group(2))
                    #print("DEBUG SUBROUTINE used", self.code.lower(), filetype, filename)
                m = re.match('(program)\s(\D\w+)',self.code.lower().strip(' '))
                if m:
                    filetype.append(m.group(1))
                    filename.append(m.group(2))
                    #print("DEBUG: PROGRAM used", self.code.lower(), filetype, filename)
        ###############################################
        # Check if the current line is indented more (less) than the current line.
        global baseIndent
        global incrementalIndent
        global continuationIndent
        if ('subroutine' in self.code.lower()) or self.isComment or self.isCpp:
            self.Indent = 0
            self.prevIndent = max(baseIndent,self.prevIndent)
        elif self.isContinuation:
            # NOTE(review): reads the module-level prevIndent, not
            # self.prevIndent — confirm this asymmetry is intentional.
            self.Indent = prevIndent + continuationIndent
        elif self.code.lower().lstrip(' ').rstrip(' ') == 'end\n':
            self.Indent = 0
            self.converted_line = self.code.rstrip('\n') + " " + filetype[-1] + " " + filename[-1] + "\n"
            del filetype[-1]
            del filename[-1]
        elif (self.code.lstrip(' ').lower().startswith(('if ','if(')) and \
                self.code.rstrip(' ').lower().endswith('then\n')):
            self.Indent = max(baseIndent,self.prevIndent)
            self.prevIndent = self.Indent + incrementalIndent
        elif (self.code.lstrip(' ')[0:3].lower() == 'do '):
            self.Indent = max(baseIndent,self.prevIndent)
            self.prevIndent = self.Indent + incrementalIndent
        elif (self.code.lstrip(' ')[0:4].lower() == 'end '):
            self.Indent = max(baseIndent,self.prevIndent - incrementalIndent)
            self.prevIndent = self.Indent
        elif (self.code.lstrip(' ').lower().startswith(('else ','else\n'))):
            self.Indent = max(baseIndent,self.prevIndent - incrementalIndent)
            self.prevIndent = self.Indent + incrementalIndent
        else:
            m = re.match('(\s+)(\d+)(\s+)(.*)',self.converted_line)
            if m:
                self.Indent = 1
            else:
                self.Indent = max(baseIndent,self.prevIndent)
            self.prevIndent = self.Indent
        self.converted_line = self.converted_line.lstrip(' ').rjust(len( \
            self.converted_line.lstrip(' '))+self.Indent)
        # Ensure that there is a \n at the end of each line
        self.converted_line = self.converted_line.rstrip() + " \n"
    def continueLine(self):
        """Append the free-form continuation marker ' &' to this line."""
        self.converted_line = self.converted_line.rstrip() + " &\n"
    def analyze(self):
        """Classify the raw fixed-form line (comment / cpp / continuation /
        label / regular) based on its column layout, then run convert()."""
        line = self.line
        # Pull the first character from the line
        if len(line) > 0:
            firstChar = line[0]
        else:
            firstChar = ''
        # Check if the line contains a numeric label
        if len(line) > 1:
            self.label = line[0:5].rstrip(' ').lower() + ''
        else:
            self.label = ''
        # Pull the value in the location of a continuation character
        if len(line) >= 6:
            contChar = line[5]
        else:
            contChar = ''
        # Pull the first five characters, after the first character
        if len(line) > 1:
            firstFive = line[1:5]
        else:
            firstFive = ''
        # Check if the line is shorter than 6 characters, or longer than 73
        # debug, mdt :: might remove the check if it is long
        self.isShort = (len(line) <= 6)
        self.isLong = (len(line) > 73)
        # Check if the line is a comment
        self.isComment = firstChar in "cC*!"
        self.isCpp = (firstChar == '#')
        self.isNewComment = '!' in firstFive and not self.isComment
        # Now check to see if the line is a regular line
        self.isRegular = (not (self.isComment or self.isNewComment or self.isShort or self.isCpp))
        self.isContinuation = (not (contChar.isspace() or contChar == '0') and self.isRegular)
        # Check for 'const.h'
        if 'const.h' in self.line:
            global outfilen
            print (" *** File: " + outfilen + " *** ")
            print ("Warning :: \"include \'const.h\'\" needs to be replaced with \"use const\"")
        # Return the truncated line (if truncation occured)
        self.line = line
        self.convert()
    def __init__(self,line):
        """Store the raw line, seed state from the global prevIndent and
        immediately analyze/convert it."""
        # Convert line from fixed form to free form
        self.line = line
        self.converted_line = line
        self.comment = False
        self.isContinuation = False
        self.Indent = 0
        global prevIndent
        self.prevIndent = prevIndent
        self.analyze()
    def __repr__(self):
        # The string form of a FortranLine is its converted free-form text,
        # which is what the driver writes to the output file.
        return self.converted_line
# Check to make sure that a filename was passed
parser = argparse.ArgumentParser(description='This script converts a fixed-form .f file to a \
free form .f90 file')
parser.add_argument('files',help='REQUIRED. List of .f input files.',nargs="+")
parser.add_argument('-base',help='The base indentation. Default = 4',type=int,default=4)
parser.add_argument('-incr',help='The incremental indentation. Default = 2',type=int,default=2)
parser.add_argument('-cont',help='The continuation indentation. Default = 10',type=int,default=10)
args = parser.parse_args()
class bcolors:
    """ANSI escape sequences for colored/styled terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
# Driver: convert every input .f file to a sibling .f90 file, line by line,
# carrying DO/GO TO/CONTINUE bookkeeping in module-level lists that
# FortranLine.convert() reads and mutates.
if len(args.files) > 0:
    print ("")
    print ("*** f77tof90.py converts a fixed-form .f file to a free-form .f90 file")
    print ("*** and converts much of the f77 specific code to f90 code (e.g., ")
    print ("*** removes numerical 'do' labels and replaces 'continue' statements")
    print ("*** with 'end do'.")
    print ("*** ")
    print (bcolors.WARNING + "*** NOTE: This script is not perfect. It WILL NOT produce a compile-ready")
    print (bcolors.WARNING + "*** .f90 file. However, it will perform much of the conversion. The user")
    print (bcolors.WARNING + "*** MUST perform a final analysis / conversion of the code" + bcolors.ENDC)
    print ("*** ")
    print (bcolors.FAIL + "*** NOTE2: This script has problems with goto statements, and the ")
    print (bcolors.FAIL + "*** corresponding continue statements. The continue statements")
    print (bcolors.FAIL + "*** will likely be replaced with an End Do statement, and the label removed" + bcolors.ENDC)
    print ("")
    print ("-------------------")
    baseIndent = args.base
    incrementalIndent = args.incr
    continuationIndent = args.cont
    # for numArg in range(1,len(sys.argv)):
    #     infilen = sys.argv[numArg]
    for infilen in args.files:
        print ("")
        print ("Converting file: " + infilen)
        # Per-file conversion state shared with FortranLine via globals.
        prevIndent = 0
        filetype = []
        filename = []
        DoLoopList = []
        GoToList = []
        MismatchedLabelList = []
        ContinueStack = []
        # Grab the file name, excluding the '.f'
        name_len = len(infilen.rsplit('.',1)[0])
        extension = infilen.rsplit('.',1)[1]
        outfilen = infilen[:name_len] + '.' + extension + '90'
        infile = open(infilen, 'r')
        # NOTE(review): output handle is stashed as an attribute on the sys
        # module (FortranLine/analyze reference the global outfilen; the
        # handle itself is only used here) and is never closed — consider a
        # plain local with a context manager.
        sys.file = open(outfilen,'w')
        linestack = []
        for line in infile:
            newline = FortranLine(line)
            prevIndent = newline.prevIndent
            # A regular line flushes the pending stack (marking the previous
            # statement as continued when this one is a continuation line).
            if newline.isRegular:
                if newline.isContinuation and linestack:
                    linestack[0].continueLine()
                for l in linestack:
                    sys.file.write(str(l))
                linestack = []
            linestack.append(newline)
            ###############################################
            # CONTINUE for a GO TO label
            if(len(ContinueStack)>0):
                #print("DEBUG: printing continue statement ",ContinueStack[-1])
                linestack.append(ContinueStack[-1])
                del ContinueStack[-1]
            ###############################################
        for l in linestack:
            sys.file.write(str(l))
        infile.close()
        # Report any bookkeeping that did not resolve cleanly.
        if((len(DoLoopList)>0)):
            print("WARNING: DO(s) not matched with END DO: ",DoLoopList)
        if((len(GoToList)>0)):
            print("WARNING: GO TO label(s) not found ",GoToList)
        if((len(MismatchedLabelList)>0)):
            print("WARNING: Unexpected label(s) found ",MismatchedLabelList)
else:
print ("Usage: python f77tof90.py <list of .f files>") |
from django.db import models
from django.contrib.auth.models import User
from django.core.validators import MaxValueValidator, MinValueValidator
from django.urls import reverse
'''
class Right(models.Model):
name = models.CharField(max_length=100, unique=True, verbose_name="Right", help_text="Enter a right")
class Meta:
ordering = ["name"]
def __str__(self):
return self.name
'''
class MyUser(models.Model):
    """Profile wrapper around Django's built-in User (one-to-one)."""
    user = models.OneToOneField(User, unique=True, null=False, blank=False, on_delete=models.CASCADE)
    class Meta:
        ordering = ["user"]
    def __str__(self):
        return f"{self.user.username}"
class Address(models.Model):
    """Postal address of a cottage.

    PSC is presumably the Czech/Slovak postal code (PSČ) — TODO confirm.
    """
    town = models.CharField(max_length=40, unique=False, verbose_name="Town")
    street = models.CharField(max_length=40, unique=False, verbose_name="Street")
    number = models.IntegerField(blank=False, null=False, verbose_name="House number")
    PSC = models.IntegerField(blank=False, null=False, verbose_name="PSC")
    country = models.CharField(max_length=40, unique=False, verbose_name="Country")
    def __str__(self):
        return f"{self.street} {str(self.number)}, {self.town}, {str(self.PSC)}"
class Cottage(models.Model):
    """A rentable cottage owned by a site user, with its own address."""
    name = models.CharField(max_length=100, unique=False, verbose_name="Cottage name", help_text="Enter a cottage name")
    description = models.TextField(blank=True, null=True, verbose_name="About cottage",
                                   help_text="Write some info about your cottage (not required)")
    # Capacity: number of sleeping places available.
    spaces = models.IntegerField(blank=False, null=False, help_text="Enter a number of spaces the cottage has",
                                 verbose_name="Spaces")
    address = models.OneToOneField(Address, on_delete=models.CASCADE)
    # Owner; note this references auth.User directly, unlike the MyUser
    # foreign keys used elsewhere in this module.
    user = models.ForeignKey(User, blank=False, on_delete=models.CASCADE)
    class Meta:
        ordering = ["name"]
    def __str__(self):
        return self.name
class GroupMember(models.Model):
    """A person belonging to a user's reservation group."""
    # Display name of the group member.
    name = models.CharField(max_length=100, unique=False, verbose_name="Name")
    # Bug fix: the verbose_name had a user-visible typo ("mamber").
    is_child = models.BooleanField(default=False, verbose_name="Is this member a child")
    # Owning account; members are deleted together with their user.
    user = models.ForeignKey(MyUser, blank=False, on_delete=models.CASCADE)
    class Meta:
        ordering = ["name"]
    def __str__(self):
        return self.name
class Reservation(models.Model):
    """A booking of a cottage by a user for a date range, with the group
    members who take part."""
    start_date = models.DateField(verbose_name="Start date")
    end_date = models.DateField(verbose_name="End date")
    # Private reservations are presumably hidden from other users — TODO
    # confirm against the views that read this flag.
    is_private = models.BooleanField(default=True)
    user = models.ForeignKey(MyUser, blank=False, on_delete=models.CASCADE)
    members = models.ManyToManyField(GroupMember)
    cottage = models.ForeignKey(Cottage, blank=False, on_delete=models.CASCADE)
    def __str__(self):
        return f"{self.cottage}, {self.user}, {self.start_date} - {self.end_date}"
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
from pyexcel_xlsx import save_data
import xlrd
import pymysql.cursors
import os
# MySQL connection settings (values here are redacted placeholders).
# NOTE(review): real credentials belong in environment variables or a
# config file outside version control, not in source.
DB_HOST = "***.**.**.*8"
DB_PORT =222
DB_USER = "222"
DB_PASSWORD = "222"
DB_NAME = "222"
# Main routine
# 写Excel数据, xls格式,以sheet为单位
def save_xls_file(xls_header,xls_sheet, file_name):
    """Append a header row plus result rows to Sheet1 of an .xlsx file.

    If the file already exists, its current Sheet1 contents are read back
    first and the new section is appended after a blank spacer row; the
    whole sheet is then rewritten via pyexcel_xlsx.save_data.
    """
    sheet_data = []
    data = OrderedDict()
    if os.path.exists(file_name):
        # Open the existing workbook
        with xlrd.open_workbook(file_name) as rb:
            # Grab the first sheet
            sheet = rb.sheet_by_index(0)
            # Number of rows
            nrows = sheet.nrows
            # Copy every existing row into sheet_data
            for i in range(nrows):
                sheet_data.append(sheet.row_values(i))
            # Insert one blank row before each newly appended section
            list_space = []
            sheet_data.append(list_space)
    # Append the header row
    sheet_data.append(xls_header)
    for row_data in xls_sheet:
        sheet_data.append(row_data)
    # Register the sheet
    data.update({u"Sheet1": sheet_data})
    print(type(data))
    # Save as an xlsx file
    save_data(file_name,data)
# 读取txt文本 表头以及sql
f = open(r'F:\LWtest\header.txt','r',encoding='utf-8')
lines = f.readlines()
#names = locals()
sheetLists=[]
for line in lines:
sheetLists.append(list(map(str,line.split('\n'))))
print(sheetLists)
f = open(r'F:\LWtest\sql.txt','r',encoding='utf-8')
lines=f.readlines()
sqlLists=[]
for line in lines:
#用每一行末尾的换行符做分隔 保证sql语句的完整 可以替换任何不会使用的符号替代
sqlLists.append(list(map(str,line.split('\n'))))
#数据库连接
db = pymysql.connect(host=DB_HOST,
user=DB_USER,
port=DB_PORT,
passwd=DB_PASSWORD,
# init_command="set names utf8",
cursorclass=pymysql.cursors.SSDictCursor,
charset='utf8')
db.select_db(DB_NAME)
cur = db.cursor(cursor=pymysql.cursors.SSCursor)
#遍历表头list用来确定io操作的次数
try:
for i in range(len(sheetLists)):
sheet=sheetLists[i][0].split()#将每一行的表头以空格做分隔为单个字段
rowlist=[]
sql=sqlLists[i][0]
print(sql)
cur.execute(sql)
rows=cur.fetchall()
save_xls_file(sheet,rows,r"F:\LWtest\result3.xlsx")
except:
print("异常")
cur.close()
print("程序执行完毕")
|
from django.urls import path, include
urlpatterns = [  # todo
    # v1 API mounts: authentication and analysis sub-routers.
    path("auth/", include("api_analysis_dataset.v1.auth.urls"), name="api_v1_auth"),
    path("analysis/", include("api_analysis_dataset.v1.analysis.urls"), name="api_v1_analysis")
]
|
#!/usr/bin/env python3
#Author : Eric Normandeau (Louis Bernatchez' Lab)
#This script is available at https://github.com/enormandeau/stacks_workflow
"""Filtering SNPs in VCF file output by STACKS1 or STACKS2 minimaly
Usage:
<program> input_vcf min_cov percent_genotypes max_pop_fail min_mas output_vcf
Where:
input_vcf: is the name of the VCF file to filter (can be compressed with gzip, ending in .gz)
min_cov: minimum allele coverage to keep genotype <int>, eg: 4 or more
percent_genotypes: minimum percent of genotype data per population <float> eg: 50, 70, 80, 100
max_pop_fail: maximum number of populations that can fail percent_genotypes <int> eg: 1, 2, 3
min_mas: minimum number of samples with rare allele <int> eg: 2 or more
output_vcf: is the name of the filtered VCF (can be compressed with gzip, ending in .gz)
WARNING:
The filtering is done purely on a SNP basis. Loci are not taken into account.
"""
# Modules
import gzip
import sys
# Functions
def myopen(_file, mode="rt"):
    """Open ``_file`` transparently: gzip-compressed when its name ends in
    .gz, plain text otherwise."""
    opener = gzip.open if _file.endswith(".gz") else open
    return opener(_file, mode=mode)
def get_population_info(line):
    """Map each population name to the 0-based positions of its samples
    among the genotype columns of the VCF #CHROM header line.

    The population is the part of the sample name before the first
    underscore.  Also prints the number of populations found.
    """
    sample_pops = [field.split("_")[0] for field in line.split("\t")[9:]]
    unique_pops = sorted(set(sample_pops))
    print(" " + str(len(unique_pops)) + " populations")
    pop_dict = {pop: [] for pop in unique_pops}
    for position, pop in enumerate(sample_pops):
        pop_dict[pop].append(position)
    return pop_dict
def correct_genotype(genotype_info, min_cov):
    """Set a genotype to missing ("./.") when its coverage is below min_cov.

    genotype_info: one colon-separated VCF genotype field whose first
        sub-field is the GT call and second is the allele coverage.
    min_cov: minimum coverage required to keep the call.

    Returns the field unchanged when the call is already missing or the
    coverage is sufficient; otherwise replaces the GT sub-field with "./."
    while keeping the remaining sub-fields.

    Fix: a bare missing genotype "." (accepted as missing by the filtering
    loop below) previously crashed on the coverage lookup; it is now
    passed through like "./.".
    """
    infos = genotype_info.split(":")
    if infos[0] in ("./.", "."):
        return genotype_info
    cov = int(infos[1])
    if cov >= min_cov:
        return genotype_info
    return ":".join(["./."] + infos[1:])
# Parse user input
try:
input_vcf = sys.argv[1]
min_cov = int(sys.argv[2])
percent_genotypes = float(sys.argv[3])
max_pop_fail = int(sys.argv[4])
min_mas = int(sys.argv[5])
output_vcf = sys.argv[6]
except:
print(__doc__)
sys.exit(1)
# Validate parameter values
assert min_cov >= 0, "min_cov needs to be zero or a positive integer"
assert percent_genotypes >= 0 and percent_genotypes <= 100.0, "percent_genotypes needs to be a number between zero and 100"
assert max_pop_fail >= 0, "max_pop_fail needs to be a null or positive integer"
assert min_mas >= 1, "min_mas needs to be a non-null positive integer"
# Loop over VCF
with myopen(input_vcf) as infile:
with myopen(output_vcf, "wt") as outfile:
for line in infile:
l = line.strip().split("\t")
# Header info
if line.startswith("##"):
outfile.write(line)
continue
# Sample names
if line.startswith("#CHROM"):
outfile.write(line)
pop_info = get_population_info(line)
continue
# SNP line split into info and genotypes
infos = l[:9]
genotypes = l[9:]
# Correct genotypes with coverage below min_cov
genotypes = [correct_genotype(x, min_cov) for x in genotypes]
# Remove SNPs with MAS below threshold
mas = len([1 for x in genotypes if x.split(":")[0] in ["0/1", "1/0", "1/1"]])
# The second part of the test (after the or) is to take into
# account that we may be filtering a VCF that is a subset of a
# larger VCF file where a SNP could be 100% homozygote for the rare
# allele in the samples we kept, even if its MAF was globally less
# than 0.5 in the original VCF.
non_null_genotypes = [x for x in genotypes if not x.split(":")[0] in ["./.", "."]]
if mas < min_mas or mas > len(non_null_genotypes) - min_mas + 1:
continue
# Remove SNPs with too much missing data in too many populations
pops_failed = 0
max_missing_failed = False
for pop in pop_info:
sample_ids = pop_info[pop]
num_samples = len(sample_ids)
samples = [genotypes[i] for i in sample_ids]
num_missing = len([1 for x in samples if x.split(":")[0] == "./."])
prop_missing = num_missing / num_samples
if prop_missing > 1 - (percent_genotypes / 100):
pops_failed += 1
if pops_failed > max_pop_fail:
max_missing_failed = True
break
if not max_missing_failed:
# Create corrected line
line = "\t".join(infos + genotypes) + "\n"
outfile.write(line)
|
#-*- coding: iso-8859-15 -*-
''' Cartesian control: Torso and Foot trajectories '''
import config
import motion
def main():
    """Cartesian control demo: lower the torso, then swing the left foot.

    Warning: the robot must already be in PoseInit before this runs.
    Adapted from the aldebaran-sdk example
    modules/src/examples/python/motion_cartesianFoot.py.
    """
    motion_proxy = config.loadProxy("ALMotion")
    # Turn stiffness on, then move to the reference posture.
    config.StiffnessOn(motion_proxy)
    config.PoseInit(motion_proxy)
    frame = motion.SPACE_NAO
    all_axes = 63         # bit mask: control all six axes (x, y, z, wx, wy, wz)
    use_absolute = False  # False -> targets are relative to the current pose
    duration = 2.0        # seconds per interpolation
    # Lower the torso and shift it to the side.
    motion_proxy.positionInterpolation(
        "Torso", frame, [0.0, -0.07, -0.03, 0.0, 0.0, 0.0],
        all_axes, duration, use_absolute)
    # Left leg motion.
    motion_proxy.positionInterpolation(
        "LLeg", frame, [0.0, 0.06, 0.00, 0.0, 0.0, 0.8],
        all_axes, 2.0, use_absolute)
if __name__ == "__main__":
    # Run the cartesian-control demo when executed as a script.
    main()
|
"""Constants in AstroWeather component."""
DOMAIN = "astroweather"
CONF_FORECAST_TYPE = "forecast_type"
CONF_FORECAST_INTERVAL = "forecast_interval"
CONF_LATITUDE = "latitude"
CONF_LONGITUDE = "longitude"
CONF_ELEVATION = "elevation"
CONF_TIMEZONE_INFO = "timezone_info"
CONF_CONDITION_CLOUDCOVER_WEIGHT = "cloudcover_weight"
CONF_CONDITION_SEEING_WEIGHT = "seeing_weight"
CONF_CONDITION_TRANSPARENCY_WEIGHT = "transparency_weight"
CONF_METNO_ENABLED = "metno_enabled"
ASTROWEATHER_PLATFORMS = ["binary_sensor", "sensor", "weather"]
DEVICE_TYPE_WEATHER = "weather"
DEFAULT_ATTRIBUTION = "Powered by 7Timer and Met.no"
DEFAULT_FORECAST_INTERVAL = 60
FORECAST_INTERVAL_MIN = 30
FORECAST_INTERVAL_MAX = 240
DEFAULT_ELEVATION = 0
DEFAULT_TIMEZONE_INFO = "Etc/UTC"
DEFAULT_METNO_ENABLED = True
# In progress, make condition calculation customizable
DEFAULT_CONDITION_CLOUDCOVER_WEIGHT = 3
DEFAULT_CONDITION_SEEING_WEIGHT = 2
DEFAULT_CONDITION_TRANSPARENCY_WEIGHT = 1
ATTR_WEATHER_CLOUDCOVER = "cloudcover_percentage"
ATTR_WEATHER_CLOUDLESS = "cloudless_percentage"
ATTR_WEATHER_SEEING = "seeing_percentage"
ATTR_WEATHER_TRANSPARENCY = "transparency_percentage"
ATTR_WEATHER_LIFTED_INDEX = "lifted_index"
ATTR_WEATHER_CONDITION = "condition_percentage"
ATTR_WEATHER_CONDITION_PLAIN = "condition_plain"
ATTR_WEATHER_PREC_TYPE = "prec_type"
ATTR_WEATHER_DEEPSKY_TODAY_DAYNAME = "deepsky_forecast_today_dayname"
ATTR_WEATHER_DEEPSKY_TODAY_PLAIN = "deepsky_forecast_today_plain"
ATTR_WEATHER_DEEPSKY_TODAY_DESC = "deepsky_forecast_today_desc"
ATTR_WEATHER_DEEPSKY_TOMORROW_DAYNAME = "deepsky_forecast_tomorrow_dayname"
ATTR_WEATHER_DEEPSKY_TOMORROW_PLAIN = "deepsky_forecast_tomorrow_plain"
ATTR_WEATHER_DEEPSKY_TOMORROW_DESC = "deepsky_forecast_tomorrow_desc"
ATTR_WEATHER_SUN_NEXT_RISING = "sun_next_rising"
ATTR_WEATHER_SUN_NEXT_SETTING = "sun_next_setting"
ATTR_WEATHER_SUN_NEXT_RISING_NAUTICAL = "sun_next_rising_nautical"
ATTR_WEATHER_SUN_NEXT_SETTING_NAUTICAL = "sun_next_setting_nautical"
ATTR_WEATHER_SUN_NEXT_RISING_ASTRO = "sun_next_rising_astro"
ATTR_WEATHER_SUN_NEXT_SETTING_ASTRO = "sun_next_setting_astro"
ATTR_WEATHER_MOON_NEXT_RISING = "moon_next_rising"
ATTR_WEATHER_MOON_NEXT_SETTING = "moon_next_setting"
ATTR_WEATHER_MOON_PHASE = "moon_phase"
ATTR_WEATHER_WIND_SPEED_PLAIN = "wind_speed_plain"
ATTR_FORECAST_CLOUDCOVER = "cloudcover_percentage"
ATTR_FORECAST_CLOUDLESS = "cloudless_percentage"
ATTR_FORECAST_SEEING = "seeing_percentage"
ATTR_FORECAST_TRANSPARENCY = "transparency_percentage"
ATTR_FORECAST_LIFTED_INDEX = "lifted_index"
ATTR_FORECAST_HUMIDITY = "humidity"
ATTR_FORECAST_PREC_TYPE = "prec_type"
CONDITION_CLASSES = ["excellent", "good", "fair", "poor", "bad"]
|
from unittest import TestCase
import csv
from transform.Covid19USTransformer import Covid19USTransformer
from transform.Covid19CSV import Covid19CSV
class TestCovid19USTransformer(TestCase):
    """Tests for Covid19USTransformer.

    Fixtures are rows from the JHU CSSE US confirmed-cases time-series CSV:
    a header with 11 metadata columns (through 'Combined_Key') followed by
    69 date columns, plus county rows whose quoted 'Combined_Key' field
    contains embedded commas.
    """

    @staticmethod
    def _createVerificationObject(hdr_, str_):
        """Build the expected Covid19CSV object for one raw CSV input line."""
        # Input string may have embedded commas in a field.
        # 'aaa, bbb, "ccc, ddd", eee'
        strAsList = list(csv.reader([str_], delimiter=',', quotechar='"', skipinitialspace=True))[0]
        strAsTuple = tuple(strAsList)
        # Columns: 2 = iso3 (country), 6 = Province_State, 5 = Admin2 (county).
        obj = Covid19CSV(strAsTuple[2], strAsTuple[6], strAsTuple[5])
        hdrAsTuple = tuple(hdr_.split(','))
        dateAsList = []
        # Date columns start at index 11, right after 'Combined_Key' (index 10).
        for i in range(11, len(hdrAsTuple)):
            dateAsList.append(hdrAsTuple[i])
        valAsList = []
        for i in range(11, len(strAsTuple)):
            valAsList.append(strAsTuple[i])
        obj.setDate2Value(dateAsList, tuple(valAsList))
        # print(str(obj))
        return obj

    def setUp(self):
        """Create the shared header, raw input lines and verification objects."""
        self._header = "UID,iso2,iso3,code3,FIPS,Admin2,Province_State,Country_Region,Lat,Long_,Combined_Key,1/22/2020,1/23/2020,1/24/2020,1/25/2020,1/26/2020,1/27/2020,1/28/2020,1/29/2020,1/30/2020,1/31/2020,2/1/2020,2/2/2020,2/3/2020,2/4/2020,2/5/2020,2/6/2020,2/7/2020,2/8/2020,2/9/2020,2/10/2020,2/11/2020,2/12/2020,2/13/2020,2/14/2020,2/15/2020,2/16/2020,2/17/2020,2/18/2020,2/19/2020,2/20/2020,2/21/2020,2/22/2020,2/23/2020,2/24/2020,2/25/2020,2/26/2020,2/27/2020,2/28/2020,2/29/2020,3/1/2020,3/2/2020,3/3/2020,3/4/2020,3/5/2020,3/6/2020,3/7/2020,3/8/2020,3/9/2020,3/10/2020,3/11/2020,3/12/2020,3/13/2020,3/14/2020,3/15/2020,3/16/2020,3/17/2020,3/18/2020,3/19/2020,3/20/2020,3/21/2020,3/22/2020,3/23/2020,3/24/2020,3/25/2020,3/26/2020,3/27/2020,3/28/2020,3/29/2020,3/30/2020"
        # Raw CSV lines as read from file.  The first eight (Alabama) lines
        # have spaces after commas; the remainder (Arkansas) do not — both
        # variants must parse identically with skipinitialspace=True.
        self._inputListFromFile = [
            '84001053, US, USA, 840, 1053, Escambia, Alabama, US, 31.1256789, -87.15918694, "Escambia, Alabama, US", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1',
            '84001055, US, USA, 840, 1055, Etowah, Alabama, US, 34.04567266, -86.04051873, "Etowah, Alabama, US", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 6, 6, 6',
            '84001057, US, USA, 840, 1057, Fayette, Alabama, US, 33.72076938, -87.73886638, "Fayette, Alabama, US", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1',
            '84001059, US, USA, 840, 1059, Franklin, Alabama, US, 34.44235334, -87.84289505, "Franklin, Alabama, US", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 3, 3, 3, 3, 3',
            '84001061, US, USA, 840, 1061, Geneva, Alabama, US, 31.09389027, -85.83572839, "Geneva, Alabama, US", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0',
            '84001063, US, USA, 840, 1063, Greene, Alabama, US, 32.85504247, -87.95684022, "Greene, Alabama, US", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 3, 3',
            '84001065, US, USA, 840, 1065, Hale, Alabama, US, 32.76039258, -87.63284988, "Hale, Alabama, US", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1',
            '84001067, US, USA, 840, 1067, Henry, Alabama, US, 31.51148016, -85.24267944, "Henry, Alabama, US", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0',
            '84005025,US,USA,840,5025,Cleveland,Arkansas,US,33.89723187,-92.18537045,"Cleveland, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,3,3,3,3,5,5,5',
            '84005027,US,USA,840,5027,Columbia,Arkansas,US,33.21230701,-93.22642793,"Columbia, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1',
            '84005029,US,USA,840,5029,Conway,Arkansas,US,35.26205537,-92.70506566,"Conway, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1',
            '84005031,US,USA,840,5031,Craighead,Arkansas,US,35.83018283,-90.63235729,"Craighead, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,4,4,4,4,4,4,6,6',
            '84005033,US,USA,840,5033,Crawford,Arkansas,US,35.58928601,-94.2446814,"Crawford, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1',
            '84005035,US,USA,840,5035,Crittenden,Arkansas,US,35.21247318,-90.30839406,"Crittenden, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,6,7,9,14,17,17',
            '84005037,US,USA,840,5037,Cross,Arkansas,US,35.29631396,-90.77185818,"Cross, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1',
            '84005039,US,USA,840,5039,Dallas,Arkansas,US,33.97042763,-92.65167437,"Dallas, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            '84005041,US,USA,840,5041,Desha,Arkansas,US,33.83011025,-91.25500948,"Desha, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,2,2,2,2,2',
            '84005043,US,USA,840,5043,Drew,Arkansas,US,33.59035001,-91.71777921,"Drew, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1',
            '84005045,US,USA,840,5045,Faulkner,Arkansas,US,35.14719007,-92.33717519,"Faulkner, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,10,14,23,24,27,29,30,30',
            '84005047,US,USA,840,5047,Franklin,Arkansas,US,35.51202821,-93.89299569,"Franklin, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            '84005049,US,USA,840,5049,Fulton,Arkansas,US,36.38177105,-91.81729127,"Fulton, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            '84005051,US,USA,840,5051,Garland,Arkansas,US,34.57692074,-93.14921604,"Garland, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,9,10,16,18,20,20,25,26',
            '84005053,US,USA,840,5053,Grant,Arkansas,US,34.29017991,-92.42320562,"Grant, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,3,3,3,3,3,3,3,3',
            '84005055,US,USA,840,5055,Greene,Arkansas,US,36.1173546,-90.55832668,"Greene, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1',
            '84005057,US,USA,840,5057,Hempstead,Arkansas,US,33.73325583,-93.66935133,"Hempstead, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,1',
            '84005059,US,USA,840,5059,Hot Spring,Arkansas,US,34.31709259,-92.95396325,"Hot Spring, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1',
            '84005061,US,USA,840,5061,Howard,Arkansas,US,34.09007427,-93.9934871,"Howard, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1',
            '84005063,US,USA,840,5063,Independence,Arkansas,US,35.74242707,-91.57001641,"Independence, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,3,3,3,3,3,3,3,3',
            '84005065,US,USA,840,5065,Izard,Arkansas,US,36.09604046,-91.90847954,"Izard, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            '84005067,US,USA,840,5067,Jackson,Arkansas,US,35.59802983,-91.21494602,"Jackson, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            '84005069,US,USA,840,5069,Jefferson,Arkansas,US,34.26767081,-91.92619839,"Jefferson, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,3,3,3,3,3,3,3,9,21,21,22,23,23,25,25,26',
            '84005071,US,USA,840,5071,Johnson,Arkansas,US,35.56759135,-93.46036368,"Johnson, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1',
            '84005073,US,USA,840,5073,Lafayette,Arkansas,US,33.24116713,-93.60677071,"Lafayette, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            '84005075,US,USA,840,5075,Lawrence,Arkansas,US,36.04188196,-91.10867198,"Lawrence, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1,1,1,1,1',
            '84005077,US,USA,840,5077,Lee,Arkansas,US,34.78498904,-90.78383866,"Lee, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            '84005079,US,USA,840,5079,Lincoln,Arkansas,US,33.95317155,-91.74002806,"Lincoln, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,5,5,6,6,6',
            '84005081,US,USA,840,5081,Little River,Arkansas,US,33.70375665,-94.23468591,"Little River, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            '84005083,US,USA,840,5083,Logan,Arkansas,US,35.21413234,-93.71951016,"Logan, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            '84005085,US,USA,840,5085,Lonoke,Arkansas,US,34.75392199,-91.88742357,"Lonoke, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1',
            '84005087,US,USA,840,5087,Madison,Arkansas,US,36.01038185,-93.72524943,"Madison, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            '84005089,US,USA,840,5089,Marion,Arkansas,US,36.26844485,-92.68451899,"Marion, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            '84005091,US,USA,840,5091,Miller,Arkansas,US,33.31403423,-93.89285258,"Miller, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            '84005093,US,USA,840,5093,Mississippi,Arkansas,US,35.76271485,-90.0519437,"Mississippi, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            '84005095,US,USA,840,5095,Monroe,Arkansas,US,34.6815935,-91.20540287,"Monroe, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            '84005097,US,USA,840,5097,Montgomery,Arkansas,US,34.53704874,-93.65824478,"Montgomery, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            '84005099,US,USA,840,5099,Nevada,Arkansas,US,33.66340119,-93.30632432,"Nevada, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1',
            '84005101,US,USA,840,5101,Newton,Arkansas,US,35.91947491,-93.21612969,"Newton, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            '84005103,US,USA,840,5103,Ouachita,Arkansas,US,33.58839816,-92.87795984,"Ouachita, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            '84005105,US,USA,840,5105,Perry,Arkansas,US,34.9459153,-92.94372564,"Perry, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1',
            '84005107,US,USA,840,5107,Phillips,Arkansas,US,34.43268455,-90.84800154,"Phillips, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            '84005109,US,USA,840,5109,Pike,Arkansas,US,34.16250414,-93.65789349,"Pike, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1',
            '84005111,US,USA,840,5111,Poinsett,Arkansas,US,35.57433534,-90.66268713,"Poinsett, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,3,3,3,3,3,3,3,3',
            '84005113,US,USA,840,5113,Polk,Arkansas,US,34.48254879,-94.22728802,"Polk, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1',
            '84005115,US,USA,840,5115,Pope,Arkansas,US,35.44871474,-93.03212219,"Pope, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1',
            '84005117,US,USA,840,5117,Prairie,Arkansas,US,34.83624419,-91.55162157,"Prairie, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            '84005119,US,USA,840,5119,Pulaski,Arkansas,US,34.77054088,-92.31355101,"Pulaski, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,61,64,78,83,88,92,93,94',
            '84005121,US,USA,840,5121,Randolph,Arkansas,US,36.3415714,-91.02455531,"Randolph, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1',
            '84005123,US,USA,840,5123,St. Francis,Arkansas,US,35.02201976,-90.74828138,"St. Francis, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            '84005125,US,USA,840,5125,Saline,Arkansas,US,34.64916145,-92.67583224,"Saline, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,4,4,4,4,4,4,6,6',
            '84005127,US,USA,840,5127,Scott,Arkansas,US,34.85588887,-94.0632176,"Scott, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            '84005129,US,USA,840,5129,Searcy,Arkansas,US,35.9109364,-92.69936482,"Searcy, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,2,2,2,2,2,2',
            '84005131,US,USA,840,5131,Sebastian,Arkansas,US,35.19605503,-94.27162713,"Sebastian, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,2,2,2,2,2,5',
            '84005133,US,USA,840,5133,Sevier,Arkansas,US,33.99780401,-94.2424869,"Sevier, Arkansas, US",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1'
        ]
        # Date headers only (skip the 11 metadata columns).
        self._dateList = self._header.split(',')[11:]
        self._verificationObjects = []
        for l in self._inputListFromFile:
            self._verificationObjects.\
            append(TestCovid19USTransformer._createVerificationObject(self._header, l))

    def test_process_header(self):
        """processHeader must register only recognized metadata and date columns."""
        def test1():
            # Empty-string header: nothing registered.
            t1 = Covid19USTransformer()
            self.assertEqual(0, len(t1._colsToRead))
            t1.processHeader('')
            self.assertEqual(0, len(t1._colsToRead))
            self.assertEqual(0, len(t1._dateList))
        def test2():
            # Empty-list header: nothing registered.
            t1 = Covid19USTransformer()
            self.assertEqual(0, len(t1._colsToRead))
            t1.processHeader([])
            self.assertEqual(0, len(t1._colsToRead))
            self.assertEqual(0, len(t1._dateList))
        def test3():
            # Only recognized column names count; unknown names are ignored.
            t1 = Covid19USTransformer()
            self.assertEqual(0, len(t1._colsToRead))
            t1.processHeader(['abc'])
            self.assertEqual(0, len(t1._colsToRead))
            t1.processHeader(['iso3'])
            self.assertEqual(1, len(t1._colsToRead))
            t1.processHeader(['iso3', 'Province_State'])
            self.assertEqual(2, len(t1._colsToRead))
            t1.processHeader(['iso3', 'Province_State', 'abracadabra'])
            self.assertEqual(2, len(t1._colsToRead))
            self.assertEqual(0, len(t1._dateList))
        def test4():
            # Progressively longer prefixes of the real header: date columns
            # are only registered once we pass 'Combined_Key' (index 10).
            t1 = Covid19USTransformer()
            self.assertEqual(0, len(t1._colsToRead))
            # '"UID,iso2,iso3,code3,FIPS,Admin2,Province_State,Country_Region,Lat,Long_,Combined_Key,1/22/2020...'
            # Before 'Combined_Key'. 'Combined_Key' is index #10.
            t1.processHeader(self._header.split(',')[:10])
            self.assertEqual(3, len(t1._colsToRead))
            self.assertEqual(0, len(t1._dateList))
            # Include 'Combined_Key'. 'Combined_Key' is index #10.
            t1.processHeader(self._header.split(',')[:11])
            self.assertEqual(3, len(t1._colsToRead))
            self.assertEqual(0, len(t1._dateList))
            # # Include date after 'Combined_Key'.
            t1.processHeader(self._header.split(',')[:12])
            self.assertEqual(4, len(t1._colsToRead))
            self.assertEqual(1, len(t1._dateList))
            t1.processHeader(self._header.split(',')[:13])
            self.assertEqual(5, len(t1._colsToRead))
            self.assertEqual(2, len(t1._dateList))
            # All date columns.
            t1.processHeader(self._header.split(','))
            self.assertEqual(72, len(t1._colsToRead))
            self.assertEqual(69, len(t1._dateList))
        test1()
        test2()
        test3()
        test4()

    def test_columns_to_read(self):
        """listOfIndexesOfColumnsToRead returns the 3 metadata + all date indexes."""
        t1 = Covid19USTransformer()
        colAsTuple = tuple(self._header.split(','))
        t1.processHeader(colAsTuple)
        self.assertEqual(0, len(t1._objColl))
        headerAsTuple = tuple(self._header.split(','))
        firstDatePos = headerAsTuple.index('Combined_Key') + 1
        self.assertTrue(firstDatePos > 1)
        numDateCols = len(headerAsTuple) - firstDatePos
        self.assertTrue(numDateCols > 0)
        l1 = t1.listOfIndexesOfColumnsToRead()
        # 3 metadata columns (iso3, Province_State, Admin2) plus every date.
        self.assertEqual(numDateCols + 3, len(l1))
        self.assertEqual(0, len(t1._objColl))
        self.assertEqual(numDateCols, len(t1._dateList))
        l1AsTuple = tuple(l1)
        countryCol = headerAsTuple.index('iso3')
        self.assertTrue(countryCol >= 0)
        self.assertTrue(l1AsTuple.index(countryCol) >= 0)
        stateCol = headerAsTuple.index('Province_State')
        self.assertTrue(stateCol >= 0)
        self.assertTrue(l1AsTuple.index(stateCol) >= 0)
        countyCol = headerAsTuple.index('Admin2')
        self.assertTrue(countyCol >= 0)
        self.assertTrue(l1AsTuple.index(countyCol) >= 0)
        # Check all date column header numbers are included.
        for i in range(firstDatePos, len(headerAsTuple)):
            self.assertTrue(l1AsTuple.index(i) >= 0)
        self.assertEqual(tuple(self._dateList), tuple(t1._dateList))

    @staticmethod
    def _createListFromCSVLine(str_):
        """Split one raw CSV line into fields, honoring quoted commas."""
        # Fields may have commas embedded. Cannot use split.
        # For 'US, 31.12, -87.15, "Escambia, Alabama, US", 1/22/2020'
        return list(csv.reader([str_], delimiter=',', quotechar='"', skipinitialspace=True))[0]

    def test_create_object(self):
        """createObject builds an object equal to the independently-built fixture."""
        t1 = Covid19USTransformer()
        headerAsTuple = tuple(self._header.split(','))
        firstDatePos = headerAsTuple.index('Combined_Key') + 1
        numDateCols = len(headerAsTuple) - firstDatePos
        t1.processHeader(self._header.split(','))
        colList = t1.listOfIndexesOfColumnsToRead()
        self.assertEqual(numDateCols + 3, len(colList))
        for i in range(0, len(self._inputListFromFile)):
            line = self._inputListFromFile[i]
            lineAsList = TestCovid19USTransformer._createListFromCSVLine(line)
            # print("xx = ", str(lineAsList))
            rawDataList = []
            for col in colList:
                rawDataList.append(lineAsList[col])
            # print(str(tuple(rawDataList)))
            o1 = t1.createObject(tuple(rawDataList))
            self.assertIsNotNone(o1)
            self.assertEqual(str(self._verificationObjects[i]), str(o1))
            # NOTE(review): redundant — the for-loop already advances i.
            i = i + 1

    def test_add_to_collection(self):
        """addToCollection stores objects in input order."""
        t1 = Covid19USTransformer()
        self.assertEqual(0, len(t1._objColl))
        t1.processHeader(self._header.split(','))
        colList = t1.listOfIndexesOfColumnsToRead()
        for line in self._inputListFromFile:
            lineAsList = TestCovid19USTransformer._createListFromCSVLine(line)
            rawDataList = []
            for col in colList:
                rawDataList.append(lineAsList[col])
            t1.addToCollection(t1.createObject(tuple(rawDataList)))
        self.assertEqual(len(self._inputListFromFile), len(t1._objColl))
        i = 0
        for obj in t1._objColl:
            self.assertEqual(str(self._verificationObjects[i]), str(obj))
            i = i + 1

    def test_get_collection(self):
        """getCollection mirrors the internal collection, in input order."""
        t1 = Covid19USTransformer()
        self.assertEqual(0, len(t1._objColl))
        t1.processHeader(self._header.split(','))
        colList = t1.listOfIndexesOfColumnsToRead()
        for line in self._inputListFromFile:
            lineAsList = TestCovid19USTransformer._createListFromCSVLine(line)
            rawDataList = []
            for col in colList:
                rawDataList.append(lineAsList[col])
            t1.addToCollection(t1.createObject(tuple(rawDataList)))
        self.assertEqual(len(self._verificationObjects), len(t1.getCollection()))
        i = 0
        for obj in t1.getCollection():
            self.assertEqual(str(self._verificationObjects[i]), str(obj))
            i = i + 1

    def test_process_line(self):
        """processLine drives header parsing (isHeader=True) and row ingestion."""
        t1 = Covid19USTransformer()
        self.assertEqual(0, len(t1._colsToRead))
        self.assertEqual(0, len(t1._dateList))
        self.assertEqual(0, len(t1._objColl))
        t1.processLine(self._header.split(','), True)
        self.assertEqual(72, len(t1._colsToRead))
        self.assertEqual(69, len(t1._dateList))
        self.assertEqual(0, len(t1._objColl))
        for line in self._inputListFromFile:
            lineAsList = TestCovid19USTransformer._createListFromCSVLine(line)
            t1.processLine(lineAsList, False)
        # Column/date registration is unchanged by data lines.
        self.assertEqual(72, len(t1._colsToRead))
        self.assertEqual(69, len(t1._dateList))
        self.assertEqual(len(self._inputListFromFile), len(t1._objColl))
        i = 0
        for obj in t1.getCollection():
            self.assertEqual(str(self._verificationObjects[i]), str(obj))
            # print('{0}: {1}'.format(i, str(obj)))
            i = i + 1
        self.assertEqual(72, len(t1._colsToRead))
        self.assertEqual(69, len(t1._dateList))
        self.assertEqual(len(self._inputListFromFile), len(t1._objColl))
|
__author__ = 'Vineets'
def isprime(num):
    """Return True if num is a prime number.

    Fix: numbers below 2 (0, 1, negatives) are not prime; the original
    returned True for them because the trial-division loop never ran.
    """
    if num < 2:
        return False
    i = 2
    # Trial division up to sqrt(num) is sufficient.
    while i * i <= num:
        if num % i == 0:
            return False
        i += 1
    return True
def mapper(data):
    """Map each number to its distinct prime factors.

    Returns a dict {prime: [numbers in *data* having that prime factor]},
    with numbers appended in input order.

    Fix: the original scanned every divisor in 2..num and ran a primality
    test on each (O(n*sqrt(n)) per number); direct trial-division
    factorization yields the same output in O(sqrt(n)) per number and
    needs no separate isprime() call.
    """
    output = {}
    for num in data:
        remaining = num
        factor = 2
        while factor * factor <= remaining:
            if remaining % factor == 0:
                # First hit at each factor is guaranteed prime.
                output.setdefault(factor, []).append(num)
                while remaining % factor == 0:
                    remaining //= factor
            factor += 1
        if remaining > 1:
            # Whatever is left after dividing out small factors is prime.
            output.setdefault(remaining, []).append(num)
    return output
def reducer(data):
    """Reduce mapper output to a list of [key, sum-of-values] pairs."""
    return [[key, sum(values)] for key, values in data.items()]
if __name__ == '__main__':
    # Demo run of the map/reduce pipeline.
    # Fix: the Python-2-only `print 'result:', red_out` statement is a
    # syntax error under Python 3; use the print() function instead.
    data = [15, 21, 24, 30, 49]
    map_out = mapper(data)
    red_out = reducer(map_out)
    print('result:', red_out)
|
from pydantic import BaseSettings
from typing import Any
import json
class Settings(BaseSettings):
    """Application configuration (pydantic BaseSettings).

    Field defaults below can be overridden via environment variables or the
    local .env file (see the nested Config class).
    """
    DEBUG: bool = False
    TESTING: str = ''
    PROJECT_NAME: str = '2343'
    PROJECT_API_V1: str = '1.1'
    # NOTE(review): hard-coded database credentials in source — presumably a
    # dev default overridden via the environment; confirm this never reaches
    # production and consider moving it to a secret store.
    SQLALCHEMY_DATABASE_URI: str = 'postgresql://dbuser:dbpass@localhost:5432/agros-stage'
    SQLALCHEMY_TRACK_MODIFICATIONS: bool = False
    # Engine/pool tuning defaults.
    SQLALCHEMY_ENGINE_OPTIONS_POOL_PRE_PING: bool = True
    SQLALCHEMY_ENGINE_OPTIONS_POOL_RECYCLE: int = 200
    SQLALCHEMY_ENGINE_OPTIONS_POOL_TIMEOUT: int = 110
    # HEADERS
    AWS_JWT_PREFIX: str = 'Bearer'
    # OTROS (misc)
    TIME_ZONE: str = 'America/Lima'
    # LOGS
    LOG_NAME_GROUP: str = ''

    class Config:
        # pydantic settings config: also load values from a local .env file.
        env_file = ".env"


# NOTE(review): this module-level name shadows the nested Config class above;
# a name like `settings` would be clearer — verify callers before renaming.
Config = Settings()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
roc_crossval_comp.py
Uses binary classifiers (category splits 2) and published classifiers to
generate a Receiver Operating Characteristic (ROC) curve with calculated
area under the curve (AUC).
usage: roc_crossval_comp.py
"""
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.metrics import roc_curve, auc, precision_recall_curve
from sklearn.cross_validation import StratifiedKFold, StratifiedShuffleSplit
from mut_group_pred_pack import *
from cross_val import get_x_y_scorer_folds
from plot_pca import plot_pdf
from roc_crossval import plot_roc_figure
def get_colors_labels(num_cats):
    """Return the (colors, labels) pair for a category-split code.

    Supported codes: 2, 22, 3, 33, 4.  Raises KeyError otherwise.
    """
    palette = {
        2: ('#027878',
            'Null / Pathogenic'),
        22: ('#f69568',
             'Null+Autism / Pathogenic'),
        3: (('#027878', '#f69568', '#e05254'),
            ('Null', 'Autism', 'Pathogenic')),
        33: (('#027878', '#f69568', '#e05254'),
             ('Null', 'Autism+Somatic', 'PHTS')),
        4: (('#027878', '#f69568', '#fdc865', '#e05254'),
            ('Null', 'Autism', 'Somatic', 'PHTS')),
    }
    return palette[num_cats]
def plot_polyphen_roc(num_cats):
    """Overlay the mean cross-validated ROC curve of PolyPhen-2 scores
    (green) on the current matplotlib figure, with AUC in the legend.

    num_cats: category-split code passed to get_x_y_scorer_folds.
    Uses the same fold split (n_folds=6, random_state=0) as the other
    plot_* functions in this module so the curves are comparable.
    """
    color = 'g'
    label = 'Polyphen-2'
    pph2_dict = make_pph2_dict()
    # NOTE(review): pten_mutations is not defined in this function — it
    # presumably comes from `mut_group_pred_pack import *`; confirm.
    X, y, scorer, folds = get_x_y_scorer_folds(pten_mutations, num_cats)
    # Old sklearn.cross_validation API: labels passed to the constructor.
    cv = StratifiedKFold(y, n_folds=6, shuffle=True, random_state=0)
    # hacky way to make pph2 array
    mut_list = start_mut_list()
    pph2_array = []
    for mut_dict in mut_list:
        wtres = mut_dict["wtres"]
        codon = mut_dict["codon"]
        mutres = mut_dict["mutres"]
        pph2prob = get_pph2_prob(pph2_dict, wtres, codon, mutres, "pph2_prob")
        pph2_array.append(pph2prob)
    pph2_array = np.array(pph2_array)
    # Accumulate TPR interpolated onto a common FPR grid, then average.
    mean_tpr = 0.0
    mean_fpr = np.linspace(0,1,100)
    for i, (train, test) in enumerate(cv):
        # PolyPhen-2 is precomputed, so no training: just score the test fold.
        probas_ = pph2_array[test]
        # Compute ROC curve and area the curve
        fpr, tpr, thresholds = roc_curve(y[test], probas_)
        mean_tpr += interp(mean_fpr, fpr, tpr)
        mean_tpr[0] = 0.0
        # Per-fold AUC (only used by the commented-out per-fold plot below).
        roc_auc = auc(fpr, tpr)
        # plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i,
        # roc_auc))
    mean_tpr /= len(cv)
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)
    plt.plot(mean_fpr, mean_tpr, color,
             label='ROC %s (%0.2f)' % (label, mean_auc), lw=2)
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic')
    plt.legend(loc="lower right")
def plot_provean_roc(num_cats):
    """Overlay the mean cross-validated ROC curve of PROVEAN scores (blue)
    on the current matplotlib figure, with AUC in the legend.

    num_cats: category-split code passed to get_x_y_scorer_folds.
    Uses the same fold split (n_folds=6, random_state=0) as the other
    plot_* functions in this module so the curves are comparable.
    """
    color = 'b'
    label = 'Provean'
    provean_dict = make_provean_sift_dict()
    # NOTE(review): pten_mutations is not defined in this function — it
    # presumably comes from `mut_group_pred_pack import *`; confirm.
    X, y, scorer, folds = get_x_y_scorer_folds(pten_mutations, num_cats)
    cv = StratifiedKFold(y, n_folds=6, shuffle=True, random_state=0)
    # hacky way to make pph2 array
    mut_list = start_mut_list()
    provean_array = []
    for mut_dict in mut_list:
        variant = "{}{}{}".format(mut_dict["wtres"], mut_dict["codon"],
                                  mut_dict["mutres"])
        provean_score = get_provean_sift_score(provean_dict, variant, 'PSCORE')
        # NOTE(review): 1 - score presumably flips PROVEAN's orientation so
        # that larger values indicate the positive class — confirm the
        # score scale before relying on this.
        provean_array.append(1 - provean_score)
    provean_array = np.array(provean_array)
    # Accumulate TPR interpolated onto a common FPR grid, then average.
    mean_tpr = 0.0
    mean_fpr = np.linspace(0,1,100)
    for i, (train, test) in enumerate(cv):
        # PROVEAN is precomputed, so no training: just score the test fold.
        probas_ = provean_array[test]
        # Compute ROC curve and area the curve
        fpr, tpr, thresholds = roc_curve(y[test], probas_)
        mean_tpr += interp(mean_fpr, fpr, tpr)
        mean_tpr[0] = 0.0
        roc_auc = auc(fpr, tpr)
        # plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i,
        # roc_auc))
    mean_tpr /= len(cv)
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)
    plt.plot(mean_fpr, mean_tpr, color,
             label='ROC %s (%0.2f)' % (label, mean_auc), lw=2)
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic')
    plt.legend(loc="lower right")
def plot_varmod_roc(num_cats):
    """Overlay the mean cross-validated ROC curve for VarMod probabilities onto the current figure.

    Scores come from the precomputed VarMod table (no model fitting); uses the
    same 6-fold stratified split as the other plots.  Relies on module-level
    `pten_mutations`.
    """
    color = 'r'
    label = 'Varmod'
    varmod_dict = make_var_mod_dict()
    X, y, scorer, folds = get_x_y_scorer_folds(pten_mutations, num_cats)
    # NOTE(review): old sklearn cross-validation API (StratifiedKFold(y, n_folds=...)).
    cv = StratifiedKFold(y, n_folds=6, shuffle=True, random_state=0)
    # hacky way to make pph2 array
    mut_list = start_mut_list()
    varmod_array = []
    for mut_dict in mut_list:
        wtres = mut_dict["wtres"]
        codon = mut_dict["codon"]
        mutres = mut_dict["mutres"]
        varmod_score = get_var_mod_score(varmod_dict, wtres, codon, mutres,
                                         'VarMod Probability')
        varmod_array.append(varmod_score)
    varmod_array = np.array(varmod_array)
    mean_tpr = 0.0
    mean_fpr = np.linspace(0,1,100)
    for i, (train, test) in enumerate(cv):
        # Only the held-out fold is scored; the precomputed array needs no training.
        probas_ = varmod_array[test]
        # Compute ROC curve and area the curve
        fpr, tpr, thresholds = roc_curve(y[test], probas_)
        # Accumulate per-fold TPR on a common FPR grid; averaged after the loop.
        mean_tpr += interp(mean_fpr, fpr, tpr)
        mean_tpr[0] = 0.0
        roc_auc = auc(fpr, tpr)
        # plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i,
        # roc_auc))
    mean_tpr /= len(cv)
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)
    plt.plot(mean_fpr, mean_tpr, color,
             label='ROC %s (%0.2f)' % (label, mean_auc), lw=2)
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic')
    plt.legend(loc="lower right")
def plot_comp_roc_figure(pten_mutations, num_cats):
    """Overlay the mean cross-validated ROC curve of the PTENpred SVM onto the current figure.

    Unlike the precomputed-score plots, each fold fits a grid-searched RBF SVC
    on the training split and scores the held-out split with predicted
    probabilities.
    """
    param_grid = [
        {'C': [.1, 1, 10, 100, 1000 ],
         'gamma': [0.1, 0.01, 0.001, 0.0001]},
    ]
    X, y, scorer, folds = get_x_y_scorer_folds(pten_mutations, num_cats)
    # NOTE(review): old sklearn APIs (StratifiedKFold(y, n_folds=...) and
    # class_weight='auto'); modern sklearn uses n_splits/split() and 'balanced'.
    cv = StratifiedKFold(y, n_folds=6, shuffle=True, random_state=0)
    svc = svm.SVC(kernel='rbf', cache_size=2000, class_weight='auto',
                  probability=True)
    classifier = GridSearchCV(svc, param_grid=param_grid, scoring=scorer,
                              cv=folds, n_jobs=-1)
    mean_tpr = 0.0
    mean_fpr = np.linspace(0,1,100)
    color, label1 = get_colors_labels(num_cats)
    for i, (train, test) in enumerate(cv):
        # Re-run the full hyper-parameter search on each training fold, then
        # take positive-class probabilities on the held-out fold.
        probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
        # Compute ROC curve and area the curve
        fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
        mean_tpr += interp(mean_fpr, fpr, tpr)
        mean_tpr[0] = 0.0
        roc_auc = auc(fpr, tpr)
        # plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i,
        # roc_auc))
    mean_tpr /= len(cv)
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)
    plt.plot(mean_fpr, mean_tpr, color,
             label='ROC PTENpred (%0.2f)' % mean_auc, lw=2)
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic')
    plt.legend(loc="lower right")
if __name__ == "__main__":
    # Build the full mutation data set and overlay the ROC curves of all four
    # predictors (PolyPhen-2, PROVEAN, VarMod, PTENpred) plus a chance line on
    # one figure for the 2-category task.  `pten_mutations` is read as a
    # module-level global by the plot_* helpers.
    mut_list = get_full_mut_list()
    pten_mutations = MutationGroup(mut_list)
    plt.figure(figsize=(8, 6))
    plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck (0.50)')
    plot_polyphen_roc(2)
    plot_provean_roc(2)
    plot_varmod_roc(2)
    plot_comp_roc_figure(pten_mutations, 2)
    plot_pdf("pphenroc4.pdf")
    plt.show()
class Solution:
    def numSquares(self, n):
        """Return the least number of perfect squares that sum to n (0 when n == 0)."""
        # counts[m] holds the answer for m; built bottom-up from 0 to n.
        counts = [0]
        for total in range(1, n + 1):
            best = min(counts[total - root * root]
                       for root in range(1, int(total ** 0.5) + 1))
            counts.append(best + 1)
        return counts[-1]
if __name__ == '__main__':
    # Smoke test: print the least number of perfect squares summing to 6255.
    solution = Solution()
    print(solution.numSquares(6255))
|
import requests

# should create Set-Cookie:mycookie=myvalue header if vulnerable
# Each payload is a URL-encoded CRLF-injection probe: %0D/%0d encode CR and
# %0A/%0a encode LF; variants cover case, lone CR or LF, and prefixed forms.
PAYLOADS = [r"%0D%0ASet-Cookie:mycookie=myvalue",
            r"%0d%0aSet-Cookie:mycookie=myvalue",
            r"crlf%0dSet-Cookie:mycookie=myvalue",
            r"crlf%0aSet-Cookie:mycookie=myvalue",
            r"%23%0dSet-Cookie:mycookie=myvalue",
            r"%0dSet-Cookie:mycookie=myvalue",
            r"%0ASet-Cookie:mycookie=myvalue?foo",
            r"%0aSet-Cookie:mycookie=myvalue",
            r"/xxx%0ASet-Cookie:mycookie=myvalue;"]
# protocol either 'http://' or 'https://'
def crlf(protocol, subdomain):
    """Probe *subdomain* for CRLF header injection.

    Requests every payload in PAYLOADS and reports hosts that reflect the
    injected `mycookie` Set-Cookie header.  Returns False on timeout (the
    host is assumed unreachable), otherwise None.

    Fixed: the original used Python 2 `print` statements, which are syntax
    errors under Python 3.
    """
    for payload in PAYLOADS:
        try:
            r = requests.get("%s%s/%s" % (protocol, subdomain, payload),
                             verify=False, timeout=.5, allow_redirects=False)
            for name in r.cookies.keys():
                if "mycookie" in name:
                    print("[+] Vulnerable: %s%s/%s" % (protocol, subdomain, payload))
        except requests.Timeout:
            print("\tTimeout")
            return False
        except Exception as e:
            print("ERROR STRING: %s%s:8443/%s" % (protocol, subdomain, payload))
            print(str(e))
if __name__ == "__main__":
    # Fixed: iterating a file yields lines WITH the trailing newline, so the
    # probe URL was split across lines; strip whitespace from each host.
    with open("rdp.txt", "r") as f:
        for subdomain in f:
            crlf("https://", subdomain.strip())
|
import re
import os
import logging
# Adopted from: https://github.com/HASTE-project/haste-image-analysis-container2/tree/master/haste/image_analysis_container2/filenames
# file example
# /share/mikro/IMX/MDC_pharmbio/exp-TimeLapse/A549-20X-DB-HD-BpA-pilot1/2019-03-27/84/TimePoint_1/A549-20X-DB-HD-BpA-pilot1_B02_s1_thumb1E64F2F4-E1E8-410C-9891-A491D91FC73C.tif
# Group indices (fixed: the inline labels had drifted out of sync with the
# actual group numbers):
#   1 project, 2 plate, 3-5 date (yyyy, mm, dd), 6 timepoint, 7 cell line,
#   8 magnification, 9 plate-short, 10 well, 11 wellsample, 12 optional
#   channel suffix ("_w<n>"), 13 optional "_thumb", 14 image GUID,
#   15 optional extension.
__pattern_path_and_file = re.compile('^'
                                     + r'.*'                                # any
                                     + r'MDC_pharmbio/(.*?)/'               # project (1)
                                     + r'(.*?)/'                            # plate (2)
                                     + r'([0-9]{4})-([0-9]{2})-([0-9]{2})'  # date (yyyy, mm, dd) (3,4,5)
                                     + r'.*'                                # any
                                     + r'TimePoint_([^\/]+)'                # timepoint (6)
                                     + r'\/([^-]+)'                         # cell-line name (7)
                                     + r'-([^-]+)'                          # magnification (8)
                                     + r'-([^_]+)'                          # plate-short (9)
                                     + r'_([^_]+)'                          # well (10)
                                     + r'_s([0-9])'                         # wellsample (11) # OBS! Only 9 wellsamples
                                     + r'(_w[0-9])?'                        # optional channel (12)
                                     + r'(_thumb)?'                         # thumbnail marker (13)
                                     + r'([A-Z0-9]{8}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{12})'  # image GUID (14)
                                     + r'(\.tiff?)?'                        # extension (15)
                                     + '$',
                                     re.IGNORECASE)  # Windows has case-insensitive filenames


def parse_path_and_file(path):
    """Parse an ImageXpress microscopy image path into a metadata dict.

    Returns None when the path does not match the expected layout or any
    field fails to convert.
    """
    try:
        match = __pattern_path_and_file.search(path)
        if match is None:
            return None
        # Channel defaults to 1 when no "_w<n>" suffix is present.
        channel = 1
        if match.group(12):
            # Fixed: group(12) is e.g. "_w3"; the original indexed [1], which
            # is the literal character "w", not the channel number.
            channel = int(match.group(12)[2:])
        metadata = {
            'path': path,
            'filename': os.path.basename(path),
            'date_year': int(match.group(3)),
            'date_month': int(match.group(4)),
            'date_day_of_month': int(match.group(5)),
            'timepoint': int(match.group(6)),
            'project': match.group(1),
            'magnification': match.group(8),
            'plate': match.group(2),
            'plate_acq_name': path,
            'well': match.group(10),
            'wellsample': match.group(11),
            'channel': channel,
            'is_thumbnail': match.group(13) is not None,
            'guid': match.group(14),
            'extension': match.group(15),
            'channel_map_id': 1,
            'microscope': "ImageXpress",
            'parser': os.path.basename(__file__)
        }
        return metadata
    except Exception:
        # Fixed: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.  Unparsable paths still yield None by design.
        logging.debug("could not parse filename with this parser")
        return None
if __name__ == '__main__':
    #
    # Configure logging
    #
    logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
                        datefmt='%H:%M:%S',
                        level=logging.DEBUG)

    # Exercise the parser on two representative acquisition paths.
    sample_paths = (
        "/share/mikro/IMX/MDC_pharmbio/exp-TimeLapse/A549-20X-DB-HD-BpA-pilot1/2019-03-27/84/TimePoint_1/A549-20X-DB-HD-BpA-pilot1_B02_s1_w1_thumb1E64F2F4-E1E8-410C-9891-A491D91FC73C.tif",
        "/share/mikro/IMX/MDC_pharmbio/kinase378-v1/kinase378-v1-FA-P015240-HOG-48h-P2-L5-r1/2022-03-11/965/kinase378-v1-FA-P015240-HOG-48h-P2-L5-r1_B02_s8_w3_thumb3DF2C4AE-602A-46F6-84B2-9B31D1981B60.tif",
    )
    for sample in sample_paths:
        print("retval = " + str(parse_path_and_file(sample)))
|
# Generated by Django 2.0.2 on 2018-02-10 06:45
from django.db import migrations, models
class Migration(migrations.Migration):
    # NOTE(review): the default below is the *repr* of a function
    # ('<function uuid4 at 0x...>'), not a generated UUID — it looks like
    # `uuid.uuid4` was accidentally stringified when this migration was
    # auto-generated.  Left untouched because applied migrations are
    # historical records; confirm a follow-up migration corrects the default.

    dependencies = [
        ('demo', '0021_files_file_id'),
    ]

    operations = [
        migrations.AlterField(
            model_name='files',
            name='file_id',
            field=models.CharField(blank=True, default='<function uuid4 at 0x7f75690e7158>', max_length=200, unique=True),
        ),
    ]
|
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import *
from selenium.webdriver.common.action_chains import ActionChains
from selenium import webdriver
import time
class BasePage(object):
    """Base for Selenium page objects: owns the browser driver and basic navigation."""

    def __init__(self):
        # NOTE(review): every BasePage instance launches its own Chrome
        # browser — confirm callers expect that rather than a shared driver.
        self.driver = webdriver.Chrome()
        self.timeout = 30  # presumably seconds for explicit waits — TODO confirm usage

    def navigate(self, address):
        """Load *address* in the browser."""
        self.driver.get(address)
# TCP port to be used for twisted communication
COMMUNICATION_PORT = 7999

# Created services, each item represent one service and the master and worker
# class contains business logic for the specific services.
# Values are dotted import paths; presumably resolved dynamically by the
# service loader — TODO confirm.
SERVICES = {
    'http': {
        'MasterClass': "master.services.http.nginx_master_service.NginxMasterService",
        'WorkerClass': "worker.services.http.nodejs_worker_service.NodejsWorkerService",
    },
}
import itertools
from flask import g, request, render_template, json
from brutus_module_math import app
from brutus_module_math import nlCalc
@app.route('/')
def index():
    """
    Get the index page.

    Plain-text identification/liveness response for the module root.
    """
    return "Brutus Math Module"
@app.route('/api/request', methods=['POST'])
def create_request():
    """
    Evaluate a natural-language math request.

    Expects a JSON body shaped like ``{"input": {"text": "..."}}`` (e.g.
    'what is ten plus ten') and returns ``{"input": ..., "output":
    {"text": <result>}}``.  NOTE(review): missing keys raise KeyError —
    confirm upstream guarantees the payload shape.
    """
    data = request.get_json()
    input_data = data['input']
    contents = input_data['text']
    # 'what is ten plus ten'
    resultstring = nlCalc.calculate(contents)
    result = {"input": input_data, 'output': {'text': resultstring}}
    return json.jsonify(result)
|
from django.db import models
# Create your models here.
class Product(models.Model):
    """Inventory record for a product belonging to one of three shops."""

    # Shop codes; each tuple is (stored value, display label).
    UM = 'um'
    MS = 'ms'
    PS = 'ps'
    SHOPS = [
        (UM, UM),
        (MS, MS),
        (PS, PS),
    ]

    product_id = models.IntegerField('商品ID', unique=True)
    stock_pcs = models.IntegerField('商品庫存數量', default=0)
    price = models.IntegerField('商品單價', default=0)
    shop_id = models.CharField('商品所屬館別', choices=SHOPS, max_length=2)
    vip = models.BooleanField('VIP限定商品', default=False)

    def __str__(self):
        return str(self.product_id)

    def sold(self, number):
        """Decrement stock by *number*; raises ValueError when stock is insufficient.

        NOTE(review): this read-check-write is not atomic — concurrent sales
        could oversell; consider an F() expression or select_for_update.
        """
        if self.stock_pcs - number < 0:
            raise ValueError('庫存不足')
        self.stock_pcs -= number
        self.save(update_fields=['stock_pcs'])

    def purchase(self, number):
        """Increment stock by *number* (restock)."""
        self.stock_pcs += number
        self.save(update_fields=['stock_pcs'])
class Order(models.Model):
    """A purchase record; product/shop fields are denormalised copies, not foreign keys."""

    product_id = models.IntegerField('商品ID')
    qty = models.IntegerField('購買數量', default=0)
    price = models.IntegerField('商品單價', default=0)
    shop_id = models.CharField('商品所屬館別', max_length=2)
    customer_id = models.IntegerField('顧客ID')

    def __str__(self):
        return 'Product:{0}-Qty:{1}'.format(self.product_id, self.qty)
|
from segmentation.segmentation_abstract import Segmentation
import pandas as pd
class ActivityWindow(Segmentation):
    """Segmentation that yields one index window per annotated activity event."""

    def applyParams(self, params):
        """Defer all parameter validation/storage to the base class."""
        return super().applyParams(params)

    def segment3(self, buffer):
        """Yield (index range, activity) for each event in self.a_events.

        Events whose start time cannot be located in *buffer* are skipped;
        when the end time cannot be located the window collapses to the
        start index.
        """
        for _, row in self.a_events.iterrows():
            act = row.Activity
            sindex = buffer.searchTime(row.StartTime, -1)
            if sindex is None:
                continue
            eindex = buffer.searchTime(row.EndTime, +1)
            # Fixed: compared to None with `==` instead of `is`.
            if eindex is None:
                eindex = sindex
            yield range(sindex, eindex + 1), act
class SlidingEventActivityWindow(Segmentation):
    # Sliding fixed-size windows (length `size`, step `shift`) over each
    # annotated activity event; gaps between events are emitted with the
    # pseudo-activity label 0.

    def applyParams(self, params):
        # Validate windowing parameters, then delegate storage to the base
        # class.  NOTE(review): the comparisons run *before* the int()
        # conversion, so non-numeric params raise TypeError instead of
        # returning False; also the converted locals are discarded — only the
        # original `params` reach the base class.  Confirm both are intended.
        shift = params['shift']
        size = params['size']
        if not (shift > 0):
            return False
        if not (size > 0):
            return False
        if (shift > size):
            return False
        try:
            shift = int(shift)
            size = int(size)
        except:
            return False
        return super().applyParams(params)

    # def _create_segments(self, sindex, eindex, act):
    #     olds = 0
    #     olde = 0
    #     for i in range(sindex-self.size+1, eindex+1, self.shift):
    #         s = max(i, sindex)
    #         e = min(i+self.size, eindex+1)
    #         if s == olds and e == olde:
    #             continue
    #         olds, olde = s, e
    #         yield range(s, e), act

    def _create_segments(self, sindex, eindex, act):
        # Slide windows of self.size over [sindex, eindex], stepping by
        # self.shift; windows are clipped to the event boundaries.
        for i in range(sindex, max(sindex, eindex-self.size+1)+1, self.shift):
            s = max(i, sindex)
            e = min(i+self.size, eindex+1)
            yield range(s, e), act

    def segment3(self, buffer):
        # Walk events in order, emitting labelled windows for each event and
        # label-0 windows for the gaps before, between, and after events.
        olde = 0
        for i, row in self.a_events.iterrows():
            act = row.Activity
            sindex = buffer.searchTime(row.StartTime, -1)
            if (sindex is None):
                continue
            # Gap since the previous event gets the null activity 0.
            if olde < sindex-1:
                yield from self._create_segments(olde, sindex-1, 0)
            eindex = buffer.searchTime(row.EndTime, +1)
            # etime=buffer.times[eindex]
            # NOTE(review): `is None` would be the idiomatic comparison here.
            if eindex == None:
                eindex = sindex
            olde = eindex+1
            # NOTE(review): zero-length events (sindex == eindex) emit nothing.
            if sindex < eindex:
                yield from self._create_segments(sindex, eindex, act)
        # Trailing gap after the last event, if any.
        last = len(buffer.times)-1
        if last > olde:
            # print(f'creating segments from {olde} to {last} for act={0}')
            yield from self._create_segments(olde, last, 0)
|
# Creates a random edge database for 50 nodes: each of 50 lines is
# "<node_a> <node_b> <weight>", with weight 0 for self-loops and a random
# weight in 1..105 otherwise.  Appends to any existing db.txt.
import random

# Fixed: use a context manager instead of a manually closed handle
# (and drop the stray trailing semicolon / manual while-counter).
with open("db.txt", "a") as f:
    for _ in range(50):
        q = random.randint(0, 50)
        f.write(str(q))
        f.write(" ")
        p = random.randint(0, 50)
        f.write(str(p))
        f.write(" ")
        if p == q:
            f.write('0')
        else:
            f.write(str(random.randint(1, 105)))
        f.write("\n")
#matrixTools.py
#Sunday, September 27, 2020
import staticMatrix
def buildMatrix(inputString, formattingFunction):
    """Parse *inputString* into a staticMatrix.

    Entries are separated by whitespace or commas; ';' ends a row.  Each
    numeric token (digits, '.', '-') is converted with *formattingFunction*
    (e.g. int or float).  Raises staticMatrix.invalidMatrixDimensionsError
    when no rows are found.  NOTE(review): ragged rows are not validated
    here; the column count is taken from the first row — confirm
    staticMatrix.setData checks consistency.
    """
    data = []
    iterator = 0
    tempRow = []
    while iterator < len(inputString):
        character = inputString[iterator]
        if character.isspace() or character == ",":
            # Separators between entries are skipped.
            iterator = iterator + 1
            continue
        elif character == ";":
            # Row terminator: flush the current row (empty rows are dropped).
            if len(tempRow) != 0:
                data.append(tempRow)
            iterator = iterator + 1
            tempRow = []
            continue
        else:
            # Accumulate one numeric token; '-' and '.' are accepted anywhere
            # in the token and left to formattingFunction to validate.
            word = ""
            while character.isnumeric() or character == "." or character == "-":
                word = word + character
                iterator = iterator + 1
                if iterator < len(inputString):
                    character = inputString[iterator]
                else:
                    break
            tempRow.append(formattingFunction(word))
    # Flush a trailing row that was not terminated by ';'.
    if len(tempRow) != 0:
        data.append(tempRow)
    rows = len(data)
    if rows == 0:
        raise staticMatrix.invalidMatrixDimensionsError
    columns = len(data[0])
    returnMatrix = staticMatrix.staticMatrix(rows, columns)
    returnMatrix.setData(data)
    return returnMatrix
def add(matrix1, matrix2):
    """Return a new staticMatrix holding the element-wise sum of two matrices.

    Raises staticMatrix.invalidMatrixDimensionsError when the shapes differ.

    Fixed: the column comparison read `matrix2.columns2` (a typo), which
    raised AttributeError for every call with equal row counts.
    """
    if matrix1.rows != matrix2.rows:
        raise staticMatrix.invalidMatrixDimensionsError
    if matrix1.columns != matrix2.columns:
        raise staticMatrix.invalidMatrixDimensionsError
    rows = matrix1.rows
    columns = matrix1.columns
    data = []
    for i in range(rows):
        tempRow = []
        for j in range(columns):
            entry = matrix1.data[i][j] + matrix2.data[i][j]
            tempRow.append(entry)
        data.append(tempRow)
    returnMatrix = staticMatrix.staticMatrix(rows, columns)
    returnMatrix.setData(data)
    return returnMatrix
def scalarMultiply(matrix, scalar):
    """Return a new staticMatrix with every entry of *matrix* multiplied by *scalar*.

    Fixed: the inner loop referenced the undefined name `matrix1`
    (a copy-paste leftover from `add`), raising NameError on every call.
    """
    data = []
    for i in range(matrix.rows):
        tempRow = []
        for j in range(matrix.columns):
            entry = matrix.data[i][j] * scalar
            tempRow.append(entry)
        data.append(tempRow)
    returnMatrix = staticMatrix.staticMatrix(matrix.rows, matrix.columns)
    returnMatrix.setData(data)
    return returnMatrix
|
"""
Homework 2
Create 5 dictionaries. Each dictionary should represent a CV.
Create a CV containing the information of the 5 created people.
Print the information on CVs created on the screen.
"""
mehmet_cv = {"Name": "Mehmet",
"Surname": "Altın",
"Age": 34,
"Gender": "Male",
"Job": "Doctor",
"Job experience": "5 Years",
"Language": "English"}
ahmet_cv = {"Name": "Ahmet",
"Surname": "Demir",
"Age": 25,
"Gender": "Male",
"Job": "Engineer",
"Job experience": "1 Year",
"Language": "Turkish"}
alper_cv = {"Name": "Alper",
"Surname": "Bakır",
"Age": 22,
"Gender": "Male",
"Job": "Cashier",
"Job experience": "2 Years",
"Language": "Russian"}
nehir_cv = {"Name": "Nehir",
"Surname": "Civa",
"Age": 27,
"Gender": "Female",
"Job": "Teacher",
"Job experience": "3 Years",
"Language": "Turkish"}
ezgi_cv = {"Name": "Ezgi",
"Surname": "Çelik",
"Age": 29,
"Gender": "Female",
"Job": "Freelancer",
"Job experience": "2 Years",
"Language": "English"}
cv_s = [mehmet_cv, ahmet_cv, alper_cv, nehir_cv, ezgi_cv]  # collect all CVs

# Print each CV's fields; a dashed separator follows every CV except the
# last one ("Language" is the final key of every CV).
last_cv = cv_s[-1]
for cv in cv_s:
    for field, value in cv.items():
        print(f"{field}: {value}")
        if cv != last_cv and field == "Language":
            print("--" * 12)
|
import shelve
import re, os
from urllib.parse import urlparse
from PartA import tokenize, computeWordFrequencies
# English stop words, held in a frozenset so membership tests match whole
# words only.  Fixed: the original kept these in one big string, so
# `key in stopWord` performed *substring* matching (e.g. "hat" matched
# inside "what") and wrongly discarded non-stop-words from the report.
stopWord = frozenset("""
a about above after again against all am an and any are aren't as at
be because been before being below between both but by
can't cannot could couldn't
did didn't do does doesn't doing don't down during
each few for from further
had hadn't has hasn't have haven't having
he he'd he'll he's her here here's hers herself him himself his how how's
i i'd i'll i'm i've if in into is isn't it it's its itself
let's me more most mustn't my myself
no nor not
of off on once only or other ought our ours ourselves out over own
same shan't she she'd she'll she's should shouldn't so some such
than that that's the their theirs them themselves then there there's
these they they'd they'll they're they've this those through to too
under until up very
was wasn't we we'd we'll we're we've were weren't
what what's when when's where where's which while who who's whom why why's
with won't would wouldn't
you you'd you'll you're you've yours yourself yourselves
""".split())
def writeReport():
    """Summarise the crawl stored in urlText.db.

    Writes all page text to all_content.txt and a summary.txt containing the
    page count, the *.ics.uci.edu subdomains seen, the longest page, and the
    top-50 non-stop-words.  Best-effort: any failure is silently ignored,
    matching the original behaviour.

    Fixed: the original `finally` closed `s` and `f` unconditionally, so an
    early failure (before the files were opened) raised NameError and masked
    the real error; `f1` was also leaked when an exception occurred.
    """
    s = None
    f = None
    f1 = None
    try:
        if os.path.exists("summary.txt"):
            os.remove('summary.txt')
        if os.path.exists("all_content.txt"):
            os.remove('all_content.txt')
        s = shelve.open('urlText.db')
        f = open('summary.txt', 'w')
        f1 = open('all_content.txt', 'w')
        f.write(f'There are {len(s)} pages found\n')
        f.write('------------------above are pages found------------------------------------------------\n')
        longest = 0
        longest_url = ''
        subdomain = set()
        for url, content in s.items():
            f1.write(content + '\n')
            parsed = urlparse(url)
            if re.match(r'.+\.ics\.uci\.edu', parsed.netloc):
                subdomain.add(parsed.netloc)
            if longest < len(content.split()):
                longest = len(content.split())
                longest_url = url
        for u in subdomain:
            f.write(f'{u}\n')
        f.write(
            f'------------------above are {len(subdomain)} subdomains------------------------------------------------\n')
        f.write(f'The page that has most words is {longest_url}, and it has {longest} words\n')
        f.write('------------------above are longest page----------------------------------------------\n')
        f1.close()
        # Emit the 50 most frequent tokens that are not stop words.
        i = 1
        the_dict = computeWordFrequencies(tokenize('all_content.txt'))
        for key, value in sorted(the_dict.items(), key=lambda x: -x[1]):
            if key in stopWord:
                continue
            if i > 50:
                break
            i += 1
            f.write(f'{key}->{value}\n')
        f.write('------------------above are 50 top words except English stop word---------------------\n')
    except Exception:
        # Preserve the original best-effort silence, but only for real errors
        # (a bare `except:` would also swallow KeyboardInterrupt/SystemExit).
        pass
    finally:
        if s is not None:
            s.close()
        for handle in (f, f1):
            if handle is not None:
                handle.close()
if __name__ == '__main__':
    # Generate summary.txt and all_content.txt from the crawl database.
    writeReport()
|
import pandas as pd
from sklearn.svm import SVC, LinearSVC
from sklearn import model_selection
from sklearn import metrics
import warnings
warnings.filterwarnings('ignore')  # "error", "ignore", "always", "default", "module" or "once"

glass = pd.read_csv("glass.csv")

# Preprocessing data: features are every column except the 'Type' label.
X = glass.drop('Type', axis=1)
Y = glass['Type']

# Split the dataset: test_size=0.4 holds out 40% for testing and keeps 60%
# for training (the original comment stated the two percentages swapped).
X_train, X_test, Y_train, Y_test = model_selection.train_test_split(X, Y, test_size=0.4)

# SVM model is trained and it is fitted for the training data
svc = SVC()
svc.fit(X_train, Y_train)
Y_predicted_output = svc.predict(X_test)

# Model Accuracy is calculated by comparing predicted output by the model and the actual output
print("accuracy score:", metrics.accuracy_score(Y_test, Y_predicted_output) * 100)
print(metrics.classification_report(Y_test, Y_predicted_output))
|
from hashlib import md5
from io import BytesIO
from aspen.simplates.pagination import parse_specline, split_and_escape
from babel.messages.extract import extract_python
import jinja2.ext
JINJA_BASE_OPTIONS = dict(
trim_blocks=True, lstrip_blocks=True,
line_statement_prefix='%',
extensions=['jinja2.ext.do'],
)
def extract_custom(extractor, *args, **kw):
    """Wrap a babel *extractor*, replacing empty singular msgids.

    Plural messages whose singular form is the empty string get a synthetic
    placeholder singular containing an md5 hash of the plural form, so the
    catalog entry stays unique.  All other matches pass through unchanged.
    """
    for match in extractor(*args, **kw):
        message = match[2]
        if isinstance(message, tuple) and message[0] == '':
            digest = md5(message[1].encode('utf8')).hexdigest()
            placeholder = "<unused singular (hash=%s)>" % digest
            match = (match[0], match[1], (placeholder, message[1], message[2]), match[3])
        yield match
def extract_jinja2_custom(*args, **kw):
    """Jinja2 babel extractor with this project's template options pre-applied.

    Merges JINJA_BASE_OPTIONS into the caller's `options` dict (caller values
    win), serialising booleans and lists into the string form babel expects,
    then delegates to `extract_custom`.
    """
    options = kw.setdefault('options', {})
    for key, value in JINJA_BASE_OPTIONS.items():
        if isinstance(value, bool):
            serialized = str(value)
        elif isinstance(value, list):
            serialized = ','.join(value)
        else:
            serialized = value
        options.setdefault(key, serialized)
    return extract_custom(jinja2.ext.babel_extract, *args, **kw)
def extract_python_custom(*args, **kw):
    """Babel's built-in Python extractor, wrapped by `extract_custom` to rewrite empty singular msgids."""
    return extract_custom(extract_python, *args, **kw)
def extract_spt(fileobj, *args, **kw):
    """Extract translatable messages from a simplate (.spt) file.

    Early pages without a specline (at most the first two, and never the last
    page) hold Python, as do pages rendered by `json_dump`; every other page
    is treated as a Jinja2 template.
    """
    pages = list(split_and_escape(fileobj.read().decode('utf8')))
    npages = len(pages)
    for i, page in enumerate(pages, 1):
        # Prepend newlines so reported line numbers match the original file.
        f = BytesIO(b'\n' * page.offset + page.content.encode('utf8'))
        content_type, renderer = parse_specline(page.header)
        python_page = i < 3 and i < npages and not page.header
        json_page = renderer == 'json_dump'
        # Fixed: the original also guarded with `if extractor:` after this
        # if/else, but one branch always assigns, so the guard was dead code.
        if python_page or json_page:
            extractor = extract_python_custom
        else:
            extractor = extract_jinja2_custom
        for match in extractor(f, *args, **kw):
            yield match
|
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
import numpy as np
from sklearn.datasets import load_digits
import matplotlib.pyplot as plt
digits = load_digits()
'''from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(digits.data,digits.target,test_size=0.3)
lr = LogisticRegression(solver='liblinear',multi_class='ovr')
lr.fit(X_train, y_train)
print("logistic reg")
print(lr.score(X_test, y_test))
svm = SVC(gamma='auto')
svm.fit(X_train, y_train)
print("SVM score")
print(svm.score(X_test, y_test))
rf = RandomForestClassifier(n_estimators=40)
rf.fit(X_train, y_train)
print("Random Forest score")
print(rf.score(X_test, y_test))'''
'''from sklearn.model_selection import KFold
kf = KFold(n_splits=3)
kf
for train_index, test_index in kf.split([1,2,3,4,5,6,7,8,9]):
print(train_index, test_index)'''
def get_score(model, X_train, X_test, y_train, y_test):
    """Fit *model* on the training split and return its score on the test split."""
    model.fit(X_train, y_train)
    return model.score(X_test, y_test)
from sklearn.model_selection import StratifiedKFold
folds = StratifiedKFold(n_splits=3)
'''scores_logistic = []
scores_svm = []
scores_rf = []
for train_index, test_index in folds.split(digits.data,digits.target):
X_train, X_test, y_train, y_test = digits.data[train_index], digits.data[test_index], \
digits.target[train_index], digits.target[test_index]
scores_logistic.append(get_score(LogisticRegression(solver='liblinear',multi_class='ovr'), X_train, X_test, y_train, y_test))
scores_svm.append(get_score(SVC(gamma='auto'), X_train, X_test, y_train, y_test))
scores_rf.append(get_score(RandomForestClassifier(n_estimators=40), X_train, X_test, y_train, y_test))
print(scores_logistic)
print(scores_svm)
print(scores_rf)'''
from sklearn.model_selection import cross_val_score
print("cross_lR_score")
score1=cross_val_score(LogisticRegression(solver='liblinear',multi_class='ovr'), digits.data, digits.target,cv=3)
print(np.average(score1))
print("cross_svc_score")
score2=cross_val_score(SVC(gamma='auto'), digits.data, digits.target,cv=3)
print(np.average(score2))
print("cross_RF_score")
score3=cross_val_score(RandomForestClassifier(n_estimators=40),digits.data, digits.target,cv=4)
print(np.average(score3))
|
from cryptofeed import FeedHandler
from cryptofeed.defines import LIQUIDATIONS
from cryptofeed.exchanges import EXCHANGE_MAP
async def liquidations(data, receipt):
    """Callback invoked by the feed handler for each liquidation event; prints one line per event."""
    print(f'Cryptofeed Receipt: {receipt} Exchange: {data.exchange} Symbol: {data.symbol} Side: {data.side} Quantity: {data.quantity} Price: {data.price} ID: {data.id} Status: {data.status}')
def main():
    """Subscribe to liquidation streams on every exchange that offers them and run the handler."""
    handler = FeedHandler()
    active = []
    print("Querying exchange metadata")
    for name, exchange_cls in EXCHANGE_MAP.items():
        # Skip exchanges without a websocket liquidations channel.
        if LIQUIDATIONS not in exchange_cls.info()['channels']['websocket']:
            continue
        active.append(name)
        # PINDEX symbols are excluded from the subscription.
        instruments = [sym for sym in exchange_cls.symbols() if 'PINDEX' not in sym]
        handler.add_feed(exchange_cls(subscription={LIQUIDATIONS: instruments},
                                      callbacks={LIQUIDATIONS: liquidations}))
    print("Starting feedhandler for exchanges:", ', '.join(active))
    handler.run()
if __name__ == '__main__':
    # Start streaming liquidations; blocks until interrupted.
    main()
|
#!/usr/bin/env python3
"""Contains `KappaComplex`, a class to represents a list of agents chained into a larger entity, and the `embed_and_map`
function."""
import re
import warnings
from collections import deque
from typing import Deque, Dict, List, Set, Tuple, Union

import networkx as nx

from .KappaMultiAgentGraph import KappaMultiAgentGraph
from .KappaAgent import KappaAgent
from .KappaBond import KappaBond
from .KappaError import ComplexParseError, AgentParseError
from .KappaSite import KappaPort
class KappaComplex(KappaMultiAgentGraph):
"""Class for representing Kappa complexes. E.g. `A(b[1] s{u}[.]), B(a[1] c[2]), C(b[2] a[3]), A(c[3] s[.]{x})`."""
# define agent pattern
_agent_idnt_pat = r'(?:x\d+:)?'
_agent_name_pat = r'(?:[_~][a-zA-Z0-9_~+-]+|[a-zA-Z][a-zA-Z0-9_~+-]*)'
_agent_sign_pat = r'\([^()]*\)'
_agent_pat = _agent_idnt_pat + _agent_name_pat + _agent_sign_pat
_agent_pat_re = re.compile(_agent_pat)
def __init__(self, expression: str):
self._raw_expression: str
self._agents: List[KappaAgent]
self._agent_by_idents: Dict[int, KappaAgent]
"""Maps an identifier to an Agent; empty if no identifiers present"""
self._agent_types: Set[KappaAgent]
self._kappa_expression: str
self._composition: Dict[KappaAgent, int]
"""Maps an agent type to an abundance"""
self._raw_expression = expression
# get the set of agents making up this complex
matches = self._agent_pat_re.findall(expression.strip())
if len(matches) == 0:
raise ComplexParseError('Complex <' + self._raw_expression + '> appears to have zero agents.')
try:
agent_list: List[KappaAgent] = []
agent_idents = []
agent_types = set()
composition = {}
for item in matches:
agent = KappaAgent(item)
agent_list.append(agent)
if agent.get_agent_identifier() is not None:
agent_idents.append(agent.get_agent_identifier())
# update type set, composition structures
agent_type = KappaAgent(agent.get_agent_name() + '()')
agent_types.update([agent_type])
if agent_type in composition:
composition[agent_type] += 1
else:
composition[agent_type] = 1
except AgentParseError as a:
raise ComplexParseError('Could not parse agents in complex <' + expression + '>.') from a
self._agents = sorted(agent_list)
self._agent_types = agent_types
self._composition = dict(sorted(composition.items(), key=lambda item: item[1]))
# deal with agent identifier map; 0 is a valid identifier
if all([isinstance(ag.get_agent_identifier(), int) for ag in agent_list]):
self._agent_by_idents = {agent.get_agent_identifier(): agent for agent in agent_list}
elif any([isinstance(ag.get_agent_identifier(), int) for ag in agent_list]):
Warning('Expression contains identifier for only a subset of agents!\n{}'.format(self._raw_expression))
self._agent_by_idents = {}
else:
self._agent_by_idents = {}
# canonicalize the kappa expression
self._kappa_expression = ', '.join([str(agent) for agent in self._agents])
def get_number_of_bonds(self) -> int:
"""Returns the number of bonds in the complex."""
bonds = set()
for agent in self._agents:
bonds.update(agent.get_bond_identifiers())
return len(bonds)
def get_agents_of_bond(self, bond_id: Union[int, str]) -> Union[Tuple[KappaAgent, KappaAgent], None]:
"""Returns a tuple with both KappaAgents on either side of the requested bond identifier, or None if the bond
is unkown to this complex."""
if isinstance(bond_id, int):
bond_id = str(bond_id)
terminii = []
for this_agent in self._agents:
if bond_id in this_agent.get_bond_identifiers():
terminii.append(this_agent)
if len(terminii) == 2:
return terminii
else:
return None
def get_size_of_complex(self) -> int:
"""Returns the size, in agents, of this complex."""
return len(self._agents)
def get_agent_types(self) -> Set[KappaAgent]:
"""Returns the set of agent names (or agent types) that make up the complex."""
return self._agent_types
def get_all_agents(self) -> List[KappaAgent]:
"""Returns a list of KappaAgents, filled with agents plus their signatures, present in this complex."""
# replace commas with spaces, then split string into a list at closing parenthesis
return self._agents
def get_complex_composition(self) -> Dict[KappaAgent, int]:
"""Returns a dictionary where the key is an agent (fully qualified, not just a name), and the value the number
of times that agent appears in this complex."""
return self._composition
def get_number_of_embeddings_of_agent(self, query) -> int:
"""Returns the number of embeddings the query agent has on the KappaComplex. For the 'truth table' of site
nomenclature, see `KappaPort`."""
# type the query into an Agent, if it's not one already
if not type(query) is KappaAgent:
q_agent = KappaAgent(query)
else:
q_agent = query
# iterate over agents, checking if query is in each agent
match_number = 0
for s_agent in self._agents:
if q_agent in s_agent:
match_number += 1
return match_number
def get_number_of_embeddings_of_complex(self, query, symmetry_adjust: bool = True) -> int:
"""Returns the number of embeddings the query complex has on the KappaComplex. Optional parameter to not perform
the symmetry adjustment and report number of raw embeddings. See the `embed_and_map` function for examples and
advanced usage."""
if not type(query) is KappaComplex:
q_complex = KappaComplex(query)
else:
q_complex = query
total_maps, unique_maps = embed_and_map(q_complex, self)
if symmetry_adjust:
return len(unique_maps)
else:
return len(total_maps)
def get_number_of_embeddings(self, query, symmetry_adjust: bool = True) -> int:
"""Wrapper for the two specialized functions, for agent and complex. Optional parameter to not perform
the symmetry adjustment and report the number of raw embeddings. See the `embed_and_map` function for examples
and advanced usage."""
if type(query) is KappaAgent:
return self.get_number_of_embeddings_of_agent(query)
elif type(query) is KappaComplex:
return self.get_number_of_embeddings_of_complex(query, symmetry_adjust)
else:
try:
try:
return self.get_number_of_embeddings_of_agent(KappaAgent(query))
except AgentParseError:
return self.get_number_of_embeddings_of_complex(KappaComplex(query), symmetry_adjust)
except ComplexParseError:
raise ValueError('Could not parse <{}> as a KappaAgent nor as a KappaComplex.'.format(query))
def get_agent_identifiers(self) -> List[int]:
"""Returns a list with the numeric agent identifiers, if any."""
return self._agent_by_idents.keys()
def get_agent_from_identifier(self, ident: int) -> Union[KappaAgent, None]:
"""Returns the KappaAgent associated to provided identifier, if any."""
if ident not in self._agent_by_idents:
Warning('Returnin None; identifier {} not present in complex\n{}'.format(ident, self._kappa_expression))
return None
else:
return self._agent_by_idents[ident]
def to_networkx(self, identifier_offset: int = 0) -> nx.MultiGraph:
"""Returns a Multigraph representation of the complex, abstracting away binding site data. Nodes represent
agents, edges their bonds. Nodes have an attribute dictionary where the key `kappa` holds the KappaAgent.
Edges have an attribute dictionary where the key `bond id` holds the bond identifier from the Kappa expression.
Node identifiers are integers, using the order of agent declaration. For a graph `g`, `g.nodes.data()` displays
the node identifiers and their corresponding `KappaAgents`, and `g.edges.data()` displays the edges, using the
node identifiers as well as the kappa identifiers.
The optional parameter `identifier_offset` will offset all numeric identifiers reported; used in unlabeled
snapshots, or when combining graphs."""
kappa_complex_multigraph = nx.MultiGraph()
dangle_bond_dict = {} # store unpaired bonds he
paired_bond_list = [] # store tuples of (agent index 1, agent index 2, bond identifier)
agent_counter = 0 # if using un-labeled kappa, default to this
for agent in self.get_all_agents():
if agent.get_agent_identifier():
agent_global_id = agent.get_agent_identifier() + identifier_offset
else:
agent_global_id = agent_counter + identifier_offset
kappa_complex_multigraph.add_node(agent_global_id, kappa=agent)
for bond in agent.get_bond_identifiers():
# if we've already seen this edge and it is in the dangling list, it's partner has already been matched,
# so we can add this terminus to the bond database and delete from the dangle list
if bond in dangle_bond_dict:
# special case for self-bonds: the first pass already got the alphabetically lower terminus,
# so this pass should get the second terminus of the bond
if len(agent.get_terminii_of_bond(bond)) > 1:
second_terminus = agent.get_terminii_of_bond(bond)[1]
else:
second_terminus = agent.get_terminii_of_bond(bond)[0]
this_bond_type = KappaBond(agent_one=dangle_bond_dict[bond]['agent name'],
site_one=dangle_bond_dict[bond]['site name'],
agent_two=agent.get_agent_name(),
site_two=second_terminus)
paired_bond_list.append((dangle_bond_dict[bond]['agent id'], agent_global_id,
{'bond id': bond, 'bond type': this_bond_type}))
del dangle_bond_dict[bond]
else:
dangle_bond_dict[bond] = {'agent id': agent_global_id,
'agent name': agent.get_agent_name(),
'site name': agent.get_terminii_of_bond(bond)[0]}
agent_counter += 1
# if anything remains in the dangling bond list, it means we failed to pair at least one bond terminus
if dangle_bond_dict:
raise ValueError('Dangling bonds <' + ','.join(dangle_bond_dict.keys()) +
'> found in complex: ' + self._raw_expression)
kappa_complex_multigraph.add_edges_from(paired_bond_list)
return kappa_complex_multigraph
def to_cytoscape_cx(self) -> List[Dict]:
    """Export this complex as a Cytoscape CX structure.

    The returned list of aspect dictionaries can be serialized with
    ``json.dump`` and the resulting file opened by Cytoscape as a ``.cx``
    file, e.g.::

        my_cx = my_complex.to_cytoscape_cx()
        with open('my_cx.cx', 'w') as out_file:
            json.dump(my_cx, out_file)
    """
    cx_structure = self._kappa_to_cytoscape_cx()
    network_attributes = [{'n': 'name', 'v': 'network'}]
    cx_structure.insert(2, {'networkAttributes': network_attributes})
    return cx_structure
class NetMap():
    """Class for representing network maps, used for automorphism checking.

    Two maps compare equal iff the hashes of their sorted origin/image node
    and edge collections coincide (see ``__hash__``).
    """
    node_map: Set[Tuple[int, int]]
    """Set of tuples, holding the node index of one network that matches the node index in the other."""
    edge_map: Set[Tuple[int, int]]
    """Set of tuples, holding the edge index of one network that matches the edge index in the other."""

    def __init__(self):
        self.node_map = set()
        self.edge_map = set()

    def __str__(self) -> str:
        nodes: str = ', '.join(['{} -> {}'.format(a, b) for a, b in self.node_map])
        edges: str = ', '.join(['{} -> {}'.format(a, b) for a, b in self.edge_map])
        return 'Nodes: {}\nEdges: {}'.format(nodes, edges)

    def __eq__(self, other) -> bool:
        # equality delegates to the order-insensitive hash below
        return hash(self) == hash(other)

    def __hash__(self) -> int:
        # bug fix: an empty node_map used to crash the zip(*...) unpacking with a
        # ValueError; treat it as a pair of empty tuples instead
        if self.node_map:
            origin_n, image_n = zip(*self.node_map)
        else:
            origin_n, image_n = (), ()
        if len(self.edge_map) > 0:
            origin_e, image_e = zip(*self.edge_map)
            return hash((tuple(sorted(origin_n)), tuple(sorted(image_n)),
                         tuple(sorted(origin_e)), tuple(sorted(image_e))))
        return hash((tuple(sorted(origin_n)), tuple(sorted(image_n)), None, None))
def embed_and_map(ka_query: KappaComplex, ka_target: KappaComplex) -> Tuple[List[NetMap], Set[NetMap]]:
    """
    Calculates all the embeddings of `ka_query` into `ka_target`. First element is the list of all mappings,
    while second is the set of automorphism-corrected mappings. For a rotational symmetry:

    >>> from KaSaAn.core.KappaComplex import embed_and_map, KappaComplex
    >>> my_comp = KappaComplex('Bob(h[10], t[11]), Bob(h[11], t[12]), Bob(h[12], t[10])')
    >>> maps_all, maps_unique = embed_and_map(my_comp, my_comp)
    >>> maps_all
    [[(0, 0), (2, 2), (1, 1)], [(0, 1), (2, 0), (1, 2)], [(0, 2), (2, 1), (1, 0)]]
    >>> maps_unique
    [[(0, 0), (2, 2), (1, 1)]]

    There are three ways of satisfying the query in the target, and these rotations inflate the number of "embeddings".
    However, the set of identifiers making up the image of the query in the target is the same for these three:
    `(0, 2, 1)`, `(1, 0, 2)`, and `(2, 1, 0)` are equivalent, and so the target contains only one copy of the query.
    These dual-purpose interpretation of the "embedding" concept yields a function that returns both.
    """
    # litany of short circuits; fixed to return an empty *set* as the second element,
    # matching the declared return type (previously an empty list)
    if ka_query.get_size_of_complex() > ka_target.get_size_of_complex():    # not enough agents
        return [], set()
    if ka_query.get_number_of_bonds() > ka_target.get_number_of_bonds():    # not enough bonds
        return [], set()
    query_comp = ka_query.get_complex_composition()
    target_comp = ka_target.get_complex_composition()
    if not set(query_comp) <= set(target_comp):         # query agent type(s) not present in target
        return [], set()
    # membership is guaranteed by the subset check above, so only abundances remain to verify
    for agent_type, query_abundance in query_comp.items():
        if target_comp[agent_type] < query_abundance:   # target sum formula too small
            return [], set()
    # start from the least abundant type, get their node indexes in query network and target network;
    # from the <<query is improper subset of target>> check above, all of query's are in target by type, so query's
    # minimum must be a type in common; moreover all query's type abundances are equal or greater in target, so if
    # a type is the minimum in query, its abundance will also be either *the*, or just *a*, minimum in target
    # NOTE(review): next(iter(...)) returns the *first* composition key, not necessarily the least
    # abundant one as the comment above intends; preserved as-is because changing the anchor type
    # changes which candidates the greedy traversal explores -- TODO confirm intended behavior
    common_min: KappaAgent = next(iter(query_comp))
    query_network = ka_query.to_networkx()
    target_network = ka_target.to_networkx()
    if not nx.is_connected(query_network):
        raise ValueError('Error: query is not a connected graph.')
    if not nx.is_connected(target_network):
        raise ValueError('Error: target is not a connected graph.')
    # collect the node indexes carrying the anchor agent type in each network
    common_min_q: List[int] = []
    for node_id in query_network.nodes:
        if common_min in query_network.nodes[node_id]['kappa']:
            common_min_q.append(node_id)
    common_min_t: List[int] = []
    for node_id in target_network.nodes:
        if common_min in target_network.nodes[node_id]['kappa']:
            common_min_t.append(node_id)
    # embark on systematic traversal: anchor the query at one node, try every candidate anchor in the target
    query_start_node = common_min_q[0]
    maps_all: List[NetMap] = []
    maps_distinct: Set[NetMap] = set()
    for target_start_node in common_min_t:
        map_found = _traverse_from(query_network, target_network, query_start_node, target_start_node)
        if map_found:
            maps_all.append(map_found)
            maps_distinct.add(map_found)
    return maps_all, maps_distinct
def _traverse_from(query_net: nx.MultiGraph, target_net: nx.MultiGraph, q_start: int, t_start: int) -> NetMap:
    """Attempt a traversal of `target_net`, starting at `t_start`, matched to `q_start`, following
    `query_net`'s topology. Returns the completed NetMap on success, or None if any node or
    edge fails to match (previously an empty list, despite the NetMap annotation)."""
    # stack of (query node, target node) pairs still to be examined; used to traverse the
    # target network, mapping each query node to its target image, and checking that the
    # bond which led to the query node would also work in the target network
    node_stack: Deque[Tuple[int, int]] = deque()
    node_stack.append((q_start, t_start))
    nodes_visited: Set[int] = set()
    edges_followed: Set[int] = set()
    network_map = NetMap()
    while node_stack:
        q_node, t_node = node_stack.pop()
        if q_node in nodes_visited:
            # if we visited this node already, and it got added to the queue,
            # it must have passed both of these checks
            node_matched = True
            edge_matched = True
        else:
            node_matched: bool = _node_match(query_net, target_net, q_node, t_node)
            edge_matched: bool = _edge_match(query_net, target_net, q_node, t_node)
        if not (node_matched and edge_matched):
            return None
        # record the mapping, then push matching neighbors:
        # add nodes of query, mapped to their images in target, to the node map;
        # add their respective bonds, mapped to their images in target, to the edge map
        network_map.node_map.add((q_node, t_node))
        nodes_visited.add(q_node)
        for _, q_neighbor, q_data in query_net.edges(q_node, data=True):
            # orient the bond so it reads outgoing from the current node
            q_type: KappaBond = q_data['bond type'] if q_node < q_neighbor else q_data['bond type'].reverse()
            q_id = int(q_data['bond id'])
            for _, t_neighbor, t_data in target_net.edges(t_node, data=True):
                t_type: KappaBond = t_data['bond type'] if t_node < t_neighbor else t_data['bond type'].reverse()
                t_id = int(t_data['bond id'])
                if q_type == t_type:
                    if q_id not in edges_followed:      # cycle prevention
                        edges_followed.add(q_id)
                        network_map.edge_map.add((q_id, t_id))
                        node_stack.append((q_neighbor, t_neighbor))
    return network_map
def _node_match(query_net: nx.MultiGraph, target_net: nx.MultiGraph, query_node: int, target_node: int) -> bool:
    """Special purpose matcher that ignores bond types, considering only if sites are bound. Internal states are
    matched normally. Every port of the query agent must be satisfied by some port of the target agent."""
    query = query_net.nodes[query_node]['kappa']
    target = target_net.nodes[target_node]['kappa']
    matched_any = False
    for site in query.get_agent_ports():
        s_name = site.get_port_name()
        s_stat = '{' + site.get_port_int_state() + '}'
        # keep only the wildcard-compatible bond markers; any concrete bond collapses to "bound to something"
        s_bond = '[' + site.get_port_bond_state() + ']' if site.get_port_bond_state() in ['.', '_', '#'] else '[_]'
        relaxed_port = KappaPort(s_name + s_bond + s_stat)
        if not any(relaxed_port in t_site for t_site in target.get_agent_ports()):
            # bug fix: previously returned the running flag, which was already True once the
            # first site had matched, letting later mismatching sites slip through
            return False
        matched_any = True
    # NOTE(review): a query agent with no ports still reports False, preserving the
    # original's empty-loop behavior -- TODO confirm this is intended
    return matched_any
def _edge_match(query_net: nx.MultiGraph, target_net: nx.MultiGraph, query_node: int, target_node: int) -> bool:
    """
    Special purpose matcher that only compares bond types. Returns True iff every oriented
    bond type on the query node is present among the target node's oriented bond types.

    Rigidity in bonds in addition to site ordering!
    Since there is at maximum one of any bond type per node in the network, finding it means finding the only path
    like it, reducing the search space. This relies on the bonds being oriented, in this case I chose "outgoing" from
    the current node. Since agent identifiers can be written in arbitrary order, I can't rely on just a<b comparison
    at the identifier level to know if the bond's string was written in that same direction; to resolve this one must
    compare the order of node introduction, which tracks the order of edge / node declaration.
    """
    # orient bonds so as to be read outgoing from the node under comparison
    query_edges: List[Tuple[int, int, Dict]] = query_net.edges(query_node, data=True)
    query_bond_types: List[KappaBond] = []
    for here, dest, bond in query_edges:
        query_bond_types.append(bond['bond type'] if here < dest else bond['bond type'].reverse())
    target_edges: List[Tuple[int, int, Dict]] = target_net.edges(target_node, data=True)
    target_bond_types: List[KappaBond] = []
    for here, dest, bond in target_edges:
        target_bond_types.append(bond['bond type'] if here < dest else bond['bond type'].reverse())
    # check now-oriented bonds: every query bond must have a counterpart in the target
    matched_any = False
    for query_bond_type in query_bond_types:
        if not any(query_bond_type == target_bond_type for target_bond_type in target_bond_types):
            # bug fix: previously returned the running flag, which was already True once the
            # first bond had matched, letting later mismatching bonds slip through
            return False
        matched_any = True
    # NOTE(review): a node with no bonds still reports False, preserving the original's
    # empty-loop behavior -- TODO confirm single-agent queries are meant to be rejected
    return matched_any
|
from pico2d import *
import game_framework
from bullet import *
from buff import *
from hpmp import *
import game_world
import main_state
# Animation / action state identifiers stored in Player.now_animation.
IDLE_STATE = 0
RUN_STATE = 1
BACKSTEP_STATE = 2
JUMP_STATE = 3
DOWN_STATE = 4
ATTACK_STATE = 5
DEFENSE_STATE = 6
HP_HEAL_STATE = 7
MP_HEAL_STATE = 8
GUN_STATE = 9
SPECIAL_ATTACK_STATE = 10
# Pacing: seconds per action, and its reciprocal (actions per second).
TIME_PER_ACTION = 0.5
ACTION_PER_TIME = 1.0 / TIME_PER_ACTION
# World-to-screen scale: 10 pixels represent 0.3 meters.
PIXEL_PER_METER = (10.0 / 0.3)
# Run speed, converted km/h -> m/min -> m/s -> pixels/s.
RUN_SPEED_KMPH = 20.0
RUN_SPEED_MPM = (RUN_SPEED_KMPH * 1000.0/60.0)
RUN_SPEED_MPS = (RUN_SPEED_MPM / 60)
RUN_SPEED_PPS = (RUN_SPEED_MPS * PIXEL_PER_METER)
class Player:
    """Turn-based player character driven as a small animation state machine.

    ``now_animation`` holds one of the module-level ``*_STATE`` constants; each
    ``*_Animation`` method advances ``self.frame`` and, when its cycle ends,
    returns to idle and/or hands the turn to the monster via ``main_state``.

    Fixes over the original: ``is``/``is not`` comparisons against int literals
    (SyntaxWarning since Python 3.8, CPython-cache dependent) replaced with
    ``==``; the attack sound is loaded only when played instead of every frame.
    """

    def __init__(self):
        self.x, self.y = 100, 410
        self.frame_x, self.frame_y = None, None   # current sprite-sheet clip origin
        self.size_x, self.size_y = 80, 100        # current sprite clip size
        self.image = load_image('sprites\\player\\zerox.png')
        self.my_turn = True
        self.die = load_image('sprites\\player\\youdie.png')
        self.hp, self.mp = Hp(), Mp()
        self.gun_is_fired = False
        self.bullet = None
        self.buff = Barrier(self.x, self.y)
        self.now_animation = IDLE_STATE
        # (x, y, width, height) clip rectangles for each frame of each animation
        self.Idle_animation_pos = [(0, 900, 75, 100), (75, 900, 75, 100)]  # 0
        self.run_animation_pos = [(0, 500, 75, 100), (80, 500, 75, 100), (160, 500, 75, 100), (240, 500, 75, 100), (320, 500, 75, 100), (400, 500, 75, 100), (480, 500, 75, 100), (570, 500, 75, 100), (650, 500, 75, 100), (740, 500, 75, 100), (820, 500, 75, 100), (895, 500, 75, 100), (975, 500, 75, 100), (1055, 500, 75, 100), (1135, 500, 75, 100), (1220, 500, 75, 100)]  # 1
        self.attack_animation_pos = [(0, 420, 80, 100), (90, 420, 80, 100), (182, 420, 80, 100), (265, 420, 160, 100), (440, 420, 160, 100), (605, 420, 140, 100), (760, 420, 105, 100), (880, 420, 105, 100), (1000, 420, 80, 100), (90, 420, 80, 100)]  # 5
        self.defence_animation_pos = [(0, 0), (50, 0), (100, 0)]
        self.gun_animation_pos = [(0, 630, 80, 100), (75, 630, 80, 100), (155, 630, 80, 100), (240, 630, 80, 100), (325, 630, 80, 100), (408, 630, 88, 100), (492, 630, 100, 100), (600, 630, 120, 100), (325, 630, 80, 100), (240, 630, 80, 100), (155, 630, 80, 100)]  # 9
        self.fire_animation_pos = [(5, 1260, 120, 120), (117, 1250, 150, 140), (270, 1250, 140, 140), (410, 1250, 140, 150), (550, 1250, 140, 150), (685, 1265, 150, 160), (835, 1265, 150, 160), (980, 1265, 150, 180), (1130, 1265, 150, 180), (1260, 1265, 140, 180), (0, 1025, 140, 180)]
        self.frame = 0
        # Board position; tiles appear to be numbered 1..12 on a 4-wide grid
        # (see Use_Possible) -- TODO confirm against the board layout.
        self.on_tile = 5

    def draw(self):
        """Render the sprite, HP/MP bars, buff, death screen and any live bullet."""
        self.image.clip_draw(self.frame_x, self.frame_y, self.size_x, self.size_y, self.x, self.y)
        self.hp.draw()
        self.mp.draw()
        self.buff.draw()
        if self.hp.hp <= 0:
            self.die.draw(400, 300)
        if self.gun_is_fired:
            self.bullet.draw()

    def update(self):
        """Advance the animation for the current state; idle while not our turn."""
        if not self.my_turn:
            self.Idle_Animation()
        elif self.now_animation == IDLE_STATE:
            self.Idle_Animation()
        elif self.now_animation == RUN_STATE:
            self.Run_Animation()
        elif self.now_animation == BACKSTEP_STATE:
            self.Back_Animation()
        elif self.now_animation == JUMP_STATE:
            self.Jump_Animation()
        elif self.now_animation == DOWN_STATE:
            self.Down_Animation()
        elif self.now_animation == ATTACK_STATE:
            self.Attck_Animation()
        elif self.now_animation == DEFENSE_STATE:
            self.Defense_Animation()
        elif self.now_animation == HP_HEAL_STATE:
            self.Heal_Animation()
        elif self.now_animation == MP_HEAL_STATE:
            self.Mana_Animation()
        elif self.now_animation == GUN_STATE:
            self.Gun_Animation()
        elif self.now_animation == SPECIAL_ATTACK_STATE:
            self.Fire_Animation()
        if self.gun_is_fired:
            self.bullet.update()

    def handle_events(self):
        pass

    def update_animation(self, num):
        """Switch to animation state *num* and restart its frame counter."""
        self.now_animation = num
        self.frame = 0

    def Idle_Animation(self):
        """Two-frame idle loop."""
        self.frame_x, self.frame_y, self.size_x, self.size_y = self.Idle_animation_pos[self.frame]
        self.frame = (self.frame + 1) % 2

    def Run_Animation(self):
        """Move one tile right over 16 frames, then return to idle and end the turn."""
        self.frame_x, self.frame_y, self.size_x, self.size_y = self.run_animation_pos[self.frame]
        self.frame = (self.frame + 1) % 16
        self.x += 200 // 16
        if self.buff.buff_on:
            self.buff.Pos_Update(self.x, self.y)
        if self.frame == 15:
            self.Change_to_IDLE()
            self.on_tile += 1
            self.Change_My_Turn()

    def Back_Animation(self):
        """Move one tile left over 16 frames, then return to idle and end the turn."""
        self.frame_x, self.frame_y, self.size_x, self.size_y = self.run_animation_pos[self.frame]
        self.frame = (self.frame + 1) % 16
        self.x -= 200 // 16
        if self.buff.buff_on:
            self.buff.Pos_Update(self.x, self.y)
        if self.frame == 15:
            self.Change_to_IDLE()
            self.on_tile -= 1
            self.Change_My_Turn()

    def Jump_Animation(self):
        """Move one row up (tile - 4) over 16 frames, then end the turn."""
        self.frame_x, self.frame_y, self.size_x, self.size_y = self.run_animation_pos[self.frame]
        self.frame = (self.frame + 1) % 16
        self.y += 160//16
        if self.buff.buff_on:
            self.buff.Pos_Update(self.x, self.y)
        if self.frame == 15:
            self.Change_to_IDLE()
            self.on_tile -= 4
            self.Change_My_Turn()

    def Down_Animation(self):
        """Move one row down (tile + 4) over 16 frames, then end the turn."""
        self.frame_x, self.frame_y, self.size_x, self.size_y = self.run_animation_pos[self.frame]
        self.frame = (self.frame + 1) % 16
        self.y -= 160 // 16
        if self.buff.buff_on:
            self.buff.Pos_Update(self.x, self.y)
        if self.frame == 15:
            self.Change_to_IDLE()
            self.on_tile += 4
            self.Change_My_Turn()

    def Attck_Animation(self):
        """Sword attack; on the final frame play the sound, damage a co-located monster, end the turn."""
        self.frame_x, self.frame_y, self.size_x, self.size_y = self.attack_animation_pos[self.frame]
        self.frame = (self.frame + 1) % 10
        if self.frame == 9:
            # load the clip only when it is actually played (was reloaded every frame)
            wav = load_wav('music\\sword.wav')
            wav.set_volume(128)
            wav.play()
            if self.on_tile == main_state.monster.on_tile:
                main_state.monster.hp -= 2
            self.Change_My_Turn()
            self.Change_to_IDLE()

    def Defense_Animation(self):
        """Raise the barrier buff, then immediately return to idle and end the turn."""
        self.now_animation = IDLE_STATE
        self.buff.Pos_Update(self.x, self.y)
        self.buff.update()
        self.frame = 0
        self.Change_My_Turn()

    def Heal_Animation(self):
        """Restore 2 HP (hp.update takes a delta with inverted sign) and end the turn."""
        self.hp.update(-2)
        self.now_animation = IDLE_STATE
        self.Change_My_Turn()

    def Mana_Animation(self):
        """Restore 2 MP (mp.update takes a delta with inverted sign) and end the turn."""
        self.mp.update(-2)
        self.now_animation = IDLE_STATE
        self.Change_My_Turn()

    def Gun_Animation(self):
        """Fire the gun: spend 3 MP at frame 0, spawn a Bullet at frame 8, idle at frame 10."""
        if self.frame == 0:
            self.mp.update(3)
        elif self.frame == 8:
            self.gun_is_fired = True
            self.bullet = Bullet(self.x, self.y)
            game_world.add_object(self.bullet, 1)
        elif self.frame == 10:
            self.Change_to_IDLE()
        self.frame_x, self.frame_y, self.size_x, self.size_y = self.gun_animation_pos[self.frame]
        self.frame = (self.frame + 1) % 11
        # turn hand-off is performed by the Bullet when it resolves

    def Fire_Animation(self):
        """Special attack: spend 5 MP; at frame 10 damage a monster within one tile ahead, end turn."""
        if self.frame == 0:
            self.mp.update(5)
        elif self.frame == 10:
            self.Change_to_IDLE()
            self.Change_My_Turn()
            if self.on_tile <= main_state.monster.on_tile <= self.on_tile + 1:
                main_state.monster.hp -= 10
        self.frame_x, self.frame_y, self.size_x, self.size_y = self.fire_animation_pos[self.frame]
        self.frame = (self.frame + 1) % 11

    def Change_to_IDLE(self):
        """Reset the frame counter and drop back to the idle state."""
        self.frame = 0
        self.now_animation = IDLE_STATE

    def Change_My_Turn(self):
        """Toggle whose turn it is; when handing over, trigger the monster's next action."""
        if main_state.monster.hp <= 0:
            return
        elif not self.my_turn:
            self.my_turn = True
        else:
            self.my_turn = False
            main_state.monster.Change_My_Turn()
            main_state.monster.My_Next_Action()

    def My_Turn_is_Now(self):
        """Return True while it is the player's turn."""
        return self.my_turn

    def Use_Possible(self, card):
        """Return whether *card* can be used from the current tile / current MP.

        Tile numbers 1..12 form a 4-wide grid: cards 1/2 are blocked on the
        right/left column, cards 3/4 on the top/bottom row, cards 9/10 need
        3 / 5 MP respectively. Card-to-action mapping inferred from the
        animation costs -- TODO confirm against the card UI.
        """
        if card == 1 and self.on_tile in (4, 8, 12):    # rightmost column
            return False
        if card == 2 and self.on_tile in (1, 5, 9):     # leftmost column
            return False
        if card == 3 and 1 <= self.on_tile <= 4:        # top row
            return False
        if card == 4 and 9 <= self.on_tile <= 12:       # bottom row
            return False
        if card == 9 and self.mp.mp < 3:                # gun cost
            return False
        if card == 10 and self.mp.mp < 5:               # special attack cost
            return False
        # NOTE(review): the original fell through returning None (falsy) here,
        # which would make every allowed card read as unusable; explicit True
        # assumed to be the intended behavior.
        return True
import RPi.GPIO as GPIO
import time
# Use Broadcom (BCM) pin numbering for all GPIO calls.
GPIO.setmode(GPIO.BCM)
# BCM pin wired to the sensor's digital output line.
DIGIT=23
GPIO.setup(DIGIT, GPIO.IN)
def read_sensor():
    """Return the current digital level (0 or 1) read from the sensor pin."""
    return GPIO.input(DIGIT)
|
""" General utility methods
"""
import os
import click
import json
def extract_path(path):
    """ Split *path* into its directory part and basename.

    The directory component is normalized to carry both a leading and a
    trailing slash; the second tuple element is the bare file name.
    """
    directory = append_slash(prepend_slash(os.path.dirname(path)))
    return (directory, os.path.basename(path))
def prepend_slash(path):
    """ Ensure *path* starts with a '/' prefix.
    """
    if path.startswith('/'):
        return path
    return f'/{path}'
def append_slash(path):
    """ Ensure *path* ends with a '/' suffix.
    """
    if path.endswith('/'):
        return path
    return f'{path}/'
def get_stdin():
    """ Read JSON from standard input and return its contents as a list.

    A JSON array/tuple becomes a list of its elements; any other JSON value
    is wrapped in a single-element list; empty input yields an empty list.
    """
    raw = click.get_text_stream('stdin').read()
    if not raw:
        return []
    data = json.loads(raw)
    if isinstance(data, (list, tuple)):
        return list(data)
    return [data]
|
import os
import glob
import pandas as pd
import numpy as np
def build_primary_pset_tables(pset_dict, pset_name):
    """
    Build the tissue, drug, and gene tables for a PSet and return them
    in a dictionary, with table names as the keys.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @param pset_name: [`string`] The name of the PSet
    @return: [`dict`] A dictionary of primary tables, with table names as keys
    """
    tables = {
        'dataset': pd.Series(pset_name, name='name'),
        'tissue': build_tissue_df(pset_dict),
        'drug': build_drug_df(pset_dict),
        'drug_annotation': build_drug_annotation_df(pset_dict),
        'cell': build_cell_df(pset_dict),
    }
    # Gene tables only exist when the PSet carries molecular profiles
    # (TODO - check with chris)
    if 'molecularProfiles' in pset_dict:
        tables['gene'] = build_gene_df(pset_dict)
        tables['gene_annotation'] = build_gene_annotation_df(pset_dict)
    return tables
def build_gene_df(pset_dict):
    """
    Build a table containing all genes in a dataset.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @return: [`Series`] The gene table (a Series named 'name')
    """
    # Collect the unique feature names per molecular data type; pd.concat replaces
    # Series.append, which was removed in pandas 2.0
    pieces = []
    for mDataType in pset_dict['molecularProfiles']:
        pieces.append(pd.Series(pd.unique(
            pset_dict['molecularProfiles'][mDataType]['rowData']['.features']),
            name='name', dtype='str'))
    if pieces:
        gene_df = pd.concat(pieces, ignore_index=True)
    else:
        gene_df = pd.Series([], name='name', dtype='str')
    # Many ENSEMBL gene IDs have the version (ex. ENST00000456328.2 instead
    # of ENST00000456328); remove all version numbers. The pattern uses
    # [0-9]+ so multi-digit versions (e.g. '.12') are stripped too.
    gene_df = gene_df.replace(r'\.[0-9]+$', '', regex=True)
    gene_df.drop_duplicates(inplace=True)
    return gene_df
def build_tissue_df(pset_dict):
    """
    Build a table containing all tissues in a dataset.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @return: [`Series`] The tissue table
    """
    unique_tissues = pd.unique(pset_dict['cell']['tissueid'])
    return pd.Series(unique_tissues, name='name')
def build_drug_df(pset_dict):
    """
    Build a table containing all drugs in a dataset.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @return: [`Series`] The drug table
    """
    unique_drugs = pd.unique(pset_dict['drug']['drugid'])
    return pd.Series(unique_drugs, name='name')
def build_gene_annotation_df(pset_dict):
    """
    Build a table mapping each gene in a dataset to its gene annotations.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @return: [`DataFrame`] A table of all gene annotations, mapped to genes
    """
    annotation_cols = ['gene_id', 'symbol', 'gene_seq_start', 'gene_seq_end']
    # Collect per-data-type annotation frames; pd.concat replaces
    # DataFrame.append, which was removed in pandas 2.0
    pieces = []
    for mDataType in pset_dict['molecularProfiles']:
        df = pset_dict['molecularProfiles'][mDataType]['rowData'].copy()
        # Get gene annotation columns ('Symbol' is optional)
        cols = ['.features']
        if 'Symbol' in df.columns:
            cols.append('Symbol')
        df = df[cols]
        df = df.rename(columns={'.features': 'gene_id', 'Symbol': 'symbol'})
        pieces.append(df)
    if pieces:
        # reindex fills any missing annotation columns with NaN, matching the
        # old append-onto-empty-frame behavior
        gene_annotation_df = pd.concat(pieces, ignore_index=True).reindex(columns=annotation_cols)
    else:
        gene_annotation_df = pd.DataFrame(columns=annotation_cols, dtype='str')
    # Remove all ENSEMBL gene id version numbers (ex. ENST00000456328.2 instead
    # of ENST00000456328); [0-9]+ also strips multi-digit versions
    gene_annotation_df['gene_id'] = gene_annotation_df['gene_id'].replace(
        r'\.[0-9]+$', '', regex=True)
    gene_annotation_df.drop_duplicates(subset=['gene_id'], inplace=True)
    return gene_annotation_df
def build_drug_annotation_df(pset_dict):
    """
    Build a table mapping each drug in a dataset to its drug annotations.

    @param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
    @return: [`DataFrame`] A table of all drug annotations, mapped to drugs
    """
    columns_of_interest = ['rownames', 'smiles', 'inchikey', 'cid', 'FDA']
    renaming = {'rownames': 'drug_id', 'cid': 'pubchem', 'FDA': 'fda_status'}
    annotations = pset_dict['drug'][columns_of_interest].copy()
    return annotations.rename(columns=renaming)
# TODO - confirm that you're using the correct cell id
def build_cell_df(pset_dict):
"""
Build a table containing all the cells in a dataset, mapped to their tissues.
@param pset_dict: [`dict`] A nested dictionary containing all tables in the PSet
@return: [`DataFrame`] A table of all cell lines, mapped to tissues
"""
cell_df = pset_dict['cell'][['cellid', 'tissueid']].copy()
cell_df.rename(columns={'cellid': 'name',
'tissueid': 'tissue_id'}, inplace=True)
return cell_df
|
class BaseVocoder:
    """Abstract base class for vocoders that turn mel spectrograms into audio waveforms."""

    def __init__(self, device):
        # Compute device handle used by concrete subclasses -- presumably a
        # CPU/GPU identifier; TODO confirm against callers.
        self._device = device

    def synthesize(self, mel):
        """Synthesize a waveform from mel spectrogram `mel`; subclasses must override."""
        raise NotImplementedError("Please subclass!")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.