text string | size int64 | token_count int64 |
|---|---|---|
from flask import abort, Flask, jsonify
import sqlite3
import re
from Suomipelit.jsonencoder import OmaEncoder
from Suomipelit.models import Peli, Peliarvostelu, Kappale, Kuva
app = Flask(__name__)
# Install the project's custom JSON encoder so the model objects
# (Peli, Kuva, Kappale, Peliarvostelu) can be serialized by jsonify().
app.json_encoder = OmaEncoder
@app.route("/api/pelit")
def pelit():
    """Return the game listing as a JSON response."""
    kaikki_pelit = lataa_pelit()
    return jsonify(kaikki_pelit)
def lataa_pelit():
    """Load the first five games (ordered by id) from the database.

    Returns:
        list[Peli]: populated game objects.
    """
    connection = sqlite3.connect("suomipelit.db")
    connection.row_factory = sqlite3.Row
    try:
        c = connection.cursor()
        pelit = []
        for pelirivi in c.execute("SELECT * FROM pelit order by id asc LIMIT 0,5"):
            # fix: pass the *connection*, not the cursor `c` — muodostaPeli
            # calls connection.cursor() internally, which would raise
            # AttributeError on a Cursor for any reviewed game row.
            peli = muodostaPeli(pelirivi, connection)
            pelit.append(peli)
        return pelit
    finally:
        # fix: the connection was previously leaked on every request
        connection.close()
@app.route("/api/pelit/<id>")
def peli(id):
    """Return one game as JSON; 404 for an unknown or non-numeric id."""
    # id may contain digits only; fix: a non-numeric id used to raise
    # ValueError and produce an HTTP 500 — treat it as "not found" instead.
    try:
        clean_id = int(id)
    except ValueError:
        abort(404)
    peli = lataa_peli(clean_id)
    if peli is not None:
        return jsonify(peli)
    abort(404)
def lataa_peli(id):
    """Load a single game by numeric id.

    Args:
        id: integer primary key of the `pelit` row.

    Returns:
        Peli or None when no row matches.
    """
    connection = sqlite3.connect("suomipelit.db")
    connection.row_factory = sqlite3.Row
    try:
        c = connection.cursor()
        c.execute("select * from pelit where id = ?", (id,))
        peli = c.fetchone()
        if peli is not None:
            return muodostaPeli(peli, connection)
        return None
    finally:
        # fix: the connection was previously leaked on every request
        connection.close()
def muodostaPeli(pelirivi, connection):
    """Build a Peli object from a `pelit` table row.

    Args:
        pelirivi: sqlite3.Row from the `pelit` table.
        connection: open sqlite3.Connection; a fresh cursor is opened on it
            for the review-paragraph (`kappale`) query.

    Returns:
        Peli: populated game; `peli.arvostelu` is None unless the row is
        marked as reviewed (`uusittu == 1`).
    """
    peli = Peli(pelirivi["id"])
    peli.nimi = pelirivi["nimi"]
    peli.tekija = pelirivi["tekija"]
    peli.url = pelirivi["url"]
    peli.kuvaus = pelirivi["kuvaus"]
    peli.vaatimukset = pelirivi["vaatimukset"]

    pelikuva = Kuva(pelirivi["id"])
    pelikuva.asemointi = None
    pelikuva.kuvateksti = None
    # Prefer the large image when present and non-empty (fix: `is not None`
    # instead of `!= None`).
    if pelirivi["kuva_iso"] is not None and len(pelirivi["kuva_iso"]) > 0:
        pelikuva.tiedosto = pelirivi["kuva_iso"]
    else:
        pelikuva.tiedosto = pelirivi["kuva"]
    peli.kuva = pelikuva

    if pelirivi["uusittu"] == 1:
        arvostelu = Peliarvostelu()
        arvostelu.julkaistu = pelirivi["paivays"]
        arvostelu.kirjoittaja = pelirivi["user"]
        kappaleet = []
        for rivi in connection.cursor().execute(
                "SELECT * FROM kappale where artikkeli_id = ? and kaytto='PELI' "
                "order by artikkeli_id asc, sivu asc, jarjestys",
                (pelirivi["id"],)):
            kappale = Kappale(rivi["id"], rivi["otsikko"], rivi["teksti"])
            kappale.artikkeliId = rivi["artikkeli_id"]
            kappale.sivu = rivi["sivu"]
            # fix: guard a NULL kuva column — len(None) raised TypeError
            if rivi["kuva"] is not None and len(rivi["kuva"]) > 0:
                kuva = Kuva(rivi["id"])
                if rivi["kuva_iso"] is not None and len(rivi["kuva_iso"]) > 0:
                    kuva.tiedosto = rivi["kuva_iso"]
                else:
                    kuva.tiedosto = rivi["kuva"]
                kuva.asemointi = rivi["asemointi"]
                kuva.kuvateksti = rivi["kuvateksti"]
            else:
                kuva = None
            kappale.kuva = kuva
            kappaleet.append(kappale)
        arvostelu.kappaleet = kappaleet
        peli.arvostelu = arvostelu
    else:
        peli.arvostelu = None
    return peli
| 2,898 | 1,133 |
#!/usr/bin/python3
# -*- coding:utf-8 -*-
#
# Copyright (c) 2021-present SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import pathlib
import click
import ices_erf32_generator_main
# (removed) `global ices_config` — a module-level `global` statement is a
# no-op, and the name did not even match the variable used below
# (`ices_erf32_config`).
@click.command()
@click.option(
    "--row",
    default=0,
    prompt="Execute row",
    help="Row number used to select which YAML-file to generate ICES-Erf32 from.",
)
def run_erf32_generator_command(row):
    """Generate ICES-Erf32 output for one config row, or all rows when 0."""
    global ices_erf32_config
    if not 0 <= row <= len(ices_erf32_config):
        print("\n\nERROR: Wrong value. Please try again.\n\n")
        return
    generator = ices_erf32_generator_main.IcesErf32Generator()
    # Row 0 means "run every configuration"; otherwise rows are 1-based.
    selected = ices_erf32_config if row == 0 else [ices_erf32_config[row - 1]]
    for config_file in selected:
        generator.generate_erf32(config_file)
if __name__ == "__main__":
    # Collect and sort every ICES-Erf32 YAML configuration file.
    ices_erf32_config = sorted(
        str(file_path)
        for file_path in pathlib.Path("erf32_config").glob("ices_erf32_*.yaml")
    )
    # Print the menu before the click prompt appears.
    print("\n\nICES ERF 3.2 generator.")
    print("-----------------------------")
    print("Select row number. Press enter to run all.")
    print("Press Ctrl-C to terminate.\n")
    for number, config_path in enumerate(ices_erf32_config, start=1):
        print(number, " ", config_path)
    print("")
    # Hand control to the click command.
    run_erf32_generator_command()
| 1,563 | 570 |
# https://leetcode.com/problems/missing-number/
# ---------------------------------------------------
from typing import List
# Runtime Complexity: O(N)
# Space Complexity: O(1)
# Idea: XOR with all numbers in range [0, n] and XOR with all nums.
class Solution:
    def missingNumber(self, nums: List[int]) -> int:
        """Find the one value in [0, n] absent from nums.

        XOR of every index with every value cancels all pairs, leaving the
        missing number. O(n) time, O(1) space.
        """
        missing = len(nums)
        for index, value in enumerate(nums):
            missing ^= index ^ value
        return missing
# ---------------------------------------------------
# Test Cases
# ---------------------------------------------------
solution = Solution()
# Expected outputs: 2, 8, 1
for nums in ([3, 0, 1], [9, 6, 4, 2, 3, 5, 7, 0, 1], [0]):
    print(solution.missingNumber(nums))
| 752 | 247 |
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import numpy as np
from module_easyModel import EasyModel
from module_list import get_test_module
import pytest
# Modules under test, supplied by the project-level registry.
test_modules = get_test_module()
# Augmentation + normalization: small random rotation, then map pixel
# values to roughly [-1, 1].
transform = transforms.Compose(
    [transforms.RandomRotation(15),
     transforms.ToTensor(),
     transforms.Normalize((0.5, ), (0.5, ))])
# MNIST train/test splits are downloaded to ./data on first run.
trainset = torchvision.datasets.MNIST(root='./data',
                                      train=True,
                                      download=True,
                                      transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=100,
                                          shuffle=True,
                                          num_workers=2)
testset = torchvision.datasets.MNIST(root='./data',
                                     train=False,
                                     download=True,
                                     transform=transform)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=100,
                                         shuffle=False,
                                         num_workers=2)
# Digit classes 0..9 (as numpy uint8 values).
classes = tuple(np.linspace(0, 9, 10, dtype=np.uint8))
criterion = nn.CrossEntropyLoss()
@pytest.mark.parametrize("mode", ["normal","residual","dense"])
@pytest.mark.parametrize("test_module", test_modules)
def test_train_model(test_module,mode):
    """Smoke-train EasyModel on MNIST and assert it beats chance.

    For each (module, mode) combination: 2 epochs of Adam(lr=0.01) on the
    training loader, then one evaluation pass; accuracy must exceed 25%.
    NOTE(review): hard-requires a CUDA device ("cuda" throughout) — confirm
    the CI runner has a GPU.
    """
    print("start testing")
    net = EasyModel(1,10,test_module,mode=mode).to("cuda")
    optimizer = optim.Adam(net.parameters(), lr=0.01)
    for epoch in range(2):
        running_loss = 0.0
        for i, (inputs, labels) in enumerate(trainloader, 0):
            inputs = inputs.to("cuda")
            labels = labels.to("cuda")
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            # Report the mean loss every 100 mini-batches.
            if i % 100 == 99:
                print('[{:d}, {:5d}] loss: {:.3f}'
                      .format(epoch + 1, i + 1, running_loss / 100))
                running_loss = 0.0
    print('Finished Training')
    # Evaluation: count top-1 matches over the whole test set.
    correct = 0
    total = 0
    with torch.no_grad():
        for (images, labels) in testloader:
            images = images.cuda()
            labels = labels.cuda()
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy: {:.2f} %%'.format(100 * float(correct / total)))
    # Weak sanity threshold: well above the 10% random baseline.
    assert float(correct / total)>0.25
| 2,790 | 816 |
import numpy as np
import scipy.stats as st
import statsmodels as sm
from scipy import optimize
# Simulated data: binary outcome y (100x1) and two standard-normal regressors.
y = np.random.randint(2, size=(100,1))
x = np.random.normal(0,1,(100,2))
# Reference fit via statsmodels.
# NOTE(review): `import statsmodels as sm` does not import submodules, so
# `sm.discrete.discrete_model` likely raises AttributeError — confirm, and
# prefer `import statsmodels.api as sm` with `sm.Logit(y, x)`.
res_correct = sm.discrete.discrete_model.Logit(y,x).fit()
res_correct.params
def Logit(b,y,x):
    """Negative log-likelihood of the logit model, for use with minimizers.

    Args:
        b: coefficient vector, shape (k,) (or (k, 1)).
        y: binary outcomes, shape (n, 1).
        x: regressors, shape (n, k).

    Returns:
        Scalar negative log-likelihood -sum(y*log σ(xb) + (1-y)*log(1-σ(xb))).
    """
    n = x.shape[0]
    z = x @ b
    # Numerically stable form: log σ(z) = -logaddexp(0, -z) and
    # log(1-σ(z)) = -logaddexp(0, z); the original exp/log expressions
    # overflow for large |xb|.
    log_likelihood = (-y.T @ np.logaddexp(0.0, -z)
                      - (np.ones((n, 1)) - y).T @ np.logaddexp(0.0, z))
    return -log_likelihood[0]
# Sanity-evaluate the NLL at a fixed coefficient vector.
# fix: Logit's signature is (b, y, x) — the original call passed (y, x, b).
Logit(np.array((2, 1)), y, x)
s = x.shape[1]
# Start the optimizers from the zero vector (sized from x's column count).
b_0 = np.zeros(s)
optimize.minimize(Logit, x0=b_0, args=(y, x))
optimize.fmin_bfgs(Logit, b_0, args=(y, x,))
# def OLS(y,x,cf=0.95):
# """
# OLS estimation.
#
# Parameters
# −−−−−−−−−−
# y : Dependent variable
# x : Explanatory variable
# cf: Confidence level
#
# Returns
# −−−−−−−
# beta : Beta
# se: Standard Error
# confidence: Confidence Interval
#
# See Also
# −−−−−−−−
# other_function : This is a related function
# """
#
# beta = np.linalg.inv(x.T @ x) @ (x.T @ y)
#
# se_term1 = ((y - x @ beta).T @ (y - x @ beta)) / (x.shape[0] - 1)
# se_term2 = x.T @ x
# cov_matrix = se_term1 * se_term2
# se = np.sqrt(np.diag(cov_matrix))
#
# confidence = [beta - st.norm.ppf(1 - (1-0.95)/2) * se, beta \
# + st.norm.ppf(1 - (1-0.95)/2) * se]
#
# return {"Beta":beta, "Standard Error":se, "Confidence Interval":confidence}
| 1,592 | 732 |
from enum import Enum
class NumberSystem(Enum):
    """Common positional numeral systems, keyed by radix."""
    BINARY = 2
    OCTAL = 8
    DECIMAL = 10
    # fix: stripped trailing extraction residue ("| 116 | 54 |") that made
    # this line a syntax error
    HEXADECIMAL = 16
import os
import random
import pickle
import datetime
import torch
import numpy as np
from PIL import Image
from model import Model
from train_data import TrainData
from loss_train_data import get_loss_train_data
class ModelProcessor():
    """Owns a Model plus its on-disk training state.

    Directory layout under `path`:
      - `index`                   : pickled {loss_trained, epoch, checkpoints}
      - `ckpt-N.pt`               : model checkpoint saved at epoch N
      - `sample_outputs/<img>/`   : frames generated from sample inputs
    """

    def __init__(self, path):
        self.path = path
        self.device = torch.device('cuda')
        self.model = Model(self.device)
        self._load_model()

    def train_frames(self):
        """Train the frame predictor; checkpoint + samples every 500 epochs."""
        if not self._loss_trained:
            # os.error is an alias of OSError; use the canonical name
            raise OSError('Loss has not been trained yet (call ModelProcessor.train_loss())')
        for x, y, _ in ModelProcessor._train_frames_iter(300000, 1):
            epoch = self._epoch
            loss = self.model.train_frame(x, y)
            print(f'{datetime.datetime.now()} train_frame epoch {epoch} loss={loss}')
            self._epoch = self._epoch + 1
            if (epoch % 500) == 0:
                self._checkpoints[epoch] = {
                    'epoch': epoch,
                    'loss': loss
                }
                self.model.save(self._path(f'ckpt-{epoch}.pt'))
                self._save_model()
                self._process_sample_images()

    def train_loss(self):
        """Train the loss network once and record checkpoint 1."""
        if self._loss_trained:
            raise OSError('Loss has already been trained on this model')
        for x, y, epoch in ModelProcessor._train_loss_iter(400, 4):
            loss = self.model.train_loss(x, y)
            print(f'{datetime.datetime.now()} train_loss epoch {epoch} loss={loss}')
        self._loss_trained = True
        self._checkpoints[1] = {
            'epoch': 1,
            'loss': None
        }
        self.model.save(self._path('ckpt-1.pt'))
        self._save_model()

    def _load_model(self):
        """Restore index + latest checkpoint, creating a fresh state if absent."""
        if not os.path.exists(self.path):
            os.mkdir(self.path)
        if not os.path.exists(self._path('index')):
            self._loss_trained = False
            self._epoch = 1
            self._checkpoints = { }
            self._save_model()
        else:
            with open(self._path('index'), 'rb') as f:
                mdata = pickle.load(f)
            self._loss_trained = mdata['loss_trained']
            self._epoch = mdata['epoch']
            self._checkpoints = mdata['checkpoints']
            if len(self._checkpoints) > 0:
                latest_checkpoint = max(self._checkpoints)
                ckpt_path = self._path(f'ckpt-{latest_checkpoint}.pt')
                if os.path.exists(ckpt_path):
                    self.model.load(ckpt_path)
                else:
                    # Latest checkpoint file is missing: fall back to the
                    # initial checkpoint and reset the bookkeeping.
                    self.model.load(self._path('ckpt-1.pt'))
                    self._epoch = 1
                    self._checkpoints = {
                        1: {'epoch': 1, 'loss': None}
                    }

    def _save_model(self):
        """Persist the index (training flags, epoch, checkpoint table)."""
        with open(self._path('index'), 'wb') as f:
            mdata = {
                'loss_trained': self._loss_trained,
                'epoch': self._epoch,
                'checkpoints': self._checkpoints
            }
            pickle.dump(mdata, f)

    def _path(self, *paths):
        """Join *paths* under this processor's working directory."""
        return os.path.join(self.path, *paths)

    @staticmethod
    def _train_frames_iter(num_batches, batch_size):
        """Yield (x_batch, y_batch, epoch) of consecutive-frame pairs."""
        def _train_frames_iter_singles():
            for i in range(0, batch_size * num_batches):
                td = TrainData.get_random()
                x = td.get_train_image()
                y = td.get_next_train_image()
                yield (x, y, i)
        xs = []
        ys = []
        for x, y, i in _train_frames_iter_singles():
            xs.append(x[0])
            ys.append(y[0])
            if len(xs) >= batch_size:
                epoch = int((i + 1) / batch_size)
                yield (np.array(xs), np.array(ys), epoch)
                xs = []
                ys = []

    @staticmethod
    def _train_loss_iter(num_batches, batch_size):
        """Yield (x_batch, label_batch, epoch); labels are 0=real, 1=loss-data."""
        def _train_loss_iter_singles():
            for i in range(0, batch_size * num_batches):
                g = random.randint(0, 1)
                if g == 0:
                    x = TrainData.get_random().get_train_image()
                    y = 0
                else:
                    x = get_loss_train_data()
                    y = 1
                yield (x, y, i)
        xs = []
        ys = []
        for x, y, i in _train_loss_iter_singles():
            xs.append(x[0])
            ys.append(y)
            if len(xs) >= batch_size:
                epoch = int((i + 1) / batch_size)
                yield (np.array(xs), np.array(ys), epoch)
                xs = []
                ys = []

    def _process_sample_images(self):
        """ Processes images in the '.data/model_sample_inputs' directory through the model, each with 5 samples """
        model = self.model
        epoch = self._epoch
        for img in os.listdir('.data/model_sample_inputs'):
            sample_outputs = self._path('sample_outputs')
            if not os.path.exists(sample_outputs):
                os.mkdir(sample_outputs)
            out_dir = self._path('sample_outputs', img)
            if not os.path.exists(out_dir):
                os.mkdir(out_dir)
            print(f'process sample {img}')
            try:
                x = Image.open(f'.data/model_sample_inputs/{img}')
                x.load()
                x.save(f'{out_dir}/{epoch}-0.png')
                x = TrainData.preprocess_pil_image(x)
                max_iters = 4
                # Feed the model its own output for a few iterations.
                for i in range(1, max_iters + 1):
                    x = model.get_frame(x)
                    y = TrainData.postprocess_pil_image(x)
                    y.save(f'{out_dir}/{epoch}-{i}.png')
                    y.close()
                    print(f'process sample {img} completed {i}/{max_iters}')
            except Exception as e:
                # Best-effort: a bad sample must not abort the training loop.
                # (fix: dropped the trailing `pass` line, which carried fused
                # extraction residue "| 5,847 | 1,801 |" — a syntax error)
                print(f'exception processing sample {img} {e}')
"""
The assertions module contains classic unittest-style assert statements.
"""
from pedal.assertions.setup import _setup_assertions, resolve_all
from pedal.assertions.constants import TOOL_NAME
from pedal.core.report import Report, MAIN_REPORT
from pedal.assertions.commands import *
def reset(report=MAIN_REPORT):
    """
    Resets (or initializes) the information about assertions.

    Args:
        report: The report whose assertion state should be cleared
            (defaults to the main report).
    """
    fresh_state = {'failures': 0, 'exceptions': False}
    report[TOOL_NAME] = fresh_state
# Hook `reset` into the reporting framework so assertion state is
# (re)initialized whenever this tool is set up.
Report.register_tool(TOOL_NAME, reset)
| 549 | 172 |
# print all prime numbers in a range with a generator function in python
# This is a utility function.
def isprime(n):
    """Return True when n is prime, else False.

    Fixes the original, whose `else: return True` inside the loop returned
    True after the FIRST non-divisor (so every odd number looked prime) and
    which fell off the end returning None for n == 2.
    """
    if n < 2:
        return False
    # Trial division up to sqrt(n) is sufficient.
    for x in range(2, int(n ** 0.5) + 1):
        if n % x == 0:
            return False
    return True
#generator function is used in the for loop as an iterator
#this function return an iterator object
def primes(n = 1):
    """Infinite generator yielding primes, searching upward from n."""
    candidate = n
    while True:
        if isprime(candidate):
            # `yield` makes this function a generator
            yield candidate
        candidate += 1
#for loop use primes function as an iterator
# Consume the generator, printing primes until they exceed 100.
for n in primes():
    if n > 100:
        break
    print(n)
| 573 | 175 |
import json
from firebase_admin import _http_client, messaging
from firebase_admin.credentials import Base
from google.auth.credentials import Credentials
from requests import adapters, models
# Fake service-account credentials for tests. The RSA key below is a mock
# fixture, not a real secret — it only needs to parse as a valid key.
FIREBASE_AUTH_CREDENTIALS = {
    "type": "service_account",
    "project_id": "mock-project-id",
    "private_key_id": "mock-key-id-1",
    "private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAwJENcRev+eXZKvhhWLiV3Lz2MvO+naQRHo59g3vaNQnbgyduN/L4krlr\nJ5c6FiikXdtJNb/QrsAHSyJWCu8j3T9CruiwbidGAk2W0RuViTVspjHUTsIHExx9euWM0Uom\nGvYkoqXahdhPL/zViVSJt+Rt8bHLsMvpb8RquTIb9iKY3SMV2tCofNmyCSgVbghq/y7lKORt\nV/IRguWs6R22fbkb0r2MCYoNAbZ9dqnbRIFNZBC7itYtUoTEresRWcyFMh0zfAIJycWOJlVL\nDLqkY2SmIx8u7fuysCg1wcoSZoStuDq02nZEMw1dx8HGzE0hynpHlloRLByuIuOAfMCCYwID\nAQABAoIBADFtihu7TspAO0wSUTpqttzgC/nsIsNn95T2UjVLtyjiDNxPZLUrwq42tdCFur0x\nVW9Z+CK5x6DzXWvltlw8IeKKeF1ZEOBVaFzy+YFXKTz835SROcO1fgdjyrme7lRSShGlmKW/\nGKY+baUNquoDLw5qreXaE0SgMp0jt5ktyYuVxvhLDeV4omw2u6waoGkifsGm8lYivg5l3VR7\nw2IVOvYZTt4BuSYVwOM+qjwaS1vtL7gv0SUjrj85Ja6zERRdFiITDhZw6nsvacr9/+/aut9E\naL/koSSb62g5fntQMEwoT4hRnjPnAedmorM9Rhddh2TB3ZKTBbMN1tUk3fJxOuECgYEA+z6l\neSaAcZ3qvwpntcXSpwwJ0SSmzLTH2RJNf+Ld3eBHiSvLTG53dWB7lJtF4R1KcIwf+KGcOFJv\nsnepzcZBylRvT8RrAAkV0s9OiVm1lXZyaepbLg4GGFJBPi8A6VIAj7zYknToRApdW0s1x/XX\nChewfJDckqsevTMovdbg8YkCgYEAxDYX+3mfvv/opo6HNNY3SfVunM+4vVJL+n8gWZ2w9kz3\nQ9Ub9YbRmI7iQaiVkO5xNuoG1n9bM+3Mnm84aQ1YeNT01YqeyQsipP5Wi+um0PzYTaBw9RO+\n8Gh6992OwlJiRtFk5WjalNWOxY4MU0ImnJwIfKQlUODvLmcixm68NYsCgYEAuAqI3jkk55Vd\nKvotREsX5wP7gPePM+7NYiZ1HNQL4Ab1f/bTojZdTV8Sx6YCR0fUiqMqnE+OBvfkGGBtw22S\nLesx6sWf99Ov58+x4Q0U5dpxL0Lb7d2Z+2Dtp+Z4jXFjNeeI4ae/qG/LOR/b0pE0J5F415ap\n7Mpq5v89vepUtrkCgYAjMXytu4v+q1Ikhc4UmRPDrUUQ1WVSd+9u19yKlnFGTFnRjej86hiw\nH3jPxBhHra0a53EgiilmsBGSnWpl1WH4EmJz5vBCKUAmjgQiBrueIqv9iHiaTNdjsanUyaWw\njyxXfXl2eI80QPXh02+8g1H/pzESgjK7Rg1AqnkfVH9nrwKBgQDJVxKBPTw9pigYMVt9iHrR\niCl9zQVjRMbWiPOc0J56+/5FZYm/AOGl9rfhQ9vGxXZYZiOP5FsNkwt05Y1UoAAH4B4VQwbL\nqod71qOcI0ywgZiIR87CYw40gzRfjWnN+YEEW1qfyoNLilEwJB8iB/T+ZePHGmJ4MmQ/cTn9\nxpdLXA==\n-----END RSA PRIVATE KEY-----",
    "client_email": "mock-email@mock-project.iam.gserviceaccount.com",
    "client_id": "1234567890",
    "auth_uri": "https://accounts.google.com/o/oauth2/auth",
    "token_uri": "https://accounts.google.com/o/oauth2/token",
    "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
    "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/mock-project-id.iam.gserviceaccount.com"
}
# Token returned by the mock credential's refresh().
FIREBASE_TOKEN = 'mock-token'
class MockHttpClient(_http_client.HttpClient):
    """HTTP client stub: echoes the outgoing JSON payload instead of
    performing any network request."""

    def request(self, method, url, **kwargs):
        # Short-circuit: the "response" is just the JSON body that was sent.
        return kwargs['json']

    def parse_body(self, resp):
        # Tag the echoed payload with a fixed message name, like FCM would.
        resp['name'] = 'test-name'
        return resp
class MockGoogleCredential(Credentials):
    """A mock Google authentication credential."""
    def refresh(self, request):
        # "Refreshing" just installs a fixed fake token; no network traffic.
        self.token = 'mock-token'
class MockCredential(Base):
    """A mock Firebase credential implementation."""
    def __init__(self):
        # Wrap the fake Google credential so firebase_admin can consume it.
        self._g_credential = MockGoogleCredential()
    def get_credential(self):
        # Returns the underlying (mock) Google credential object.
        return self._g_credential
class MockAdapter(adapters.HTTPAdapter):
    """A canned-response HTTP adapter that records every outgoing request."""

    def __init__(self, data, status, recorder):
        super().__init__()
        self._data = data
        self._status = status
        self._recorder = recorder

    def send(self, request, **kwargs):
        """Record the request and return a pre-built canned response."""
        request._extra_kwargs = kwargs
        self._recorder.append(request)
        canned = models.Response()
        canned.url = request.url
        canned.status_code = self._status
        canned.raw = self._data.encode()
        return canned
def send_message(*args, message_instance, data, token):
    """Build a messaging.Message from data/token and dispatch it through
    *message_instance*, returning whatever its send() returns."""
    outgoing = messaging.Message(data=data, token=token)
    return message_instance.send(outgoing)
class MessagingService:
    """Test double around firebase_admin's messaging service: the real FCM
    endpoint is replaced by MockAdapter, and sends go through MockHttpClient."""
    # Canned body returned by the mocked FCM endpoint.
    _DEFAULT_RESPONSE = json.dumps({'name': 'message-id'})
    def __init__(self, app, *args, **kwargs):
        # self.fcm_service = messaging._get_messaging_service(app)
        # self.fcm_service._client.session.mount(
        #     'https://fcm.googleapis.com',
        #     MockAdapter(json.dumps({'name': 'message-id'}), 200, self.recorder)
        # )
        # super(MessagingService, self).__init__(app, *args, **kwargs)
        self.fcm_service, self.recorder = self._instrument_messaging_service(app)
        # Reuse the (mock-mounted) session inside our echoing HTTP client.
        self.session = self.fcm_service._client.session
        self._client = MockHttpClient(session=self.session)
        self._fcm_url = 'https://fcm.googleapis.com/v1/projects/{0}/messages:send'.format(app.project_id)
        self._timeout = app.options.get('httpTimeout')
    def _instrument_messaging_service(self, app, status=200, payload=_DEFAULT_RESPONSE):
        # Mount a MockAdapter on the FCM host so no real HTTP traffic occurs;
        # `recorder` collects every request the adapter sees.
        fcm_service = messaging._get_messaging_service(app)
        recorder = []
        fcm_service._client.session.mount(
            'https://fcm.googleapis.com',
            MockAdapter(payload, status, recorder)
        )
        return fcm_service, recorder
    def send(self, message, dry_run=False):
        # Mirrors firebase_admin's _MessagingService.send: encode the message,
        # post it, and return the resulting message name.
        data = {'message': messaging._MessagingService.encode_message(message)}
        if dry_run:
            data['validate_only'] = True
        resp = self._client.body('post', url=self._fcm_url, json=data, timeout=self._timeout)
        return resp['name']
| 5,373 | 2,547 |
"""
Module responsible for discovery of import statements importing Argument parser
and discovery of the statements initializing the parser itself
"""
import ast
import sys
from typing import Tuple, Optional, Any, Set, List
from .parsing_exceptions import ArgParseImportNotFound, ArgParserNotUsed
from .parsing_commons import Discovery
ARGPARSE_MODULE_NAME = "argparse"
ARGUMENT_PARSER_CLASS_NAME = "ArgumentParser"
class ImportDiscovery(Discovery):
    """
    Class responsible for discovery and extraction of import statements
    """

    def __init__(self, actions: List[ast.AST]):
        super(ImportDiscovery, self).__init__(actions)
        self.argparse_module_alias: Optional[str] = None
        self.argument_parser_alias: Optional[str] = None

    def visit_Import(self, node: ast.Import) -> Any:
        """Record `import argparse` (remembering its alias) or stdlib imports."""
        for item in node.names:
            if item.name == ARGPARSE_MODULE_NAME:
                alias = item.asname if item.asname is not None \
                    else ARGPARSE_MODULE_NAME
                self.argparse_module_alias = alias
                self.actions.append(node)
                return
        # stdlib modules should be also imported during this step.
        # fix: append the node at most once — the original appended inside the
        # loop, so `import os, sys` was recorded once per stdlib name.
        if any(item.name in sys.stdlib_module_names for item in node.names):
            self.actions.append(node)

    def visit_ImportFrom(self, node: ast.ImportFrom) -> Any:
        """Record stdlib `from` imports and the ArgumentParser alias."""
        if node.module is None:
            return
        # Any dotted prefix rooted in the stdlib (but not argparse itself)
        # is kept verbatim.
        for name in node.module.split("."):
            if name in sys.stdlib_module_names and name != \
                    ARGPARSE_MODULE_NAME:
                self.actions.append(node)
                return
        if ARGPARSE_MODULE_NAME not in node.module:
            return
        for item in node.names:
            if item.name == ARGUMENT_PARSER_CLASS_NAME:
                alias = item.asname if item.asname is not None \
                    else ARGUMENT_PARSER_CLASS_NAME
                self.argument_parser_alias = alias
                self.actions.append(node)
                return

    def report_findings(self) -> Tuple:
        """Return (actions, module alias, class alias); raise when argparse
        was never imported in any form."""
        if self.argparse_module_alias is None and \
                self.argument_parser_alias is None:
            raise ArgParseImportNotFound
        return (self.actions, self.argparse_module_alias,
                self.argument_parser_alias)
class ParserDiscovery(Discovery):
    """
    Class responsible for discovery of ArgumentParser creation and assignment
    """

    class ParserRenameFinder(ast.NodeVisitor):
        """Helper visitor locating parser call arguments by position/keyword."""

        def __init__(self, func_name: str):
            self.func_name = func_name
            self.arg_pos: Optional[int] = None
            # fix: was `self.keyword = Optional[str] = None`, a chained
            # assignment that attempts `typing.Optional[str] = None` and
            # raises TypeError at runtime; an annotation was intended.
            self.keyword: Optional[str] = None

        def find_by_argument_pos(self, tree: ast.AST, n: int):
            """Search *tree* for the argument at position *n*."""
            self.arg_pos = n
            self.keyword = None
            self.visit(tree)

    def __init__(self, actions: List[ast.AST], argparse_alias: Optional[str],
                 argument_parser_alias: Optional[str]):
        self.argument_parser_alias = argument_parser_alias
        self.argparse_module_alias = argparse_alias
        self.main_parser_name: Optional[str] = None
        super(ParserDiscovery, self).__init__(actions)

    # checks whether this assignment creates argument parser,
    # and removes any arguments from the constructor,
    # because they should not be needed
    def is_this_argparse(self, node: ast.Assign) -> \
            Tuple[bool, Optional[str]]:
        if not (len(node.targets) == 1 and
                isinstance(node.targets[0], ast.Name)):
            return False, None
        name = node.targets[0].id
        # ArgumentParser was imported using from ... import
        if (isinstance(node.value, ast.Call) and
                isinstance(node.value.func, ast.Name) and
                node.value.func.id == self.argument_parser_alias):
            node.value.keywords = []
            node.value.args = []
            return True, name
        # ArgumentParser is created using attribute call on imported module.
        # fix: guard the `.value.id` access — for e.g. `a.b.ArgumentParser()`
        # the receiver is an ast.Attribute and has no `.id`.
        if (isinstance(node.value, ast.Call) and
                isinstance(node.value.func, ast.Attribute) and
                node.value.func.attr == ARGUMENT_PARSER_CLASS_NAME and
                isinstance(node.value.func.value, ast.Name) and
                node.value.func.value.id == self.argparse_module_alias):
            node.value.args = []
            node.value.keywords = []
            return True, name
        return False, None

    def visit_Assign(self, node: ast.Assign):
        # visit into children of this node is not necessary
        is_argparse, name = self.is_this_argparse(node)
        if is_argparse:
            self.main_parser_name = name
            self.actions.append(node)

    def report_findings(self) -> Tuple:
        """Return (actions, parser variable name); raise when no parser
        assignment was discovered."""
        if self.main_parser_name is None:
            raise ArgParserNotUsed
        return self.actions, self.main_parser_name
# this visitor class goes through the tree and tries to find creation of
# all argument groups
# it works only if the group is assigned a name
# (is created as a normal variable)
class GroupDiscovery(Discovery):
    """
    Class responsible for discovery of statements that initialize argument
    groups
    """

    def __init__(self, actions: List[ast.AST], main_name: str):
        self.main_name = main_name
        self.groups = set()
        super(GroupDiscovery, self).__init__(actions)

    @staticmethod
    def is_this_group_creation(node: ast.Assign):
        """Return (True, name) when node assigns `<x>.add_argument_group(...)`
        to a plain variable, otherwise (False, None)."""
        targets = node.targets
        if len(targets) != 1 or not isinstance(targets[0], ast.Name):
            return False, None
        call = node.value
        if (isinstance(call, ast.Call)
                and isinstance(call.func, ast.Attribute)
                and call.func.attr == "add_argument_group"):
            return True, targets[0].id
        return False, None

    def visit_Assign(self, node: ast.Assign):
        created, group_name = self.is_this_group_creation(node)
        if created:
            self.groups.add(group_name)
            self.actions.append(node)

    def report_findings(self) -> Tuple:
        return self.actions, self.main_name, self.groups
# # this visitor goes through all calls and extracts those to argument
# parser and groups. IMPORTANT! it also renames parsers on which those calls
# are called to ensure everything can be interpreted correctly
class ArgumentCreationDiscovery(Discovery):
    """
    Class responsible for extraction of statements which initialize the input
    arguments. It is able to extract function calls on the original parser,
    and on the argument groups extracted by GroupDiscovery
    """

    def __init__(self, actions: List[ast.AST], main_name: str,
                 groups: Set[str]):
        self.main_name = main_name
        self.sections = groups
        super(ArgumentCreationDiscovery, self).__init__(actions)

    def is_call_on_parser_or_group(self, node: ast.Call):
        """True for `<main parser or known group>.add_argument(...)` calls."""
        # fix: verify the receiver is a plain name before touching `.id`
        # (e.g. `factory().add_argument(...)` used to raise AttributeError)
        return isinstance(node.func, ast.Attribute) and \
            node.func.attr == "add_argument" and \
            isinstance(node.func.value, ast.Name) and \
            (node.func.value.id in self.sections or
             node.func.value.id == self.main_name)

    def visit_Call(self, node: ast.Call) -> Any:
        if self.is_call_on_parser_or_group(node):
            # NOTE: the original also had a branch renaming foreign parser
            # variables here, but the predicate above only matches the main
            # parser or a known group, so that branch was unreachable and
            # has been removed.
            self.actions.append(ast.Expr(node))
        self.generic_visit(node)

    def report_findings(self) -> Tuple:
        return self.actions, self.main_name, self.sections
def get_parser_init_and_actions(source: ast.Module) -> \
        Tuple[List[ast.AST], str, Set[str]]:
    """
    Function used to extract necessary imports, parser and argument creation
    function calls

    Parameters
    ----------
    source : ast.Module
        source file parsed into an AST

    Returns
    -------
    List of extracted AST nodes, the main name of the parser and a set of
    section names
    """
    # Each Discovery consumes the previous stage's findings as constructor
    # arguments; the chain starts from a single empty actions list.
    findings = ([],)
    for discovery_cls in (ImportDiscovery, ParserDiscovery,
                          GroupDiscovery, ArgumentCreationDiscovery):
        stage = discovery_cls(*findings)
        stage.visit(source)
        findings = stage.report_findings()
    actions, main_name, sections = findings
    return actions, main_name, sections
| 8,577 | 2,394 |
from django.urls import path
from . import views
# URL routes for the PMTCT module.
# (The previous note said "OVC ONLY" — that appears to be a copy-paste
# leftover; every view wired up here is a pmtct view.)
urlpatterns = [
    path('', views.pmtct_home, name='pmtct_home'),
    path('new/<int:id>/', views.new_pmtct, name='new_pmtct'),
    path('view/<int:id>/', views.view_pmtct, name='view_pmtct'),
    path('edit/<int:id>/', views.edit_pmtct, name='edit_pmtct'),
]
| 358 | 139 |
# Copyright 2017 Alexey Stepanov aka penguinolog
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Config file editors."""
from __future__ import absolute_import
from .config_editor import ConfigEditor
from .json_editor import JsonEditor
from .yaml_editor import YamlEditor
# Explicit public API of the package.
__all__ = (
    "ConfigEditor",
    "JsonEditor",
    "YamlEditor"
)
# Package metadata.
__version__ = '0.9.2'
__author__ = "Alexey Stepanov <penguinolog@gmail.com>"
| 954 | 296 |
import pyo
from settings import audioSource
# Boot a mono audio server and route the input channel through a delay line.
s = pyo.Server(audio=audioSource, nchnls=1).boot()
s.start()
a = pyo.Input(chnl=0).out()
delay = pyo.Delay(a, delay=.5, feedback=.5)
delay.out()
# Interactive loop: type a new delay time in seconds, or "q" to quit.
while True:
    # fix: use input() (raw_input is Python 2 only) and a separate variable —
    # the original reassigned `s`, clobbering the Server reference.
    command = input('Delay')
    if command == "q":
        quit()
    delay.setDelay(float(command))
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
def validate_integer(value):
    """Django validator: raise ValidationError unless value is exactly an int.

    Note: the strict `type(...) is not int` check deliberately rejects bool
    (a subclass of int).
    """
    if type(value) is not int:
        raise ValidationError(
            # fix: the message previously said "is not an even number",
            # which did not match the integer-type check performed here
            _('%(value)s is not an integer'),
            params={'value': value},
        )
| 297 | 85 |
from destryseuler import p1
def test_p1_answer():
    # Multiples of 3 or 5 below 10 are 3, 5, 6, 9 -> sum 23.
    assert p1.answer(10) == 23
def test_brute():
    # Brute-force variant must agree with the known sum for limit 10.
    assert p1.natural_3and5_brute(10) == 23
def test_lambda():
    # 233168 is the published Project Euler #1 result for limit 1000.
    assert p1.natural_3and5_lambda(10) == 23
    assert p1.natural_3and5_lambda(1000) == 233168
| 261 | 120 |
import numpy as np
import cv2
import os
import keras
import tensorflow as tf
import random
import matplotlib.pyplot as plt
from glob import glob
from keras import layers
from keras.backend.tensorflow_backend import set_session
from tensorflow.python.client import device_lib
# Two image/mask dataset roots that are concatenated into one training set.
input_dir_1 = "unet/images/"
target_dir_1 = "unet/target/"
input_dir_2= "data/images/"
target_dir_2 = "data/target/"
# 32x32 patches, binary masks (num_classes is unused below — the model is
# built with 1 output channel).
img_size = (32, 32)
num_classes = 2
batch_size = 32
# Sorting both globs keeps inputs and targets aligned by filename.
input_img_paths_1 = sorted(glob(os.path.join(input_dir_1, '*' + '.png')))
target_img_paths_1 = sorted(glob(os.path.join(target_dir_1, '*' + '.png')))
input_img_paths_2 = sorted(glob(os.path.join(input_dir_2, '*' + '.png')))
target_img_paths_2 = sorted(glob(os.path.join(target_dir_2, '*' + '.png')))
input_img_paths = input_img_paths_1 + input_img_paths_2
target_img_paths = target_img_paths_1 + target_img_paths_2
print("Number of samples:", len(input_img_paths))
# Spot-check the first few input/target pairings.
for input_path, target_path in zip(input_img_paths[:10], target_img_paths[:10]):
    print(input_path, "|", target_path)
class Patches(keras.utils.Sequence):
    """Helper to iterate over the data (as Numpy arrays)."""

    def __init__(self, batch_size, img_size, input_img_paths, target_img_paths):
        self.batch_size = batch_size
        self.img_size = img_size
        self.input_img_paths = input_img_paths
        self.target_img_paths = target_img_paths
        # NOTE(review): never updated or read — appears to be dead state.
        self.current_batch = 0

    def __len__(self):
        # Number of full batches; any remainder is dropped.
        return len(self.target_img_paths) // self.batch_size

    def __getitem__(self, idx):
        """Returns tuple (input, target) correspond to batch #idx."""
        #print(idx)
        i = idx * self.batch_size
        # Re-shuffle the path lists whenever batch 0 is requested, keeping
        # input/target pairs aligned.
        # NOTE(review): with multiprocessing workers keras may request
        # batches out of order, so this shuffle-on-idx-0 scheme is not a
        # guaranteed per-epoch shuffle — confirm intent.
        if i == 0:
            data_zip_list = list(zip(self.input_img_paths, self.target_img_paths))
            random.shuffle(data_zip_list)
            self.input_img_paths, self.target_img_paths = zip(*data_zip_list)
        batch_input_img_paths = self.input_img_paths[i : i + self.batch_size]
        batch_target_img_paths = self.target_img_paths[i : i + self.batch_size]
        # Inputs: BGR float32 in [0, 1], randomly blurred (3x3, 5x5, or none)
        # as a light augmentation.
        x = np.zeros((self.batch_size,) + self.img_size + (3,), dtype="float32")
        for j, path in enumerate(batch_input_img_paths):
            img = cv2.imread(path, cv2.IMREAD_COLOR)
            n = np.random.randint(0, 3)
            if n == 0:
                img = cv2.blur(img, (3, 3)) / 255.
            elif n == 1:
                img = cv2.blur(img, (5, 5)) / 255.
            else:
                img = img / 255.
            x[j] = img
        # Targets: single-channel float masks (grayscale values promoted to
        # float via `* 1.`).
        y = np.zeros((self.batch_size,) + self.img_size + (1,), dtype="float32")
        for j, path in enumerate(batch_target_img_paths):
            img = cv2.imread(path, cv2.IMREAD_GRAYSCALE) * 1.
            y[j] = np.expand_dims(img, 2)
        return x, y
def get_model(img_size, num_classes):
    """Build a small U-Net-style segmentation model with residual skips.

    Args:
        img_size: full input shape passed to keras.Input — despite the name,
            callers pass (32, 32, 3), i.e. it includes the channel axis.
        num_classes: number of output channels (called with 1 below, giving
            a single sigmoid mask).
    """
    inputs = keras.Input(shape=img_size)

    ### [First half of the network: downsampling inputs] ###

    # Entry block
    x = layers.Conv2D(32, 3, strides=2, padding="same")(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    previous_block_activation = x  # Set aside residual

    # Blocks 1, 2, 3 are identical apart from the feature depth.
    for filters in [64, 128, 256]:
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)
        x = layers.MaxPooling2D(3, strides=2, padding="same")(x)
        # Project residual
        residual = layers.Conv2D(filters, 1, strides=2, padding="same")(previous_block_activation)
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    ### [Second half of the network: upsampling inputs] ###

    for filters in [256, 128, 64, 32]:
        x = layers.Activation("relu")(x)
        x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation("relu")(x)
        x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)
        x = layers.UpSampling2D(2)(x)
        # Project residual
        residual = layers.UpSampling2D(2)(previous_block_activation)
        residual = layers.Conv2D(filters, 1, padding="same")(residual)
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    # Add a per-pixel classification layer
    outputs = layers.Conv2D(num_classes, 3, activation="sigmoid", padding="same")(x)

    # Define the model
    model = keras.Model(inputs, outputs)
    return model
# Configure the TF1 session.
# NOTE(review): device_count={'GPU': 0} disables GPUs while
# visible_device_list="0" selects one — these settings conflict; confirm
# which behavior is intended.
tf_config = tf.ConfigProto(device_count = {'GPU': 0})
tf_config.gpu_options.per_process_gpu_memory_fraction = 0.7
tf_config.gpu_options.visible_device_list = "0"
set_session(tf.Session(config=tf_config))
# Build model: 32x32 RGB in, single sigmoid mask channel out.
model = get_model((32, 32, 3), 1)
model.summary()
# Split our img paths into a training and a validation set (20% validation),
# shuffling input/target pairs together so they stay aligned.
val_samples = int(0.2*len(input_img_paths))
data_zip_list = list(zip(input_img_paths, target_img_paths))
random.shuffle(data_zip_list)
input_img_paths, target_img_paths = zip(*data_zip_list)
train_input_img_paths = input_img_paths[:-val_samples]
train_target_img_paths = target_img_paths[:-val_samples]
val_input_img_paths = input_img_paths[-val_samples:]
val_target_img_paths = target_img_paths[-val_samples:]
# Instantiate data Sequences for each split
train_gen = Patches(batch_size, img_size, train_input_img_paths, train_target_img_paths)
val_gen = Patches(batch_size, img_size, val_input_img_paths, val_target_img_paths)
# Binary segmentation -> binary cross-entropy with the sigmoid output.
# (fix: the old comment claimed sparse categorical CE, and the constructed
# SGD optimizer `opt` was created but never passed to compile)
opt = keras.optimizers.SGD()
model.compile(optimizer=opt, loss="binary_crossentropy")
callbacks = [keras.callbacks.ModelCheckpoint("oxford_segmentation.h5", save_best_only=True)]
# Train the model, doing validation at the end of each epoch.
epochs = 10
hist = model.fit_generator(train_gen, epochs=epochs, validation_data=val_gen, callbacks=callbacks)
# Plot and save the training/validation loss curves.
fig = plt.figure()
plt.plot(hist.history['loss'], label = 'Training value', color = 'darkslategray')
plt.plot(hist.history['val_loss'], label = 'Validation value', color = 'darkslategray', linestyle = '--')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.savefig('loss.pdf')
# fix: dropped trailing extraction residue ("| 6,568 | 2,394 |") fused onto
# this line, which made it a syntax error
plt.close(fig)
# -*- coding: utf-8 -*-
import scrapy
class Sprider(scrapy.Spider):
    """Spider that scrapes P2P-platform names from wdzj.com's rating page."""
    name = "zj"
    start_urls = ['https://www.wdzj.com/pingji.html']

    def parse(self, response):
        """Yield one {'name': ...} item per platform-name cell on the page."""
        for cell in response.css('div.tb-platname'):
            item = {'name': cell.css('a::text').extract_first()}
            yield item
# -*- coding: utf-8 -*-
"""Help information management."""
# Part of Clockwork MUD Server (https://github.com/whutch/cwmud)
# :copyright: (c) 2008 - 2017 Will Hutcheson
# :license: MIT (https://github.com/whutch/cwmud/blob/master/LICENSE.txt)
import fnmatch
import re
from .utils.bases import Manager
class HelpSourceManager(Manager):
    """A manager for help source registration."""
    def find(self, pattern):
        """Find one or more help entries matching a pattern.
        :param str pattern: A pattern to match entries against
        :returns list: A list of HelpEntry instances that match the pattern
        """
        # De-duplicate across sources by entry key; the first source that
        # yields a given key wins, matching registration order.
        matched = {}
        for help_source in self._items.values():
            for hit in help_source.find(pattern):
                matched.setdefault(hit.key, hit)
        return list(matched.values())
class HelpSource:
    """A searchable source of help data.

    Behaves like a mapping of entry key -> HelpEntry, with glob-style
    searching over each entry's topic keywords via `find`.
    """
    def __init__(self):
        """Create a new help source."""
        # Mapping of entry key -> HelpEntry.
        self._entries = {}
    def __contains__(self, key):
        return key in self._entries
    def __getitem__(self, key):
        return self._entries[key]
    def __setitem__(self, key, value):
        self._entries[key] = value
    def __delitem__(self, key):
        del self._entries[key]
    def __iter__(self):
        return iter(self._entries)
    def keys(self):
        """Return an iterator through this source's entry keys."""
        return self._entries.keys()
    def entries(self):
        """Return an iterator through this source's entries."""
        return self._entries.values()
    def find(self, pattern):
        """Find one or more help entries matching a pattern.
        :param str pattern: A glob-style pattern to match entry topics against
        :returns list: A list of HelpEntry instances that match the pattern
        """
        regex = re.compile(fnmatch.translate(pattern))
        matches = []
        for entry in self.entries():
            # Bug fix: add each entry at most once.  Previously an entry was
            # appended once per matching topic, so entries with several
            # matching topics appeared multiple times in the result.
            if any(regex.match(topic) for topic in entry.topics):
                matches.append(entry)
        return matches
class HelpEntry:
    """A single entry of help information."""
    def __init__(self, key, title, text):
        """Create a new help entry.
        :param str key: A unique key identifying this entry
        :param str title: The title displayed for this entry
        :param str text: The body text of this entry
        """
        self._key = key
        self._related = set()  # Keys/topics of related entries.
        self._text = text
        self._title = title
        self._topics = set()  # Topic keywords used for searching.
    @property
    def key(self):
        """Return this entry's unique key.

        Added for consistency: the key was stored but had no accessor,
        unlike text/topics/related.
        """
        return self._key
    @property
    def title(self):
        """Return this entry's title (added for consistency with text)."""
        return self._title
    @property
    def related(self):
        """Return this entry's related topics."""
        return frozenset(self._related)
    @property
    def text(self):
        """Return this entry's text."""
        return self._text
    @property
    def topics(self):
        """Return this entry's topic keywords."""
        return frozenset(self._topics)
HELP_SOURCES = HelpSourceManager()
| 2,792 | 787 |
from django.db import models
from django import forms
from django.forms import ModelForm, TextInput, FileField, NumberInput
from .models import Extentions, Queue
class ExtentionsForm(ModelForm):
    """ModelForm for creating/editing Extentions (short-code) records."""
    class Meta:
        model = Extentions
        fields = ['exten', 'file']
        # Bootstrap-styled numeric input for the short code; 'file' keeps
        # its default widget.
        widgets = {'exten': NumberInput(
            attrs={'class': 'form-control', 'placeholder': 'Short code'})}
class QueueForm(ModelForm):
    """ModelForm for creating/editing Queue records."""
    class Meta:
        model = Queue
        fields = ['name', 'optin', 'exten']
        # Bootstrap-styled widgets for 'optin' and 'name'; 'exten' keeps
        # its default widget.
        widgets = {'optin': NumberInput(
            attrs={'class': 'form-control', 'placeholder': '1'}), 'name': TextInput(
            attrs={'class': 'form-control', 'placeholder': 'queue name'})}
| 706 | 209 |
from django.db import models
from clients.models import Client
# Create your models here.
class TherapyCenter(models.Model):
    """A physical location where therapy sessions take place."""
    title = models.CharField(max_length=30)
    location = models.CharField(max_length=30)
    phone_no = models.CharField(max_length=15)
    def __str__(self):
        return self.title
class Therapist(models.Model):
    """A therapist and the therapy types (OT/PT/ST) they can provide."""
    name = models.CharField(max_length=30)
    contact = models.CharField(max_length=15)
    # For each therapy type: 1 = "Yes", 2 = "No".
    # NOTE(review): a BooleanField would be more natural, but changing the
    # field type requires a migration -- left as-is.
    OT = models.IntegerField(choices=((1, "Yes"), (2, "No")))
    PT = models.IntegerField(choices=((1, "Yes"), (2, "No")))
    ST = models.IntegerField(choices=((1, "Yes"), (2, "No")))
    def __str__(self):
        return self.name
# Weekday choices shared by schedule models; values run 1 (Monday)
# through 7 (Sunday).
days = (
    (1, "Monday"),
    (2, "Tuesday"),
    (3, "Wednesday"),
    (4, "Thursday"),
    (5, "Friday"),
    (6, "Saturday"),
    (7, "Sunday"),
)
class TherapistSchedule(models.Model):
    """A recurring weekly availability window for a therapist at a center."""
    therapist = models.ForeignKey(Therapist, on_delete=models.CASCADE)
    day = models.IntegerField(choices=days)
    start_time = models.TimeField()
    end_time = models.TimeField()
    therapy_center = models.ForeignKey(TherapyCenter, on_delete=models.CASCADE)
    def __str__(self):
        # Use Django's get_FOO_display() instead of indexing days[self.day-1][1]:
        # positional indexing silently breaks if the `days` tuple is ever
        # reordered or renumbered, while get_day_display() looks the label up
        # by value.
        return f"{self.therapist}: {self.get_day_display()} ({self.start_time}-{self.end_time}) at {self.therapy_center}"
class TherapySlot(models.Model):
    """A bookable, dated therapy session slot."""
    title = models.CharField(null=True, blank=True, max_length=30)
    date = models.DateField()
    start_time = models.TimeField()
    end_time = models.TimeField()
    therapist = models.ForeignKey(Therapist, on_delete=models.CASCADE)
    # 1 = OT, 2 = PT, 3 = ST; optional until the slot's purpose is decided.
    therapy_type = models.IntegerField(
        choices=((1, "OT"), (2, "PT"), (3, "ST")), null=True, blank=True
    )
    # Unset while the slot is still available (status == 1).
    client = models.ForeignKey(Client, on_delete=models.CASCADE, null=True, blank=True)
    status = models.IntegerField(choices=((1, "Available"), (2, "Booked")), default=1)
    def __str__(self):
        return f"Therapist: {self.therapist}, Client: {self.client}, {self.date} ({self.start_time}-{self.end_time})"
| 1,983 | 719 |
from onegov.core.utils import yubikey_public_id
def as_float(value):
    """Convert *value* to float; falsy input (None, '', 0) yields 0.0."""
    if not value:
        return 0.0
    return float(value)
def strip_whitespace(value):
    """Strip spaces/CR/LF from both ends of *value*.

    Falsy input, or a value that strips down to the empty string,
    yields None.  Tabs are deliberately left untouched.
    """
    if not value:
        return None
    return value.strip(' \r\n') or None
def yubikey_identifier(value):
    """Return the yubikey public id for *value*; falsy input or a falsy
    lookup result yields the empty string."""
    if not value:
        return ''
    return yubikey_public_id(value) or ''
| 278 | 101 |
from django.urls import path
from . import views
# Route table for the time-series API.  Order matters: Django matches
# top-down, so purely numeric segments are captured by <int:ts_id> before
# the string-typed <str:ts_type> pattern can swallow them.
urlpatterns = [
    path('', views.time_series_save),
    path('<int:ts_id>/', views.time_series_detail),
    path('<str:ts_type>/', views.time_series_list),
    path('<str:ts_type>/generate/hold/', views.time_series_generate_hold),
    path('<str:ts_type>/generate/ramp/', views.time_series_generate_ramp),
    path('<str:ts_type>/generate/sine/', views.time_series_generate_sine),
]
| 435 | 157 |
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
class SwapNoise(BaseEstimator, TransformerMixin):
    """Column-wise "swap noise" augmentation for tabular data.

    For each column, a fraction `ratio` of the cells is replaced by values
    resampled (with replacement) from the same column.
    """
    def __init__(self, ratio=.15, random_seed=123):
        # ratio: probability that any given cell gets replaced.
        # random_seed: seeds numpy's global RNG so output is repeatable.
        self.seed = random_seed
        self.ratio = ratio
    def fit(self, X=None, y=None):
        """No-op fit.

        Bug fix: accepts (X, y) with defaults -- the original `fit(self)`
        signature broke the sklearn estimator contract, so the transformer
        could not be used inside a Pipeline.  Existing `fit()` calls still
        work.
        """
        return self
    def transform(self, input_data):
        """Return a noisy copy of the 2-D array-like *input_data*."""
        noisy = np.zeros(np.shape(input_data))
        np.random.seed(self.seed)
        for col in range(np.shape(input_data)[1]):
            column_values = np.array(input_data)[:, col]
            noisy[:, col] = self.partial_transform(column_values)
        return noisy
    def partial_transform(self, x):
        """Swap a random ~ratio subset of the 1-D array *x* with values
        resampled from *x* itself."""
        x_ = np.copy(x)
        swap_idx = np.where(np.random.rand(len(x)) < self.ratio)[0]
        np.put(x_, swap_idx, np.random.choice(x, len(swap_idx)))
        return x_
    def fit_transform(self, input_data, y=None):
        """Fit (no-op) then transform, mirroring the sklearn API shape."""
        self.fit(input_data, y)
        return self.transform(input_data)
"""Test functions related to parsing of LimeSurvey files"""
import unittest
from bs4 import BeautifulSoup
from n2survey.lime.structure import ( # TODO: test _get_clean_string,; TODO: test _get_question_group_name,
_parse_question,
_parse_question_description,
_parse_question_responses,
_parse_question_subquestions,
_parse_question_title,
_parse_section,
read_lime_questionnaire_structure,
)
class TestXMLSectionParsing(unittest.TestCase):
    """Test parsing <section> tags in an XML structure file"""
    def test_simple_section(self):
        """Test simple section parsing"""
        # A minimal section: one "title" info block plus one "before" block.
        section = BeautifulSoup(
            """
            <section id="16">
                <sectionInfo>
                    <position>title</position>
                    <text>Group 1</text>
                    <administration>self</administration>
                </sectionInfo>
                <sectionInfo>
                    <position>before</position>
                    <text>This is Question Group 1</text>
                    <administration>self</administration>
                </sectionInfo>
                <question></question>
            </section>""",
            "xml",
        )
        self.assertDictEqual(
            _parse_section(section.section),
            {"id": 16, "title": "Group 1", "info": "This is Question Group 1"},
        )
    def test_multiply_info_sections(self):
        """Test parsing a section with several non-title info blocks"""
        # "before" and "after" info texts are concatenated into one string.
        section = BeautifulSoup(
            """
            <section id="16">
                <sectionInfo>
                    <position>title</position>
                    <text>Group 1</text>
                    <administration>self</administration>
                </sectionInfo>
                <sectionInfo>
                    <position>before</position>
                    <text>This is Question Group 1</text>
                    <administration>self</administration>
                </sectionInfo>
                <sectionInfo>
                    <position>after</position>
                    <text>This is Question Group 1</text>
                    <administration>self</administration>
                </sectionInfo>
                <question></question>
            </section>""",
            "xml",
        )
        self.assertDictEqual(
            _parse_section(section.section),
            {
                "id": 16,
                "title": "Group 1",
                "info": "This is Question Group 1 This is Question Group 1",
            },
        )
    def test_long_description(self):
        """Test a section with a long description"""
        # HTML markup inside the description must be stripped and the
        # whitespace collapsed into single spaces.
        section = BeautifulSoup(
            """
            <section id="13913">
                <sectionInfo>
                    <position>title</position>
                    <text>Supervision</text>
                    <administration>self</administration>
                </sectionInfo>
                <sectionInfo>
                    <position>before</position>
                    <text><p style="border:medium none;border-bottom:0cm none #000000;padding-bottom:0cm;margin:0cm 0cm .0001pt;padding:0cm;"><b id="docs-internal-guid-90bd833e-7fff-2c78-398d-1ee9bdc67ae4">For the following questions, we would like to make the distinction between “formal” and “direct” supervisor clear: </b></p>
                    “Formal” supervisor refers to the main advisor of your thesis as present in your committee.</b></p>
                    “Direct” supervisor refers to the person you actually consult and discuss your project with on a more regular basis.</b></p>
                    <p style="border:medium none;border-bottom:0cm none #000000;padding-bottom:0cm;margin:0cm 0cm .0001pt;padding:0cm;">Section 4/8</p></text>
                    <administration>self</administration>
                </sectionInfo>
                <question></question>
            </section>""",
            "xml",
        )
        self.assertDictEqual(
            _parse_section(section.section),
            {
                "id": 13913,
                "title": "Supervision",
                "info": (
                    "For the following questions, "
                    "we would like to make the distinction between “formal” and “direct” "
                    "supervisor clear: “Formal” supervisor refers to the main advisor of "
                    "your thesis as present in your committee. “Direct” supervisor refers "
                    "to the person you actually consult and discuss your project with on a "
                    "more regular basis. Section 4/8"
                ),
            },
        )
class TestXMLQuestionParsing(unittest.TestCase):
    """Test parsing <question> tags in an XML structure file"""
    # Show full diffs when the large expected dicts mismatch.
    maxDiff = None
    def test_question_title_parsing(self):
        # The title is the question's <text>, stripped of HTML markup.
        question = BeautifulSoup(
            """<question>
            <text><p>Do you have one of the following (multiple answers possible)?</p>
            <p style="border:medium none;border-bottom:0cm none #000000;padding-bottom:0cm;margin:0cm 0cm .0001pt;padding:0cm;"> </p></text>
            </question>""",
            "xml",
        )
        question = question.question
        self.assertEqual(
            _parse_question_title(question),
            "Do you have one of the following (multiple answers possible)?",
        )
    def test_question_description_parsing(self):
        # The description comes from the <directive> with position "during".
        question = BeautifulSoup(
            """
            <question>
                <text>Is your formal supervisor your direct supervisor?</text>
                <directive>
                    <position>during</position>
                    <text><p>“Formal” supervisor refers to the main advisor of your thesis as present in your committee.
                    “Direct” supervisor refers to the person you actually consult and discuss your project with on a more regular basis.</p></text>
                    <administration>self</administration>
                </directive>
                <response varName="E4"></response>
            </question>""",
            "xml",
        )
        question = question.question
        self.assertEqual(
            _parse_question_description(question),
            (
                "“Formal” supervisor refers to the main advisor of your thesis as present "
                "in your committee. “Direct” supervisor refers to the person you actually "
                "consult and discuss your project with on a more regular basis."
            ),
        )
    def test_choice_question_without_contingent(self):
        # Plain single-choice question: no subquestions, no contingent input.
        question = BeautifulSoup(
            """
            <question>
                <text>This is Group 1 Question 1 of type "5 point choice".</text>
                <directive>
                    <position>during</position>
                    <text>Help text for G1Q1
                    </text>
                    <administration>self</administration>
                </directive>
                <response varName="G1Q1">
                    <fixed>
                        <category>
                            <label>1</label>
                            <value>1</value>
                        </category>
                        <category>
                            <label>2</label>
                            <value>2</value>
                        </category>
                        <category>
                            <label>3</label>
                            <value>3</value>
                        </category>
                        <category>
                            <label>4</label>
                            <value>4</value>
                        </category>
                        <category>
                            <label>5</label>
                            <value>5</value>
                        </category>
                    </fixed>
                </response>
            </question>""",
            "xml",
        )
        question = question.question
        choices = {"1": "1", "2": "2", "3": "3", "4": "4", "5": "5"}
        self.assertEqual(_parse_question_subquestions(question), [])
        self.assertEqual(
            _parse_question_responses(question),
            [
                (
                    {
                        "name": "G1Q1",
                        "format": None,
                        "length": None,
                        "label": None,
                        "choices": choices,
                    },
                    None,
                )
            ],
        )
        self.assertEqual(
            _parse_question(question),
            [
                {
                    "name": "G1Q1",
                    "label": 'This is Group 1 Question 1 of type "5 point choice".',
                    "format": None,
                    "choices": choices,
                    "question_group": "G1Q1",
                    "question_label": 'This is Group 1 Question 1 of type "5 point choice".',
                    "question_description": "Help text for G1Q1",
                    "type": "single-choice",
                }
            ],
        )
    def test_choice_question_with_contingent(self):
        # The "-oth-" option carries a contingent free-text sub-field that
        # is emitted as a second question in the same group.
        question = BeautifulSoup(
            """
            <question>
                <text>My overall work is predominantly</text>
                <response varName="A3">
                    <fixed>
                        <category>
                            <label>Option 1</label>
                            <value>B1</value>
                        </category>
                        <category>
                            <label>Option 2</label>
                            <value>B2</value>
                        </category>
                        <category>
                            <label>Other</label>
                            <value>-oth-</value>
                            <contingentQuestion varName="A3other">
                                <text>Other</text>
                                <length>24</length>
                                <format>longtext</format>
                            </contingentQuestion>
                        </category>
                    </fixed>
                </response>
            </question>""",
            "xml",
        )
        question = question.question
        choices = {"B1": "Option 1", "B2": "Option 2", "-oth-": "Other"}
        self.assertEqual(_parse_question_subquestions(question), [])
        self.assertEqual(
            _parse_question_responses(question),
            [
                (
                    {
                        "name": "A3",
                        "format": None,
                        "length": None,
                        "label": None,
                        "choices": choices,
                    },
                    {
                        "name": "A3other",
                        "format": "longtext",
                        "length": "24",
                        "text": "Other",
                        "contingent_of_name": "A3",
                        "contingent_of_choice": "-oth-",
                    },
                )
            ],
        )
        self.assertEqual(
            _parse_question(question),
            [
                {
                    "name": "A3",
                    "label": "My overall work is predominantly",
                    "format": None,
                    "choices": choices,
                    "question_group": "A3",
                    "question_label": "My overall work is predominantly",
                    "question_description": "",
                    "type": "single-choice",
                },
                {
                    "name": "A3other",
                    "label": "My overall work is predominantly / Other",
                    "format": "longtext",
                    "contingent_of_name": "A3",
                    "contingent_of_choice": "-oth-",
                    "question_group": "A3",
                    "question_label": "My overall work is predominantly",
                    "question_description": "",
                    "type": "single-choice",
                },
            ],
        )
    def test_question_without_choices(self):
        # Free-text response: the per-question label falls back to the
        # <free> element's label.
        question = BeautifulSoup(
            """
            <question>
                <text>Some cool question</text>
                <response varName="Q1">
                    <free>
                        <format>text</format>
                        <length>10</length>
                        <label>What is good about it?</label>
                    </free>
                </response>
            </question>
            """,
            "xml",
        )
        question = question.question
        self.assertEqual(_parse_question_subquestions(question), [])
        self.assertEqual(
            _parse_question_responses(question),
            [
                (
                    {
                        "name": "Q1",
                        "format": "text",
                        "length": "10",
                        "label": "What is good about it?",
                        "choices": None,
                    },
                    None,
                )
            ],
        )
        self.assertEqual(
            _parse_question(question),
            [
                {
                    "name": "Q1",
                    "label": "What is good about it?",
                    "format": "text",
                    "choices": None,
                    "question_group": "Q1",
                    "question_label": "Some cool question",
                    "question_description": "",
                    "type": "free",
                },
            ],
        )
    def test_multi_response_question(self):
        # Two free-text responses (Q1_R1/Q1_R2) share the question group Q1.
        question = BeautifulSoup(
            """
            <question>
                <text>Some cool question</text>
                <response varName="Q1_R1">
                    <free>
                        <format>text</format>
                        <length>10</length>
                        <label>What is good about it?</label>
                    </free>
                </response>
                <response varName="Q1_R2">
                    <free>
                        <format>text</format>
                        <length>10</length>
                        <label>What is bad about it?</label>
                    </free>
                </response>
            </question>
            """,
            "xml",
        )
        question = question.question
        self.assertEqual(_parse_question_subquestions(question), [])
        self.assertEqual(
            _parse_question(question),
            [
                {
                    "name": "Q1_R1",
                    "label": "What is good about it?",
                    "format": "text",
                    "choices": None,
                    "question_group": "Q1",
                    "question_label": "Some cool question",
                    "question_description": "",
                    "type": "free",
                },
                {
                    "name": "Q1_R2",
                    "label": "What is bad about it?",
                    "format": "text",
                    "choices": None,
                    "question_group": "Q1",
                    "question_label": "Some cool question",
                    "question_description": "",
                    "type": "free",
                },
            ],
        )
    def test_question_with_subquestions(self):
        # Array question: each subquestion becomes one "array"-typed entry
        # that shares the response's choices.
        question = BeautifulSoup(
            """
            <question>
                <text>This is Group 2 Question 8 of type "array by column".</text>
                <directive>
                    <position>during</position>
                    <text>Help text for G2Q8</text>
                    <administration>self</administration>
                </directive>
                <subQuestion varName="G2Q8_SQ001">
                    <text>How do you rate this?</text>
                </subQuestion>
                <subQuestion varName="G2Q8_SQ002">
                    <text>How do you rate that?</text>
                </subQuestion>
                <response varName="G2Q8">
                    <fixed rotate="true">
                        <category>
                            <label>Option 1</label>
                            <value>A1</value>
                        </category>
                        <category>
                            <label>Option 2</label>
                            <value>A2</value>
                        </category>
                        <category>
                            <label>Option 3</label>
                            <value>A3</value>
                        </category>
                    </fixed>
                </response>
            </question>
            """,
            "xml",
        )
        question = question.question
        choices = {"A1": "Option 1", "A2": "Option 2", "A3": "Option 3"}
        self.assertEqual(
            _parse_question_subquestions(question),
            [
                ("G2Q8_SQ001", "How do you rate this?"),
                ("G2Q8_SQ002", "How do you rate that?"),
            ],
        )
        self.assertEqual(
            _parse_question(question),
            [
                {
                    "name": "G2Q8_SQ001",
                    "label": "How do you rate this?",
                    "format": None,
                    "choices": choices,
                    "question_group": "G2Q8",
                    "question_label": 'This is Group 2 Question 8 of type "array by column".',
                    "question_description": "Help text for G2Q8",
                    "type": "array",
                },
                {
                    "name": "G2Q8_SQ002",
                    "label": "How do you rate that?",
                    "format": None,
                    "choices": choices,
                    "question_group": "G2Q8",
                    "question_label": 'This is Group 2 Question 8 of type "array by column".',
                    "question_description": "Help text for G2Q8",
                    "type": "array",
                },
            ],
        )
class TestXMLQuestionnarieParsing(unittest.TestCase):
    # NOTE(review): "Questionnarie"/"survery" are typos, but renaming the
    # class or the test method would change what test runners report.
    def test_test_survery_structure_file(self):
        """Smoke-test parsing of two full survey structure files.

        NOTE(review): relies on hard-coded files under data/ -- these tests
        fail if the data files are not present in the working directory.
        """
        structure = read_lime_questionnaire_structure(
            # "tests/data/test_survey_structure.xml"
            "data/survey_structure.xml"
        )
        self.assertEqual(len(structure["sections"]), 10)
        self.assertEqual(len(structure["questions"]), 453)
        structure = read_lime_questionnaire_structure(
            # "tests/data/test_survey_structure.xml"
            "data/survey_structure_2021.xml"
        )
        self.assertEqual(len(structure["sections"]), 13)
        self.assertEqual(len(structure["questions"]), 553)
# Allow running this test module directly with `python`.
if __name__ == "__main__":
    unittest.main()
| 18,705 | 4,814 |
from http import HTTPStatus
from typing import Any, Dict
from aws_lambda_powertools.metrics.metrics import MetricUnit
from aws_lambda_powertools.utilities.feature_flags.exceptions import ConfigurationStoreError, SchemaValidationError
from aws_lambda_powertools.utilities.parser import ValidationError, parse
from aws_lambda_powertools.utilities.parser.envelopes import ApiGatewayEnvelope
from aws_lambda_powertools.utilities.typing import LambdaContext
from service.handlers.schemas.dynamic_configuration import FeatureFlagsNames, MyConfiguration
from service.handlers.schemas.env_vars import MyHandlerEnvVars
from service.handlers.schemas.input import Input
from service.handlers.schemas.output import Output
from service.handlers.utils.dynamic_configuration import get_dynamic_configuration_store, parse_configuration
from service.handlers.utils.env_vars_parser import get_environment_variables, init_environment_variables
from service.handlers.utils.http_responses import build_response
from service.handlers.utils.observability import logger, metrics, tracer
@tracer.capture_method(capture_response=False)
def inner_function_example(my_name: str, order_item_count: int) -> Output:
    """Evaluate the request-relevant feature flags and build the Output
    payload returned to the handler."""
    config_store = get_dynamic_configuration_store()
    # Global campaign flag: evaluated without any context.
    campaign: bool = config_store.evaluate(name=FeatureFlagsNames.TEN_PERCENT_CAMPAIGN.value, context={}, default=False)
    logger.debug('campaign feature flag value', extra={'campaign': campaign})
    # Customer-scoped flag: evaluated against the caller's name.
    premium: bool = config_store.evaluate(name=FeatureFlagsNames.PREMIUM.value, context={'customer_name': my_name}, default=False)
    logger.debug('premium feature flag value', extra={'premium': premium})
    return Output(success=True, order_item_count=order_item_count)
@init_environment_variables(model=MyHandlerEnvVars)
@metrics.log_metrics
@tracer.capture_lambda_handler(capture_response=False)
def my_handler(event: Dict[str, Any], context: LambdaContext) -> Dict[str, Any]:
    """Lambda entry point.

    Validates environment variables, fetches dynamic configuration and
    parses the API-Gateway request body, then delegates to
    inner_function_example.  Returns an API-Gateway-style response dict
    (500 on configuration errors, 400 on invalid input, 200 on success).
    """
    logger.set_correlation_id(context.aws_request_id)
    logger.info('my_handler is called, calling inner_function_example')
    env_vars: MyHandlerEnvVars = get_environment_variables(model=MyHandlerEnvVars)
    logger.debug('environment variables', extra=env_vars.dict())
    try:
        my_configuration: MyConfiguration = parse_configuration(model=MyConfiguration)
        logger.debug('fetched dynamic configuration', extra={'configuration': my_configuration.dict()})
    except (SchemaValidationError, ConfigurationStoreError) as exc:
        logger.exception(f'dynamic configuration error, error={str(exc)}')
        return build_response(http_status=HTTPStatus.INTERNAL_SERVER_ERROR, body={})
    try:
        # we want to extract and parse the HTTP body from the api gw envelope
        # (renamed from `input` so the builtin is not shadowed)
        request_input: Input = parse(event=event, model=Input, envelope=ApiGatewayEnvelope)
        logger.info('got create request', extra={'order_item_count': request_input.order_item_count})
    except (ValidationError, TypeError) as exc:
        logger.error('event failed input validation', extra={'error': str(exc)})
        return build_response(http_status=HTTPStatus.BAD_REQUEST, body={})
    response: Output = inner_function_example(request_input.my_name, request_input.order_item_count)
    logger.info('inner_function_example finished successfully')
    metrics.add_metric(name='ValidEvents', unit=MetricUnit.Count, value=1)
    return build_response(http_status=HTTPStatus.OK, body=response.dict())
| 3,501 | 986 |
class Breadcrumb:
    """One link in a breadcrumb trail: a target URL plus its display text."""
    def __init__(self, url, text):
        self.url = url
        self.text = text
# Maps a breadcrumb's display text to its site-relative URL.
url_dict = {
    'Main Menu': '/',
    'Invoices': '/invoices',
    'Customers': '/customers',
    'Settings': '/settings'
}
def breadcrumbs(*shortwords):
    """Build one Breadcrumb per label, resolving URLs through url_dict.

    Raises KeyError for labels that are not registered in url_dict.
    """
    trail = []
    for word in shortwords:
        trail.append(Breadcrumb(url_dict[word], word))
    return trail
| 327 | 115 |
# NOTE(review): `ax`, `medals`, and `plt` are defined outside this snippet.
# Add bars for "Gold" with the label "Gold"
ax.bar(medals.index, medals["Gold"], label="Gold")
# Stack bars for "Silver" on top with label "Silver"
ax.bar(medals.index, medals["Silver"], bottom=medals["Gold"], label="Silver")
# Stack bars for "Bronze" on top of that with label "Bronze"
# (its baseline is the combined height of the two lower stacks)
ax.bar(medals.index, medals["Bronze"], bottom=medals["Silver"]+medals["Gold"], label="Bronze")
# Display the legend
ax.legend()
plt.show()
/home/runner/.cache/pip/pool/64/da/97/267e8a2c0079f193f0db8c07cf48ce560bdfa25b876ba5b0c0a062bc16 | 96 | 72 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
def _security_issue(severity, description, title):
    """Build one issue record; every issue shares the 'security' category."""
    return {
        "severity": severity,
        "description": description,
        "categories": ["security"],
        "title": title,
    }

# Known issue types keyed by their internal identifier.
issues_data = {
    "xss": _security_issue("1", "Cross-Site Scripting detected", "XSS"),
    "session_fixation": _security_issue("2", "Session fixation detected", "Session fixation"),
    "file_inclusion": _security_issue("2", "File inclusion detected", "File Inclusion"),
    "file_disclosure": _security_issue("1", "File disclosure detected", "File disclosure"),
    "ldap_injection": _security_issue("1", "LDAP Injection detected", "LDAP Injection"),
    "xml_injection": _security_issue("1", "XML Injection detected", "XML Injection"),
    "sql_injection": _security_issue("1", "SQL Injection detected", "SQL Injection"),
    "code_injection": _security_issue("1", "Code Injection detected", "Code Injection"),
    "header_injection": _security_issue("1", "Header Injection detected", "Header Injection"),
    "idor": _security_issue("1", "Insecure Direct Object Reference detected", "Insecure Direct Object Reference"),
    "command_injection": _security_issue("1", "Command Injection detected", "Command Injection"),
    "mail command_injection": _security_issue("1", "Mail Command Injection detected", "Mail Command Injection"),
    "w32api command_injection": _security_issue("1", "w32api Command Injection detected", "w32api Command Injection"),
}
| 2,192 | 715 |
from __future__ import unicode_literals
import uuid
try:
from django.contrib.contenttypes.fields import GenericForeignKey
except ImportError:
from django.contrib.contenttypes.generic import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _, pgettext_lazy
from django.db import models
# Used for object modifications
CREATE = 'CREATE'
UPDATE = 'UPDATE'
DELETE = 'DELETE'
# Used for m2m modifications
ADD = 'ADD'
REMOVE = 'REMOVE'
CLEAR = 'CLEAR'
class TrackingEvent(models.Model):
    """One tracked modification (object CRUD or m2m change), with generic
    links to both the modified object and the acting user.

    NOTE(review): the ForeignKey fields pass no on_delete, so this model
    targets Django < 2.0 -- confirm before upgrading Django.
    """
    ACTIONS = (
        (CREATE, _('Create')),
        (UPDATE, _('Update')),
        (DELETE, _('Delete')),
        (ADD, _('Add')),
        (REMOVE, pgettext_lazy('Remove from something', 'Remove')),
        (CLEAR, _('Clear')),
    )
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    date = models.DateTimeField(
        _("Date"), auto_now_add=True, editable=False
    )
    action = models.CharField(
        _('Action'), max_length=6, choices=ACTIONS, editable=False
    )
    # Generic FK to the tracked object (content type + primary key).
    object_content_type = models.ForeignKey(
        ContentType,
        related_name='workon_tracking_object_content_type',
        editable=False
    )
    object_id = models.PositiveIntegerField(editable=False, null=True)
    object = GenericForeignKey('object_content_type', 'object_id')
    object_repr = models.CharField(
        _("Object representation"),
        help_text=_(
            "Object representation, useful if the object is deleted later."
        ),
        max_length=250,
        editable=False
    )
    # Generic FK to the user who made the change (nullable for system events).
    user_content_type = models.ForeignKey(
        ContentType,
        related_name='workon_tracking_user_content_type',
        editable=False,
        null=True,
    )
    user_id = models.PositiveIntegerField(editable=False, null=True)
    user = GenericForeignKey('user_content_type', 'user_id')
    user_repr = models.CharField(
        _("User representation"),
        help_text=_(
            "User representation, useful if the user is deleted later."
        ),
        max_length=250,
        editable=False
    )
    class Meta:
        db_table = "workon_tracking_tracking_event"
        verbose_name = _('Tracking event')
        verbose_name_plural = _('Tracking events')
        ordering = ['-date']
class TrackedFieldModification(models.Model):
    """One field-level diff belonging to a TrackingEvent; old/new values are
    stored JSON-serialized so any field type can be recorded."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    event = models.ForeignKey(
        TrackingEvent, verbose_name=_("Event"), related_name='fields',
        editable=False
    )
    field = models.CharField(_("Field"), max_length=40, editable=False)
    old_value = models.TextField(
        _("Old value"),
        help_text=_("JSON serialized"),
        null=True,
        editable=False,
    )
    new_value = models.TextField(
        _("New value"),
        help_text=_("JSON serialized"),
        null=True,
        editable=False,
    )
    class Meta:
        db_table = "workon_tracking_tracked_field_modification"
        verbose_name = _('Tracking field modification')
        verbose_name_plural = _('Tracking field modifications')
""" Code for saving/loading pytorch models and batches of adversarial images
CHECKPOINT NAMING CONVENTIONS:
<unique_experiment_name>.<architecture_abbreviation>.<6 digits of epoch number>.path
e.g.
fgsm_def.resnet32.20180301.120000.path
All checkpoints are stored in CHECKPOINT_DIR
Checkpoints are state dicts only!!!
"""
import torch
import math
import os
import re
import glob
import config
import numpy as np
import utils.pytorch_utils as utils
import random
# Directory all model checkpoints are read from / written to (from config).
CHECKPOINT_DIR = config.MODEL_PATH
# Directory where batches of generated adversarial images are saved.
OUTPUT_IMAGE_DIR = config.OUTPUT_IMAGE_PATH
##############################################################################
# #
# CHECKPOINTING MODELS #
# #
##############################################################################
def clear_experiment(experiment_name, architecture):
    """ Deletes all saved state dicts for an experiment/architecture pair """
    for filename in params_to_filename(experiment_name, architecture):
        full_path = os.path.join(CHECKPOINT_DIR, filename)
        try:
            # Remove unconditionally and swallow a missing file: the previous
            # exists()-then-remove pattern had a race (TOCTOU) and abused a
            # conditional expression purely for its side effect.
            os.remove(full_path)
        except FileNotFoundError:
            pass
def list_saved_epochs(experiment_name, architecture):
    """ Returns a list of int epochs we've checkpointed for this
        experiment name and architecture
    """
    epochs = []
    for filename in params_to_filename(experiment_name, architecture):
        # Filenames look like <exp>.<arch>.<epoch-or-'best'>.path; keep the
        # epoch component, converted to int when it is numeric.
        token = filename.split('.')[-2]
        epochs.append(int(token) if token.isdigit() else token)
    return epochs
def params_to_filename(experiment_name, architecture, epoch_val=None):
    """ Outputs string name of file.
    ARGS:
        experiment_name : string - name of experiment we're saving
        architecture : string - abbreviation for model architecture
        epoch_val : int/(intLo, intHi)/None/'best' -
                    - if int we return this int exactly
                    - if 'best' we return the 'best' checkpoint name exactly
                    - if (intLo, intHi) we return all existing filenames with
                      epoch in range [intLo, intHi], in sorted order
                    - if None, we return all existing filenames with params
                      in ascending epoch-sorted order
    RETURNS:
        filenames: string or (possibly empty) string[] of just the base name
                   of saved models
    """
    # Exact-epoch / 'best' requests are pure string formatting -- no globbing.
    if isinstance(epoch_val, int):
        return '.'.join([experiment_name, architecture, '%06d' % epoch_val,
                         'path'])
    elif epoch_val == 'best':
        return '.'.join([experiment_name, architecture, epoch_val, 'path'])
    glob_prefix = os.path.join(CHECKPOINT_DIR,
                               '%s.%s.*' % (experiment_name, architecture))
    # Fix: use raw strings (the old '\.' literals raise invalid-escape
    # warnings on Python 3.6+) and re.escape the caller-supplied components
    # so regex metacharacters in names cannot corrupt the match.
    re_prefix = r'%s\.%s\.' % (re.escape(experiment_name),
                               re.escape(architecture))
    re_suffix = r'\.path'
    valid_name = lambda f: bool(re.match(re_prefix + r'(\d{6}|best)' +
                                         re_suffix, f))
    safe_int_cast = lambda s: int(s) if s.isdigit() else s
    select_epoch = lambda f: safe_int_cast(re.sub(re_prefix, '',
                                                  re.sub(re_suffix, '', f)))
    # epoch_val is None or an (intLo, intHi) range here; None means "any".
    epoch_lo, epoch_hi = epoch_val or (0, float('inf'))
    valid_epoch = lambda e: (e == 'best') or (epoch_lo <= e <= epoch_hi)
    filename_epoch_pairs = []
    best_filenames = []
    for full_path in glob.glob(glob_prefix):
        filename = os.path.basename(full_path)
        if not valid_name(filename):
            continue
        epoch = select_epoch(filename)
        if valid_epoch(epoch):
            if epoch == 'best':
                best_filenames.append(filename)
            else:
                filename_epoch_pairs.append((filename, epoch))
    # 'best' checkpoints come first, then numeric epochs ascending.
    return best_filenames + \
        [pair[0] for pair in sorted(filename_epoch_pairs,
                                    key=lambda el: el[1])]
def save_state_dict(experiment_name, architecture, epoch_val, model,
                    k_highest=10):
    """ Saves the state dict of a model with the given parameters.
    ARGS:
        experiment_name : string - name of experiment we're saving
        architecture : string - abbreviation for model architecture
        epoch_val : int - which epoch we're saving
        model : model - object we're saving the state dict of
        k_highest : int - if not None, we make sure to not include more than
                    k state_dicts for (experiment_name, architecture) pair,
                    keeping the k-most recent if we overflow
    RETURNS:
        The model we saved
    """
    # First resolve THIS filename
    this_filename = params_to_filename(experiment_name, architecture, epoch_val)
    # Next clear up memory if too many state dicts ('best' is never pruned)
    current_filenames = [_ for _ in
                         params_to_filename(experiment_name, architecture)
                         if not _.endswith('.best.path')]
    delete_els = []
    if k_highest is not None:
        # +1 leaves room for the checkpoint we are about to write.
        num_to_delete = len(current_filenames) - k_highest + 1
        if num_to_delete > 0:
            # Epochs are zero-padded, so lexical sort == epoch sort; the
            # lowest (oldest) epochs are removed first.
            delete_els = sorted(current_filenames)[:num_to_delete]
    for delete_el in delete_els:
        full_path = os.path.join(*[CHECKPOINT_DIR, delete_el])
        # Fixed idiom: was a conditional-expression used as a statement
        # (`os.remove(...) if ... else None`).
        if os.path.exists(full_path):
            os.remove(full_path)
    # Finally save the state dict
    torch.save(model.state_dict(), os.path.join(*[CHECKPOINT_DIR,
                                                  this_filename]))
    return model
def load_state_dict_from_filename(filename, model):
    """Load the weights stored in a checkpoint file into `model`.

    ARGS:
        filename : string - checkpoint basename inside CHECKPOINT_DIR
        model : nn.Module - has 'load_state_dict' method
    RETURNS:
        the model loaded with the weights contained in the file
    """
    full_path = os.path.join(*[CHECKPOINT_DIR, filename])
    # Exactly one file must match the given name.
    assert len(glob.glob(full_path)) == 1

    saved = torch.load(full_path)
    # Some checkpoints wrap the weights under a 'state_dict' key; plain
    # state dicts pass through unchanged.
    state_dict = saved.get('state_dict', saved)
    model.load_state_dict(state_dict)
    return model
def load_state_dict(experiment_name, architecture, epoch, model):
    """Load a previously saved checkpoint into `model`.

    ARGS:
        experiment_name : string - name of experiment we're loading
        architecture : string - abbreviation for model architecture
        epoch : int - which epoch we're loading
    RETURNS:
        the model with the checkpoint's weights loaded
    """
    checkpoint_name = params_to_filename(experiment_name, architecture, epoch)
    return load_state_dict_from_filename(checkpoint_name, model)
###############################################################################
# #
# CHECKPOINTING DATA #
# #
###############################################################################
"""
This is a hacky fix to save batches of adversarial images along with their
labels.
"""
class CustomDataSaver(object):
    """Persists minibatches of (examples, labels) as paired .npy files under
    OUTPUT_IMAGE_DIR/<image_subdirectory>, so a CustomDataLoader can stream
    them back later.
    """
    # TODO: make this more pytorch compliant
    def __init__(self, image_subdirectory):
        self.image_subdirectory = image_subdirectory
        # Make this folder if it doesn't exist yet (the original comment
        # promised this but never did it).
        os.makedirs(os.path.join(OUTPUT_IMAGE_DIR, image_subdirectory),
                    exist_ok=True)

    def save_minibatch(self, examples, labels):
        """ Assigns a random name to this minibatch and saves the examples and
            labels in two separate files:
            <random_name>.examples.npy and <random_name>.labels.npy
        ARGS:
            examples: Variable or Tensor (NxCxHxW) - examples to be saved
            labels  : Variable or Tensor (N) - labels matching the examples
        """
        # First make both examples and labels into numpy arrays
        examples = examples.cpu().numpy()
        labels = labels.cpu().numpy()
        # BUG FIX: use a UUID instead of str(random.random())[2:] -- the old
        # scheme (flagged by its own TODO) had a real collision risk.
        import uuid
        random_string = uuid.uuid4().hex
        # Save both files
        example_file = '%s.examples.npy' % random_string
        example_path = os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory,
                                    example_file)
        np.save(example_path, examples)
        label_file = '%s.labels.npy' % random_string
        label_path = os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory,
                                  label_file)
        np.save(label_path, labels)
class CustomDataLoader(object):
    """Iterates over minibatches previously saved by CustomDataSaver.

    Reads every <prefix>.examples.npy / <prefix>.labels.npy pair found in
    OUTPUT_IMAGE_DIR/<image_subdirectory> and yields (examples, labels)
    batches of exactly `batch_size`, plus one final smaller remainder batch
    if the total is not a multiple of the batch size. Batches are optionally
    converted to torch Tensors and moved to the GPU.
    """
    # TODO: make this more pytorch compliant
    def __init__(self, image_subdirectory, batch_size=128, to_tensor=True,
                 use_gpu=False):
        super(CustomDataLoader, self).__init__()
        self.image_subdirectory = image_subdirectory
        self.batch_size = batch_size
        # use_gpu=True only makes sense for torch Tensors, so it requires
        # to_tensor=True (booleans compare as 0/1 here).
        assert to_tensor >= use_gpu
        self.to_tensor = to_tensor
        self.use_gpu = use_gpu

    def _prepare_data(self, examples, labels):
        """ Takes in numpy examples and labels and tensor-ifies and cuda's them
            if necessary
        """
        if self.to_tensor:
            examples = torch.Tensor(examples)
            labels = torch.Tensor(labels)
        return utils.cudafy(self.use_gpu, (examples, labels))

    def _base_loader(self, prefix, which):
        # Load one saved numpy array; `which` selects the examples file or
        # the labels file for the given random prefix.
        assert which in ['examples', 'labels']
        filename = '%s.%s.npy' % (prefix, which)
        full_path = os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory,
                                 filename)
        return np.load(full_path)

    def _example_loader(self, prefix):
        """ Loads the numpy array of examples given the random 'prefix' """
        return self._base_loader(prefix, 'examples')

    def _label_loader(self, prefix):
        """ Loads the numpy array of labels given the random 'prefix' """
        return self._base_loader(prefix, 'labels')

    def __iter__(self):
        # First collect all the filenames:
        glob_prefix = os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory,
                                   '*')
        files = glob.glob(glob_prefix)
        # Each saved minibatch contributes two files sharing one random
        # prefix; deduplicate down to the prefixes.
        valid_random_names = set(os.path.basename(_).split('.')[0]
                                 for _ in files)
        # Now loop through filenames and yield out minibatches of correct size
        running_examples, running_labels = [], []
        running_size = 0
        for random_name in valid_random_names:
            # Load data from files and append to 'running' lists
            loaded_examples = self._example_loader(random_name)
            loaded_labels = self._label_loader(random_name)
            running_examples.append(loaded_examples)
            running_labels.append(loaded_labels)
            running_size += loaded_examples.shape[0]
            if running_size < self.batch_size:
                # Load enough data to populate one minibatch, which might
                # take multiple files
                continue
            # Concatenate all images together
            merged_examples = np.concatenate(running_examples, axis=0)
            merged_labels = np.concatenate(running_labels, axis=0)
            # Make minibatches out of concatenated things,
            for batch_no in range(running_size // self.batch_size):
                index_lo = batch_no * self.batch_size
                index_hi = index_lo + self.batch_size
                example_batch = merged_examples[index_lo:index_hi]
                label_batch = merged_labels[index_lo:index_hi]
                yield self._prepare_data(example_batch, label_batch)
            # Handle any remainder for remaining files: carry the tail
            # rows over into the next accumulation round.
            remainder_idx = (running_size // self.batch_size) * self.batch_size
            running_examples = [merged_examples[remainder_idx:]]
            running_labels = [merged_labels[remainder_idx:]]
            running_size = running_size - remainder_idx
        # If we're out of files, yield this last sub-minibatch of data
        if running_size > 0:
            merged_examples = np.concatenate(running_examples, axis=0)
            merged_labels = np.concatenate(running_labels, axis=0)
            yield self._prepare_data(merged_examples, merged_labels)
| 12,377 | 3,443 |
import os, sys
import shutil
CARBON_DIR = os.path.dirname(__file__)
USAGE = '''\
'''
## USAGE:
## sys.path.append('path/to/carbon/')
## import carbontools.py as cbtools
## lib = cbtools.GET_CARBON_LIB(env)
def GET_CARBON_LIB(env):
    """Build and return the static carbon library as a SCons target.

    ARGS:
        env : SCons Environment - cloned so carbon-specific flags do not
              leak into the caller's environment.
    RETURNS:
        The SCons Library node for bin/carbon.
    """
    ## TODO: generate "*.gen.h" files
    SOURCES = []
    cbenv = env.Clone()
    cbenv.Append(CPPPATH=[os.path.join(CARBON_DIR, 'include/')])
    # Glob patterns (relative to CARBON_DIR) covering all library sources.
    ALL_SOURCES = [
        'src/var/*.cpp',
        'src/core/*.cpp',
        'src/native/*.cpp',
        'src/compiler/*.cpp',
        'src/thirdparty/dlfcn-win32/*.c',
    ]
    # BUG FIX: the original wrote `ALL_SOURCES(cbenv)`, calling the list
    # as if it were a function (TypeError at runtime).
    for src in ALL_SOURCES:
        SOURCES.append(cbenv.Glob(os.path.join(CARBON_DIR, src)))
    lib = cbenv.Library(
        target = os.path.join(CARBON_DIR, 'bin/carbon'),
        source = SOURCES)
    return lib
def main():
    """Command-line entry point: parse sys.argv and dispatch commands."""
    argcount = len(sys.argv)
    if argcount < 2:
        # BUG FIX: was `print(USAGE_STRING)` -- an undefined name that
        # raised NameError instead of printing the usage text.
        print(USAGE)
        exit()
    ## switch commands
    if sys.argv[1] == 'clean':
        cleanall = False
        # Remaining args may only be the --all/-a flag.
        for i in range(2, argcount):
            if sys.argv[i] in ('--all', '-a'):
                cleanall = True
            else:
                error_command(sys.argv[i])
        clean(cleanall)
    else:
        error_command(sys.argv[1])
## Internal methods ####
def error_command(cmd):
    """Report an unknown command, print the usage text, and exit with -1."""
    message = '[*]: ERROR: unknown command "' + cmd + '"\n' + USAGE
    print(message)
    exit(-1)
def error_exit(msg):
    """Print a generic error message plus the usage text, then exit with -1."""
    # BUG FIX: the original message contained a stray unmatched '"' after
    # msg ('... + msg + '"\n' ...) with no opening quote.
    print('[*]: ERROR: ' + msg + '\n' + USAGE)
    exit(-1)
def get_platform():
    """Map sys.platform onto the platform name used by this build script.

    Returns 'windows', 'x11', or 'osx'; aborts via error_exit() on any
    other platform.
    """
    platform_names = {
        'win32': 'windows',
        'linux': 'x11',
        'linux2': 'x11',
        'darwin': 'osx',
    }
    try:
        return platform_names[sys.platform]
    except KeyError:
        error_exit("platform(%s) not supported." % sys.platform)
def clean(cleanall=False):
    """Remove build artifacts: run `scons -c`, then delete output
    directories and generated files from the working tree.

    ARGS:
        cleanall : bool - accepted for compatibility with main(), which
            calls clean(cleanall) for the --all/-a flag.
            NOTE(review): the current implementation removes everything
            regardless of this flag; reserved for finer-grained cleaning.
    """
    # BUG FIX: the original list was missing a comma after 'debug/', so
    # 'debug/' 'release/' silently concatenated into 'debug/release/'
    # and 'debug/' additionally appeared twice.
    CLEAN_DIRS = [
        'x64/',
        'debug/',
        'release/',
        'bin/',
        '.vs',
        '.vscode',
    ]
    # File suffixes removed from the entire tree below the CWD.
    CLEAN_FILES = [
        '.pdb',
        '.idb',
        '.ilk',
        '.obj',
        '.sln',
        '.vcxproj',
        '.vcxproj.filters',
        '.vcxproj.user',
        '.sconsign.dblite',
    ]
    os.system('scons -c')
    print('\n[*]: cleaning all files ...')
    for _dir in CLEAN_DIRS:
        try:
            shutil.rmtree(_dir)
            print('[*]: Removed - %s' % _dir)
        except OSError:
            # Best effort: the directory may simply not exist.
            pass
    for path, dirs, files in os.walk('.'):
        for file in files:
            for suffix in CLEAN_FILES:
                if file.endswith(suffix):
                    os.remove(os.path.join(path, file))
                    print('[*]: Removed - %s' % os.path.join(path, file))
    print('[*]: done cleaning targets.')
# Script entry point: parse argv and dispatch to the matching command.
if __name__ == '__main__':
    main()
# First Party
from smdebug.profiler import SMTFProfilerEvents
def test_smtfprofiler_events(trace_file="./tests/profiler/smtf_profiler_trace.json"):
    """Smoke-test SMTFProfilerEvents against a known trace file: total event
    count, point/range queries, and start/end-time sort orders."""
    trace_json_file = trace_file
    print(f"Reading the trace file {trace_json_file}")
    t_events = SMTFProfilerEvents(trace_json_file)
    all_trace_events = t_events.get_all_events()
    num_trace_events = len(all_trace_events)
    print(f"Number of events read = {num_trace_events}")
    assert num_trace_events == 49

    query_ts = 1589314018458800000  # nanoseconds
    event_list = t_events.get_events_at(query_ts)
    # BUG FIX: the message used to print an unrelated hard-coded timestamp.
    print(f"Number of events at {query_ts} are {len(event_list)}")
    assert len(event_list) == 1

    range_end = 1589314018470000000  # nanoseconds
    completed_event_list = t_events.get_events_within_range(0, range_end)
    # BUG FIX: same stale hard-coded timestamp in this message.
    print(f"Number of events occurred between 0 and {range_end} are {len(completed_event_list)}")
    assert len(completed_event_list) == 34

    start_time_sorted = t_events.get_events_start_time_sorted()
    start_time_for_first_event = start_time_sorted[0].start_time
    print(f"The first event started at {start_time_for_first_event}")
    assert start_time_for_first_event == 1589314018458743000

    end_time_sorted = t_events.get_events_end_time_sorted()
    end_time_for_last_event = end_time_sorted[-1].end_time
    # BUG FIX: message was copy-pasted from the start-time check above.
    print(f"The last event ended at {end_time_for_last_event}")
    assert end_time_for_last_event == 1589314018481947000
| 1,394 | 558 |
from typing import Type
import numpy as np
import pytest
from PartSegCore.segmentation import ROIExtractionAlgorithm
from PartSegCore.segmentation.algorithm_base import ROIExtractionResult, SegmentationLimitException
from PartSegCore.segmentation.restartable_segmentation_algorithms import final_algorithm_list as restartable_list
from PartSegCore.segmentation.segmentation_algorithm import (
CellFromNucleusFlow,
ThresholdFlowAlgorithm,
close_small_holes,
)
from PartSegCore.segmentation.segmentation_algorithm import final_algorithm_list as algorithm_list
def empty(*args):
    """No-op callback for algorithms that require a progress reporter."""
    return None
@pytest.fixture(autouse=True)
def fix_threshold_flow(monkeypatch):
    """Autouse fixture: pin the default thresholds of the two flow-based
    algorithms so the synthetic test image segments deterministically."""
    values = ThresholdFlowAlgorithm.get_default_values()
    values["threshold"]["values"]["core_threshold"]["values"]["threshold"] = 10
    values["threshold"]["values"]["base_threshold"]["values"]["threshold"] = 5

    def _param(self):
        # Replacement for get_default_values that returns the pinned dict.
        return values

    monkeypatch.setattr(ThresholdFlowAlgorithm, "get_default_values", _param)
    values2 = CellFromNucleusFlow.get_default_values()
    values2["nucleus_threshold"]["values"]["threshold"] = 10
    values2["cell_threshold"]["values"]["threshold"] = 5

    def _param2(self):
        # Same patch for the nucleus-to-cell flow algorithm.
        return values2

    monkeypatch.setattr(CellFromNucleusFlow, "get_default_values", _param2)
@pytest.mark.parametrize("algorithm", restartable_list + algorithm_list)
@pytest.mark.parametrize("masking", [True, False])
def test_segmentation_algorithm(image, algorithm: Type[ROIExtractionAlgorithm], masking):
    """Smoke-test every registered ROI-extraction algorithm, with and
    without a mask, on the shared `image` fixture."""
    assert algorithm.support_z() is True
    assert algorithm.support_time() is False
    assert isinstance(algorithm.get_steps_num(), int)
    instance = algorithm()
    instance.set_image(image)
    if masking:
        # Use all non-zero pixels of channel 0 as the mask.
        instance.set_mask(image.get_channel(0) > 0)
    instance.set_parameters(**instance.get_default_values())
    # Algorithms declaring a "Need mask" field must refuse to run unmasked.
    if not masking and "Need mask" in algorithm.get_fields():
        with pytest.raises(SegmentationLimitException):
            instance.calculation_run(empty)
    else:
        res = instance.calculation_run(empty)
        assert isinstance(instance.get_info_text(), str)
        assert isinstance(res, ROIExtractionResult)
    instance.clean()
@pytest.mark.parametrize("ndim", (2, 3))
@pytest.mark.parametrize("dtype", (np.uint8, bool))
def test_close_small_holes(ndim, dtype):
    """Hollowing out a filled hypercube and closing the hole must restore
    the original array exactly."""
    border = (slice(1, -1),) * ndim
    hole = (slice(3, -3),) * ndim
    data = np.zeros((10,) * ndim, dtype=dtype)
    data[border] = 1
    expected = data.copy()
    data[hole] = 0
    assert np.all(close_small_holes(data, 5 ** 2) == expected)
| 2,551 | 838 |
# coding=utf-8
import logging
import os
from flask import Flask
from cloudware_server.route.base import register_routes
def config_logger():
    """Set the root logger's level to INFO."""
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
# Apply the logging configuration at import time, before the app is built.
config_logger()
def create_app(config=None):
    """Application factory: build and configure the Flask app.

    If `config` is given, it overrides the defaults; otherwise the optional
    instance config.py is loaded. Ensures the instance folder exists and
    registers all routes before returning the app.
    """
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_mapping(
        SECRET_KEY='dev',
        DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),
    )
    if config:
        app.config.from_mapping(config)
    else:
        # silent=True: a missing instance config.py is not an error.
        app.config.from_pyfile('config.py', silent=True)
    try:
        if not os.path.exists(app.instance_path):
            os.makedirs(app.instance_path)
    except OSError as e:
        logging.error('启动失败 %s', e)
    # Register routes
    register_routes(app)
    return app
# Build the app at module level so WSGI servers can import `app` directly.
app = create_app()
logging.info("%s", os.path.join(app.instance_path, 'flaskr.sqlite'))
# BUG FIX: the dev server used to start unconditionally at import time,
# which breaks importing this module from tests or a WSGI entry point;
# only run it when executed as a script.
if __name__ == '__main__':
    app.run(host='localhost', port=5000)
| 958 | 344 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import web
from bson.objectid import ObjectId
from config import setting
import helper
# Web-store database handle (from the shared config module).
db = setting.db_web
# URL route served by this module's handler (web.py convention).
url = ('/online/batch_job')
# - Batch-process orders
class handler:
    """web.py handler for the batch-order-processing page (Python 2 code).

    For the current user's shop, aggregates active online orders
    (PAID / DISPATCH / ONROAD; group-buy and single purchases) per
    group-buy deal (tuan_id) and per buyer province, then renders the
    batch_job template together with per-province totals.
    """
    def GET(self):
        # The page requires the BATCH_JOB privilege.
        if helper.logged(helper.PRIV_USER,'BATCH_JOB'):
            render = helper.create_render()
            #user_data=web.input(start_date='', shop='__ALL__')
            # Look up the shop bound to the logged-in user.
            db_shop = helper.get_shop_by_uid()
            shop_name = helper.get_shop(db_shop['shop'])
            # Tally online orders still in the fulfilment pipeline.
            condition = {
                'shop' : db_shop['shop'],
                'status' : {'$in' : ['PAID','DISPATCH','ONROAD']},
                'type' : {'$in' : ['TUAN', 'SINGLE']}, # group-buy use only
            }
            db_sale2 = db.order_app.find(condition, {
                'order_id' : 1,
                'paid_time' : 1,
                'cart' : 1,
                'type' : 1,
                'status' : 1,
                'address' : 1,
            })
            # skus: {tuan_id: {'name':..., 'tuan_id':..., <province>: counters}}
            skus={}
            for i in db_sale2:
                # Determine the buyer's province from address field 8
                # (comma-separated); fall back to "unknown" (u'未知').
                sheng = i['address'][8].split(',')[0] if len(i['address'])>=9 else u'未知'
                if skus.has_key(i['cart'][0]['tuan_id']):
                    if skus[i['cart'][0]['tuan_id']].has_key(sheng):
                        # Known deal + known province: bump the counters.
                        skus[i['cart'][0]['tuan_id']][sheng]['num'] += 1
                        skus[i['cart'][0]['tuan_id']][sheng]['paid'] += (1 if i['status']=='PAID' else 0)
                        skus[i['cart'][0]['tuan_id']][sheng]['dispatch'] += (1 if i['status']=='DISPATCH' else 0)
                        skus[i['cart'][0]['tuan_id']][sheng]['onroad'] += (1 if i['status']=='ONROAD' else 0)
                    else:
                        # Known deal, first order from this province.
                        skus[i['cart'][0]['tuan_id']][sheng] = {}
                        skus[i['cart'][0]['tuan_id']][sheng]['num'] = 1
                        skus[i['cart'][0]['tuan_id']][sheng]['paid'] = (1 if i['status']=='PAID' else 0)
                        skus[i['cart'][0]['tuan_id']][sheng]['dispatch'] = (1 if i['status']=='DISPATCH' else 0)
                        skus[i['cart'][0]['tuan_id']][sheng]['onroad'] = (1 if i['status']=='ONROAD' else 0)
                else:
                    # First order for this deal: resolve its display title.
                    r = db.pt_store.find_one({'tuan_id':i['cart'][0]['tuan_id']},{'title':1})
                    if r:
                        title = r['title']
                    else:
                        title = 'n/a'
                    skus[i['cart'][0]['tuan_id']] = {
                        'name' : title,
                        'tuan_id' : i['cart'][0]['tuan_id'],
                    }
                    skus[i['cart'][0]['tuan_id']][sheng]={
                        'num' : 1, # includes complimentary items
                        'paid' : 1 if i['status']=='PAID' else 0, # paid, awaiting picking (group-buy)
                        'dispatch' : 1 if i['status']=='DISPATCH' else 0, # paid, awaiting delivery (group-buy)
                        'onroad' : 1 if i['status']=='ONROAD' else 0, # paid, out for delivery (group-buy)
                    }
            # Roll the per-deal counters up into per-province totals.
            total_sum={}
            for i in skus.keys():
                for j in skus[i].keys():
                    if j in ['name','tuan_id']:
                        # Metadata keys, not province buckets.
                        continue
                    if total_sum.has_key(j):
                        total_sum[j]['paid'] += skus[i][j]['paid']
                        total_sum[j]['dispatch'] += skus[i][j]['dispatch']
                        total_sum[j]['onroad'] += skus[i][j]['onroad']
                    else:
                        total_sum[j] = {}
                        total_sum[j]['paid'] = skus[i][j]['paid']
                        total_sum[j]['dispatch'] = skus[i][j]['dispatch']
                        total_sum[j]['onroad'] = skus[i][j]['onroad']
            return render.batch_job(helper.get_session_uname(), helper.get_privilege_name(),
                skus, shop_name['name'], total_sum)
        else:
            # Not authorised: bounce back to the front page.
            raise web.seeother('/')
| 2,990 | 1,633 |
# NOTE(review): wildcard import -- pulls create_driver_session (and any other
# public names of helpers.reusable_browser) into this namespace.
from helpers.reusable_browser import *

# Side effect on import: start the shared browser/driver session.
create_driver_session()
import os
import re
import subprocess
import json
import networkx
from pygments import lexers, token, util
# Pygments token types that count as symbol *references* when lexing files.
TOKENTYPE_WHITELIST = [
    token.Name,
    token.Name.Attribute,
    token.Name.Builtin,
    token.Name.Builtin.Pseudo,
    token.Name.Constant,
    token.Name.Decorator,
    token.Name.Entity,
    token.Name.Exception,
    token.Name.Label,
    token.Name.Namespace,
    token.Name.Other,
    token.Name.Tag,
    token.Name.Variable,
    token.Name.Variable.Class,
    token.Name.Variable.Global,
    token.Name.Variable.Instance
]
# Populated at runtime by init() from `ack --help-types`.
SUPPORTED_LANGUAGES = []
# Regular expression to parse the list of languages supported by ack as listed
# by ack --help-types
# Pattern: "    --[no]python"
# FIX: raw string literal -- the original non-raw pattern relied on Python
# passing unknown escapes through and triggers DeprecationWarnings.
RE_ACK_LANGUAGES = re.compile(r'(?:^\s{4}--\[no\])(\w*)')
# Map GHTorrent's projects.language to ACK compatible language (if necessary).
ACK_LANGUAGE_MAP = {
    'c': 'cc',
    'c++': 'cpp',
    'c#': 'csharp',
    'objective-c': 'objc',
    'ojective-c++': 'objcpp',
    'javascript': 'js'
}
def init(cursor):
    """Populate SUPPORTED_LANGUAGES by parsing `ack --help-types` output.

    The `cursor` argument is unused; it exists so all attribute modules
    share the same init signature.
    """
    global SUPPORTED_LANGUAGES
    proc = subprocess.Popen(
        ['ack', '--help-types'], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    stdout_text, _ = (stream.decode('utf-8') for stream in proc.communicate())
    for line in stdout_text.split('\n'):
        hit = RE_ACK_LANGUAGES.match(line)
        if hit:
            SUPPORTED_LANGUAGES.append(hit.group(1))
def run(project_id, repo_path, cursor, **options):
    """Evaluate how monolithic a repository's file-reference graph is.

    ARGS:
        project_id : GHTorrent project id, used to look up the language.
        repo_path  : path to the checked-out repository.
        cursor     : DB cursor over the GHTorrent schema.
        options    : 'threshold' (required float) and 'minimumFiles'
                     (default 2).
    RETURNS:
        (passes_threshold : bool, monolithicity : float)
    """
    result = 0
    # FIX: parameterized query instead of str.format (avoids SQL
    # injection/quoting issues with externally supplied project ids).
    cursor.execute('''
        SELECT
            language
        FROM
            projects
        WHERE
            id = %s
    ''', (project_id,))
    record = cursor.fetchone()
    language = record[0]
    language = language.lower() if language else language
    ack_language = language
    if ack_language in ACK_LANGUAGE_MAP:
        ack_language = ACK_LANGUAGE_MAP[ack_language]
    # Edge case if the repository language is not supported by us.
    # BUG FIX: `language` is already lower-cased here and may be None;
    # the original called .lower() again and crashed on NULL languages.
    if (ack_language not in SUPPORTED_LANGUAGES) and (language != 'javascript'):
        return False, result
    file_paths = []
    if language == 'javascript':
        # JavaScript files are collected by walking the tree ourselves.
        for root, dirs, files in os.walk(repo_path):
            for _file in files:
                if _file.endswith(".js"):
                    file_paths.append(os.path.join(root, _file))
    else:
        # All other languages: let ack enumerate matching files.
        ack_process = subprocess.Popen(
            ['ack', '-f', "--{0}".format(ack_language), repo_path],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        lines, _ = [
            x.decode(errors='replace') for x in ack_process.communicate()
        ]
        file_paths = [line for line in lines.split('\n') if line.strip()]
    # Immediately fail the attribute if `minimumFiles` is not met.
    if len(file_paths) < options.get('minimumFiles', 2):
        return False, result
    graph = networkx.Graph()
    if language == 'javascript':
        # JavaScript: Use external utility
        success = build_js_graph(repo_path, file_paths, graph)
    else:
        lexer = lexers.get_lexer_by_name(language)
        # BUG FIX: the original passed `repo_path` (a string) where
        # build_graph expects the list of file paths, so the graph was
        # built over the path's characters instead of the repo's files.
        success = build_graph(file_paths, graph, lexer)
    if success:
        monolithicity = get_connectedness(graph)
    else:
        monolithicity = 0
    return monolithicity >= options['threshold'], monolithicity
def build_js_graph(repo_path, file_paths, graph):
    """Populate `graph` with one node per .js file and an edge per call.

    Runs the external `js-callgraph` tool (under gtimeout) to emit a JSON
    call graph, then mirrors it into `graph`. Returns True on success,
    False when the JSON output could not be read (e.g. the tool failed).
    """
    # add nodes
    for file_path in file_paths:
        graph.add_node(Node(file_path))
    name = repo_path.split('/')[-1]  # get name of the repository
    # compute and store call graph as json using js-callgraph
    graph_process = f"gtimeout 1000 js-callgraph --cg {repo_path} --output {name}_graph.json >/dev/null 2>&1"
    os.system(graph_process)
    try:
        with open('{}_graph.json'.format(name), 'r') as json_file:
            # load the json representation of the call graph
            calls = json.load(json_file)
            for call in calls:
                source_file = call['source']['file']  # identify the source of the call
                target_file = call['target']['file']  # identify the target of the call
                # both source and target should be nodes in the call graph, i.e., .js files
                if source_file.endswith(".js") and target_file.endswith(".js"):
                    graph.add_edge(Node(source_file), Node(target_file))  # add edge
            # BUG FIX: dropped the original `graph.to_undirected()` call --
            # it returns a *copy* which was discarded; `graph` is already an
            # undirected networkx.Graph, so the call was a no-op.
            os.remove('{}_graph.json'.format(name))  # delete the json representation of the call graph
            return True
    except IOError as err:
        print(err)
        return False
def build_graph(file_paths, graph, lexer):
    """Build a file-reference graph over `file_paths` via lexical analysis.

    Pass 1: lex each file; Name.Function/Name.Class tokens become symbol
    *definitions* on that file's node, other Name tokens in
    TOKENTYPE_WHITELIST become *references*.
    Pass 2: add an edge from each file to every other file that defines a
    symbol it references.

    Missing or undecodable files are skipped. Always returns True.
    Uses nodes_iter(), i.e. the networkx 1.x API.
    """
    for file_path in file_paths:
        node = Node(file_path)
        graph.add_node(node)
        try:
            with open(file_path, 'r', encoding='utf-8') as file:
                contents = file.read()
            tokens = lexer.get_tokens(contents)
            for item in tokens:
                token_type = item[0]
                symbol = item[1]
                if token_type in [token.Name.Function, token.Name.Class]:
                    node.defines.add(symbol)
                elif token_type in TOKENTYPE_WHITELIST:
                    node.references.add(symbol)
            # DEBUG dumps each node's collected symbols to stdout.
            if 'DEBUG' in os.environ:
                print(node)
        except FileNotFoundError as e:
            continue
        except UnicodeDecodeError:
            continue
    # Pass 2: O(nodes^2 * refs) cross-reference resolution.
    for caller in graph.nodes_iter():
        for reference in caller.references:
            for callee in graph.nodes_iter():
                if callee is not caller and reference in callee.defines:
                    graph.add_edge(caller, callee)
    return True
def get_connectedness(graph):
    """Return the fraction of nodes in the largest connected component.

    1.0 means every file is (transitively) connected (monolithic); values
    near 0 mean the file graph is fragmented. An empty graph yields 0.
    """
    components = list(networkx.connected_component_subgraphs(graph))
    # BUG FIX: guard the empty graph -- the original indexed components[0]
    # before its emptiness check and raised IndexError.
    if not components:
        return 0
    components.sort(key=lambda i: len(i.nodes()), reverse=True)
    largest_component = components[0]
    connectedness = 0
    if graph.nodes() and len(graph.nodes()) > 0:
        connectedness = len(largest_component.nodes()) / len(graph.nodes())
    return connectedness
class Node():
    """Graph node for one source file: its path plus the symbols it
    defines and the symbols it references."""
    def __init__(self, path):
        self.path = path
        self.defines = set()     # symbols this file defines
        self.references = set()  # symbols this file uses

    def __hash__(self):
        # Identity is the file path, so equal paths collapse to one node.
        return hash(self.path)

    def __eq__(self, other):
        # BUG FIX: comparing against a non-Node used to raise
        # AttributeError; return NotImplemented per the data-model contract.
        if not isinstance(other, Node):
            return NotImplemented
        return self.path == other.path

    def __str__(self):
        symbol_str = '\r' + '\n'.join(self.defines)
        return "{0}\n{1}\n{2}".format(
            self.path, '=' * len(self.path), symbol_str
        )
if __name__ == '__main__':
    # Standalone debug entry point: run the attribute against one project,
    # invoked as `python <module> <project_id> <repo_path>`.
    import importlib
    import json
    import mysql.connector
    import sys
    sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
    from lib.utilities import get_loc
    os.environ['DEBUG'] = '1'  # enables per-file symbol dumps in build_graph
    with open('../../config.json', 'r') as file:
        config = json.load(file)
    mysql_config = config['options']['datasource']
    connection = mysql.connector.connect(**mysql_config)
    connection.connect()
    cursor = connection.cursor()
    init(None)
    result = run(sys.argv[1], sys.argv[2], cursor, threshold=0.75)
    cursor.close()
    connection.close()
    print(result)
else:
    # Imported as part of the attribute pipeline.
    from lib.utilities import get_loc
#!/usr/bin/env python
import sys
import os.path
import cv2
import numpy as np
import boto
from boto.s3.key import Key
# Grab a single frame from the default camera (device 0); abort if the
# capture fails.
cap = cv2.VideoCapture(0)
ret, new_image = cap.read()
if ret == False:
    exit(1)
filename = 'new.jpg'
cv2.imwrite(filename, new_image)
bucket_name = 'ronhandler'
# SECURITY NOTE(review): hard-coded AWS credentials committed to source.
# These must be rotated and loaded from the environment or an AWS
# credentials profile instead of living in the repository.
AWS_ACCESS_KEY_ID = 'AKIAIYLDR3LU2XDICTSQ'
AWS_SECRET_ACCESS_KEY = '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'
print('Connecting to AWS S3...')
conn = boto.connect_s3(AWS_ACCESS_KEY_ID,
                       AWS_SECRET_ACCESS_KEY,
                       # Hardcoding the host parameter is a workaround for bug:
                       # https://github.com/boto/boto/issues/621
                       host="s3-eu-west-1.amazonaws.com")
bucket = conn.get_bucket(bucket_name)
k = Key(bucket)
k.key = filename
# NOTE(review): uploads /share/new.jpg, not the ./new.jpg written above --
# presumably the working directory is mounted at /share; verify.
testfile = "/share/" + filename
print('Uploading "%s" to "%s/%s"...' % (testfile, bucket_name, k.key))
k.set_contents_from_filename(testfile)
print('Notifying the server that we have uploaded a file...')
import urllib2
url = """http://ec2-52-16-188-96.eu-west-1.compute.amazonaws.com/admin/run.php"""
urllib2.urlopen(url).read()
| 1,065 | 443 |
from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs39_detached_award_financial_assistance_2'
def test_column_headers(database):
    """The validation query must expose exactly the expected columns."""
    actual_columns = set(query_columns(_FILE, database))
    assert actual_columns == {
        "row_number",
        "place_of_performance_code",
        "place_of_perform_country_c",
    }
def test_success(database):
    """ PrimaryPlaceOfPerformanceCode must be 00FORGN when PrimaryPlaceofPerformanceCountryCode is not USA,
        not 00FORGN otherwise. """
    # Each (code, country) pair is valid -- case-insensitively.
    valid_cases = [
        ("00FORGN", "UKR"),
        ("00FoRGN", "uKr"),
        ("ny**987", "USA"),
        ("NY**987", "UsA"),
    ]
    models = [
        DetachedAwardFinancialAssistanceFactory(
            place_of_performance_code=code,
            place_of_perform_country_c=country,
        )
        for code, country in valid_cases
    ]
    assert number_of_errors(_FILE, database, models=models) == 0
def test_failure(database):
    """ Test failure for PrimaryPlaceOfPerformanceCode must be 00FORGN when PrimaryPlaceofPerformanceCountryCode
        is not USA, not 00FORGN otherwise. """
    # Each (code, country) pair violates the rule -- case-insensitively.
    invalid_cases = [
        ("00FORGN", "USA"),
        ("00FoRGN", "usA"),
        ("ny**987", "UKR"),
        ("NY**987", "ukR"),
    ]
    models = [
        DetachedAwardFinancialAssistanceFactory(
            place_of_performance_code=code,
            place_of_perform_country_c=country,
        )
        for code, country in invalid_cases
    ]
    assert number_of_errors(_FILE, database, models=models) == 4
| 2,569 | 772 |
import glob
import os
import cv2
import hipp.io
import hipp.utils
"""
Library for command line tools.
"""
def optimize_geotif(geotif_file_name,
                    output_file_name=None,
                    verbose=False,
                    print_call=False):
    """Re-encode a GeoTIFF as a tiled, LZW-compressed GTiff via gdal_translate.

    ARGS:
        geotif_file_name : input GeoTIFF path.
        output_file_name : output path; defaults to '<name>_optimized<ext>'
                           next to the input.
        verbose          : forwarded to hipp.io.run_command.
        print_call       : if truthy, only print the gdal command instead
                           of running it.
    RETURNS:
        The output file name.
    """
    if output_file_name is None:
        file_path, file_name, file_extension = hipp.io.split_file(geotif_file_name)
        output_file_name = os.path.join(file_path,
                                        file_name + '_optimized' + file_extension)
    call = ['gdal_translate',
            '-of', 'GTiff',
            '-co', 'TILED=YES',
            '-co', 'COMPRESS=LZW',
            '-co', 'BIGTIFF=IF_SAFER',
            geotif_file_name,
            output_file_name]
    # Fixed idiom: was `if print_call==True`.
    if print_call:
        print(*call)
    else:
        hipp.io.run_command(call, verbose=verbose)
    return output_file_name
def optimize_geotifs(input_directory,
                     keep=False,
                     verbose=False):
    """Optimize every .tif in `input_directory` with gdal_translate.

    By default the optimized copy replaces the original in place;
    keep=True leaves both files on disk. Returns the list of resulting
    tif paths.
    """
    base_call = ['gdal_translate',
                 '-of', 'GTiff',
                 '-co', 'TILED=YES',
                 '-co', 'COMPRESS=LZW',
                 '-co', 'BIGTIFF=IF_SAFER']
    print('Optimizing tifs in', input_directory, 'with:')
    print(*base_call)
    output_tifs = []
    for tif in sorted(glob.glob(os.path.join(input_directory, '*.tif'))):
        optimized = hipp.utils.optimize_geotif(tif, verbose=verbose)
        if keep:
            output_tifs.append(optimized)
        else:
            # Swap the optimized file into the original's place.
            os.remove(tif)
            os.rename(optimized, tif)
            output_tifs.append(tif)
    return output_tifs
def enhance_geotif_resolution(geotif_file_name,
                              output_file_name=None,
                              factor=None,
                              verbose=False,
                              print_call=False):
    """Upsample a GeoTIFF by `factor` using gdal_translate cubic resampling.

    ARGS:
        geotif_file_name : input GeoTIFF path.
        output_file_name : output path; defaults to '<name>_high_res<ext>'.
        factor           : multiplier applied to both dimensions.
                           NOTE(review): required in practice -- the None
                           default raises a TypeError at `w * factor`.
        verbose          : forwarded to hipp.io.run_command.
        print_call       : if truthy, only print the gdal command.
    RETURNS:
        The output file name.
    """
    if output_file_name is None:
        file_path, file_name, file_extension = hipp.io.split_file(geotif_file_name)
        output_file_name = os.path.join(file_path,
                                        file_name + '_high_res' + file_extension)
    # Read the raster only to learn its pixel dimensions.
    img = cv2.imread(geotif_file_name, cv2.IMREAD_GRAYSCALE)
    w, h = img.shape[::-1]
    w, h = w * factor, h * factor
    call = ['gdal_translate',
            '-of', 'GTiff',
            '-co', 'TILED=YES',
            '-co', 'COMPRESS=LZW',
            '-co', 'BIGTIFF=IF_SAFER',
            '-outsize', str(w), str(h),
            '-r', 'cubic',
            geotif_file_name,
            output_file_name]
    # Fixed idiom: was `if print_call==True`.
    if print_call:
        print(*call)
    else:
        hipp.io.run_command(call, verbose=verbose)
    return output_file_name
| 2,782 | 883 |
import os
import pandas as pd
from fbd_interpreter.explainers.ml.explain_ml import ExplainML
from fbd_interpreter.logger import ROOT_DIR
# Feature names exposed to the interpreter.
FEATURES = ["a", "b"]
# Fixed predictions DummyModel.predict returns for any input.
PREDICTIONS = [0, 0, 0, 0, 1, 1, 1, 1]
TARGETS = pd.Series([0, 0, 0, 0, 1, 1, 1, 1])
# Toy dataset: column "b" separates the two target classes.
DATA = pd.DataFrame(
    {"a": [0, 1, 2, 3, 4, 5, 6, 7], "b": [1, 1, 1, 1, 2, 2, 2, 2], "target": TARGETS}
)
class DummyModel(object):
    """
    Dummy class that acts like a scikit-learn supervised learning model.
    Always makes the same predictions.
    """

    def __init__(
        self,
    ) -> None:
        self.classes_ = [0, 1]

    def predict(self, x):
        # Ignore the input: always return the canned prediction list.
        return PREDICTIONS

    def predict_proba(self, x):
        # Ignore the input: always return the canned probabilities.
        return [[0.9, 0.1]]
# TODO
"""
def test_global_pdp_ice() -> None:
interpreter = ExplainML(
model=DummyModel(),
task_name="classification",
tree_based_model=False,
features_name=FEATURES,
features_to_interpret=FEATURES,
target_col="target",
out_path=os.path.join(ROOT_DIR, "../outputs/tests"),
)
interpreter.global_pdp_ice(DATA)
"""
| 1,070 | 438 |
import numpy as np
import numpy.testing as npt
import pytest
from sklearn.preprocessing import OneHotEncoder
from timeserio.preprocessing import (
FeatureIndexEncoder, StatelessOneHotEncoder,
StatelessTemporalOneHotEncoder, StatelessPeriodicEncoder
)
from timeserio.preprocessing.encoding import PeriodicEncoder
class TestFeatureIndexEncoder:
    """FeatureIndexEncoder maps string labels to a single column of
    integer ids covering 0..n_labels-1."""

    @pytest.mark.parametrize(
        'n_labels, expected_encoding', [
            (1, np.arange(1)),
            (2, np.arange(2)),
            (3, np.arange(3)),
        ]
    )
    def test_feature_encoder(self, n_labels, expected_encoding):
        encoder = FeatureIndexEncoder()
        labels = np.array(
            [f'label{idx}' for idx in range(n_labels)]
        ).reshape(-1, 1)
        new_ids = encoder.fit_transform(labels)
        assert isinstance(new_ids, np.ndarray)
        assert len(new_ids.shape) == 2
        assert new_ids.shape[1] == 1
        # BUG FIX: the original asserted `set(new_ids.ravel() == set(...))`
        # -- a misplaced parenthesis that compared an array against a set
        # and never checked the encoding values.
        assert set(new_ids.ravel()) == set(expected_encoding.ravel())
class TestStatelessOneHotEncoder:
    """StatelessOneHotEncoder must encode exactly like a fitted sklearn
    OneHotEncoder with explicit categories -- without ever calling fit."""
    # Rows of random integer data generated per test.
    n_rows = 10

    def test_invalid_n_values(self):
        # 'auto' implies inferring categories at fit time, which a
        # stateless encoder cannot support.
        with pytest.raises(ValueError):
            StatelessOneHotEncoder(n_features=1, n_values='auto')

    @pytest.mark.parametrize(
        'n_features, n_values, categories', [
            (1, 3, [[0, 1, 2]]),
            (2, 3, [[0, 1, 2], [0, 1, 2]])
        ]
    )
    def test_same_as_stateful(
        self, n_features, n_values, categories, random
    ):
        # Scalar n_values: every feature shares the same category count.
        x = np.random.randint(
            0, np.min(n_values), size=(self.n_rows, n_features)
        )
        stateful_enc = OneHotEncoder(
            categories=categories,
            sparse=False
        )
        stateless_enc = StatelessOneHotEncoder(
            n_features=n_features,
            n_values=n_values,
            sparse=False
        )
        x0 = stateful_enc.fit_transform(x)
        # Note: transform without fit -- that is the point of statelessness.
        x1 = stateless_enc.transform(x)
        npt.assert_allclose(x1, x0)

    @pytest.mark.parametrize(
        'n_features, n_values, categories', [
            (1, [3], [[0, 1, 2]]),
            (2, [3, 2], [[0, 1, 2], [0, 1]])
        ]
    )
    def test_same_as_stateful_for_multiple_n_values(
        self, n_features, n_values, categories, random
    ):
        # Per-feature n_values: draw each column within its own range.
        x = np.hstack([
            np.random.randint(0, np.min(_n_values), size=(self.n_rows, 1))
            for _n_values in n_values
        ])
        stateful_enc = OneHotEncoder(
            categories=categories,
            sparse=False
        )
        stateless_enc = StatelessOneHotEncoder(
            n_features=n_features,
            n_values=n_values,
            sparse=False
        )
        x0 = stateful_enc.fit_transform(x)
        x1 = stateless_enc.transform(x)
        npt.assert_allclose(x1, x0)
class TestStatelessTemporalOneHotEncoder:
    """Temporal variant: each row is a window of time steps; the encoding
    groups the indicator columns of all time steps per category value."""
    n_rows = 3

    @pytest.mark.parametrize('n_values', ['all', [True], [0]])
    def test_invalid_n_values(self, n_values):
        # Only an explicit positive integer category count is accepted.
        with pytest.raises(ValueError):
            StatelessTemporalOneHotEncoder(n_features=1, n_values=n_values)

    def test_temporal_onehot(self):
        # Two rows, four time steps, two category values (0 and 1).
        x = np.array([
            [0, 0, 1, 1],
            [0, 1, 0, 1],
        ])
        # Expected layout: for each category value, the four time steps'
        # indicator columns form one contiguous block
        # (cols 0-3 = "value==0" per step, cols 4-7 = "value==1" per step).
        y_expected = np.array(
            [
                [1, 1, 0, 0, 0, 0, 1, 1],
                [1, 0, 1, 0, 0, 1, 0, 1],
            ]
        )
        n_values = 2
        enc = StatelessTemporalOneHotEncoder(
            n_features=x.shape[1], n_values=n_values, sparse=False
        )
        y = enc.fit_transform(x)
        npt.assert_allclose(y, y_expected)
class TestPeriodicEncoder:
    """PeriodicEncoder replaces each selected column with its (sin, cos)
    pair over the configured period; unselected columns pass through."""
    n_rows = 10
    # One full period sampled uniformly over [0, 1].
    column = np.linspace(0, 1, num=n_rows)
    column_sin = np.sin(2 * np.pi * column)
    column_cos = np.cos(2 * np.pi * column)
    # Expected (sin, cos) pair for one encoded column.
    column_stacked = np.vstack([column_sin, column_cos]).T

    def array(self, n_features):
        # Build an (n_rows, n_features) array whose every column is `column`.
        x = np.arange(n_features)
        y = self.column
        _, X = np.meshgrid(x, y)
        return X

    @pytest.mark.parametrize('periodic_features', [[], [False]])
    def test_single_column_no_transform(self, periodic_features):
        # No features selected -> input passes through unchanged.
        enc = PeriodicEncoder(periodic_features=periodic_features, period=1)
        X = self.array(n_features=1)
        Xt = enc.fit_transform(X)
        npt.assert_array_equal(X, Xt)

    @pytest.mark.parametrize('periodic_features', ['all', [0], [True]])
    def test_single_column(self, periodic_features):
        # All three selector spellings must encode the single column.
        enc = PeriodicEncoder(periodic_features=periodic_features, period=1)
        X = self.array(n_features=1)
        Xt = enc.fit_transform(X)
        npt.assert_allclose(Xt, self.column_stacked)

    @pytest.mark.parametrize('n_features', [2])
    @pytest.mark.parametrize(
        'periodic_features', ['all', [0, 1], [True, True]]
    )
    def test_multi_column(self, n_features, periodic_features):
        enc = PeriodicEncoder(periodic_features=periodic_features, period=1)
        X = self.array(n_features=2)
        Xt = enc.fit_transform(X)
        # Both identical input columns encode to the same (sin, cos) pair,
        # so the even- and odd-indexed output column subsets each recover it.
        npt.assert_allclose(Xt[:, ::2], self.column_stacked)
        npt.assert_allclose(Xt[:, 1::2], self.column_stacked)
class TestStatelessPeriodicEncoder:
    """StatelessPeriodicEncoder must reproduce the fitted PeriodicEncoder."""

    n_rows = 10

    @pytest.mark.parametrize(
        'n_features, periodic_features, period', [
            (1, 'all', 1.), (2, 'all', 1.), (2, 'all', [1., 2.]),
            (2, [True, False], 3), (2, [1], 3)
        ]
    )
    def test_same_as_stateful(self, n_features, periodic_features, period):
        """Stateless transform equals fit_transform of the stateful encoder."""
        data = np.random.randint(0, 10, size=(self.n_rows, n_features))
        fitted = PeriodicEncoder(
            periodic_features=periodic_features, period=period
        ).fit_transform(data)
        stateless = StatelessPeriodicEncoder(
            n_features=n_features,
            periodic_features=periodic_features,
            period=period,
        ).transform(data)
        npt.assert_array_equal(stateless, fitted)
| 5,836 | 2,062 |
# Importing modules
import numpy as np
import porepy as pp
import itertools
from time import time
from model import model
#%% Functions
def make_constrained_mesh(h=0.1):
    """
    Create an unstructured mesh with a given target mesh size for the case
    of a single vertical fracture embedded in the unit-square domain.

    Parameters
    ----------
    h : float, optional
        Target mesh size. The default is 0.1.

    Returns
    -------
    gb : PorePy Object
        Porepy grid bucket object.
    """
    domain = {"xmin": 0, "xmax": 1, "ymin": 0, "ymax": 1}
    network_2d = pp.fracture_importer.network_2d_from_csv(
        "network.csv", domain=domain
    )
    # Use the same target size on the boundary and on the fracture
    mesh_args = {"mesh_size_bound": h, "mesh_size_frac": h}
    # Mesh the network; entities 1 and 2 act as constraints, not fractures
    return network_2d.mesh(mesh_args, constraints=[1, 2])
def create_non_matching_gridbucket(h_2d, h_1d, h_mortar):
    """
    Generates a gridbucket containing non-matching grids

    Parameters
    ----------
    h_2d : Float
        Mesh size of the higher-dimensional grid
    h_1d : Float
        Mesh size of the lower-dimensional grid
    h_mortar : Float
        Mesh size of the mortar grid

    Raises
    ------
    Warning
        If the subdomain cells are larger (coarser) than the mortar cells

    Returns
    -------
    gb : PorePy object
        Grid bucket
    """
    # Sanity check: a subdomain mesh size exceeding the mortar mesh size
    # means the subdomain cells are LARGER than the mortar cells, which is
    # the inconsistent configuration this guard protects against.
    if (h_2d > h_mortar) or (h_1d > h_mortar):
        warning_msg = "Subdomain cells are larger than mortar cells "
        warning_msg += "and this may lead to inconsistent results."
        raise Warning(warning_msg)

    # NOTE: The easiest way to construct the non-matching gridbucket is to
    # replace the lower-dimensional grid and the mortar grids into the
    # higher-dimensional grid

    # Create a grid bucket using h_2d as target mesh size
    gb_h = make_constrained_mesh(h_2d)
    gl_old = gb_h.grids_of_dimension(1)[0]  # extract 1d-grid
    mg_old = gb_h.get_mortar_grids()[0]  # extract mortar-grid

    # Obtain fracture and mortar grids to be replaced into
    gl_new = make_constrained_mesh(h_1d).grids_of_dimension(1)[0]
    mg_new = make_constrained_mesh(h_mortar).get_mortar_grids()[0]

    # Create the mapping dictionaries
    g_map = {gl_old: gl_new}
    mg_map = {mg_old: mg_new.side_grids}

    # Replace grids
    gb = gb_h.copy()
    gb.replace_grids(g_map=g_map)
    gb.replace_grids(mg_map=mg_map)

    return gb
#%% Defining numerical methods, and obtaining grid buckets
num_methods = ["TPFA", "MPFA", "RT0", "MVEM"]

levels = 5  # coarsening levels
coarsening_factor = 2
h_2d_ref = 0.003125  # reference 2D mesh size
h_1d_ref = h_2d_ref * 1.5  # reference 1D mesh size
h_mortar_ref = h_2d_ref * 2.0  # reference mortar mesh size

# Geometric sequence of mesh sizes, one entry per coarsening level
refinement_ratios = coarsening_factor ** np.arange(levels)
h_2d = refinement_ratios * h_2d_ref
h_1d = refinement_ratios * h_1d_ref
h_mortar = refinement_ratios * h_mortar_ref

tic = time()
print("Assembling non-matching grid buckets...", end="")
# Build one grid bucket per level, then order from coarsest to finest
grid_buckets = [
    create_non_matching_gridbucket(h2, h1, hm)
    for h2, h1, hm in zip(h_2d, h_1d, h_mortar)
][::-1]
print(f"\u2713 Time {time() - tic}\n")
#%% Create dictionary and initialize fields
# Per-level result columns collected for each numerical method
RESULT_FIELDS = (
    "mesh_size",
    "error_estimate_2d",
    "true_error_pressure_2d",
    "true_error_velocity_2d",
    "mesh_size_2d",
    "error_estimate_1d",
    "true_error_pressure_1d",
    "true_error_velocity_1d",
    "mesh_size_1d",
    "error_estimate_mortar",
    "true_error_pressure_mortar",
    "true_error_velocity_mortar",
    "mesh_size_mortar",
    "majorant",
    "true_error_pressure",
    "true_error_velocity",
    "I_eff_pressure",
    "I_eff_velocity",
    "I_eff_combined",
)
# Build the nested dict in a single pass.  (The previous version first
# created empty inner dicts and then immediately overwrote them in a loop,
# which was dead work.)
d = {method: {field: [] for field in RESULT_FIELDS} for method in num_methods}
#%% Populate fields (NOTE: This loop may take considerable time)
# Keys listed in the exact order in which model() returns its results
result_keys = (
    "mesh_size",
    "error_estimate_2d",
    "true_error_pressure_2d",
    "true_error_velocity_2d",
    "mesh_size_2d",
    "error_estimate_1d",
    "true_error_pressure_1d",
    "true_error_velocity_1d",
    "mesh_size_1d",
    "error_estimate_mortar",
    "true_error_pressure_mortar",
    "true_error_velocity_mortar",
    "mesh_size_mortar",
    "majorant",
    "true_error_pressure",
    "true_error_velocity",
    "I_eff_pressure",
    "I_eff_velocity",
    "I_eff_combined",
)
for method, gb in itertools.product(num_methods, grid_buckets):
    # Print info in the console
    print("Solving with", method, "for refinement level", grid_buckets.index(gb) + 1)

    # Get hold of errors
    tic = time()
    results = model(gb, method)
    print(f"Done. Time {time() - tic}\n")

    # Store errors in the dictionary, pairing each returned value with its key
    for key, value in zip(result_keys, results):
        d[method][key].append(value)
#%% Exporting
# One table row per (numerical method, refinement level) combination
rows = len(num_methods) * len(grid_buckets)
method_level = list(itertools.product(num_methods, range(levels)))

# Assemble each column of the table
num_method_name = [method for method, _ in method_level]
diam_2d = [d[m]["mesh_size_2d"][lvl] for m, lvl in method_level]
diam_1d = [d[m]["mesh_size_1d"][lvl] for m, lvl in method_level]
diam_mortar = [d[m]["mesh_size_mortar"][lvl] for m, lvl in method_level]
col_2d_estimate = [d[m]["error_estimate_2d"][lvl] for m, lvl in method_level]
col_1d_estimate = [d[m]["error_estimate_1d"][lvl] for m, lvl in method_level]
col_mortar_estimate = [d[m]["error_estimate_mortar"][lvl] for m, lvl in method_level]
col_majorant = [d[m]["majorant"][lvl] for m, lvl in method_level]
col_true_error_pressure = [d[m]["true_error_pressure"][lvl] for m, lvl in method_level]
col_true_error_velocity = [d[m]["true_error_velocity"][lvl] for m, lvl in method_level]
I_eff_pressure = [d[m]["I_eff_pressure"][lvl] for m, lvl in method_level]
I_eff_velocity = [d[m]["I_eff_velocity"][lvl] for m, lvl in method_level]
I_eff_combined = [d[m]["I_eff_combined"][lvl] for m, lvl in method_level]

# Prepare for exporting: structured array with one field per column
export = np.zeros(
    rows,
    dtype=[
        ('var2', 'U6'),
        ('var3', float), ('var4', float),
        ('var5', float), ('var6', float),
        ('var7', float), ('var8', float),
        ('var9', float), ('var10', float),
        ('var11', float), ('var12', float),
        ('var13', float), ('var14', float),
    ],
)
columns = [
    num_method_name, diam_2d, diam_1d, diam_mortar,
    col_2d_estimate, col_1d_estimate, col_mortar_estimate,
    col_majorant, col_true_error_pressure, col_true_error_velocity,
    I_eff_pressure, I_eff_velocity, I_eff_combined,
]
# Declaring column variables
for field, column in zip(export.dtype.names, columns):
    export[field] = column

# Formatting string
fmt = "%6s %2.5f %2.5f %2.5f %2.2e %2.2e "
fmt += "%2.2e %2.2e %2.2e %2.2e %2.2f %2.2f %2.2f"

# Headers
header = "num_method h_2d, h_1d, h_mortar, eta_2d eta_1d eta_mortar "
header += "majorant true_error_p true_error_u I_eff_p I_eff_u I_eff_pu"

# Writing into txt
np.savetxt('validation2d.txt', export, delimiter=',', fmt=fmt, header=header)
#%% Exporting to LaTeX
# One table row per (numerical method, refinement level) combination
rows = len(num_methods) * len(grid_buckets)
# Column of LaTeX column separators, interleaved between the data columns
ampersend = ['&'] * rows
method_level = list(itertools.product(num_methods, range(levels)))

# Assemble each column of the table
num_method_name = [method for method, _ in method_level]
diam_2d = [d[m]["mesh_size_2d"][lvl] for m, lvl in method_level]
diam_1d = [d[m]["mesh_size_1d"][lvl] for m, lvl in method_level]
diam_mortar = [d[m]["mesh_size_mortar"][lvl] for m, lvl in method_level]
col_2d_estimate = [d[m]["error_estimate_2d"][lvl] for m, lvl in method_level]
col_1d_estimate = [d[m]["error_estimate_1d"][lvl] for m, lvl in method_level]
col_mortar_estimate = [d[m]["error_estimate_mortar"][lvl] for m, lvl in method_level]
col_majorant = [d[m]["majorant"][lvl] for m, lvl in method_level]
col_true_error_pressure = [d[m]["true_error_pressure"][lvl] for m, lvl in method_level]
col_true_error_velocity = [d[m]["true_error_velocity"][lvl] for m, lvl in method_level]
I_eff_pressure = [d[m]["I_eff_pressure"][lvl] for m, lvl in method_level]
I_eff_velocity = [d[m]["I_eff_velocity"][lvl] for m, lvl in method_level]
I_eff_combined = [d[m]["I_eff_combined"][lvl] for m, lvl in method_level]

# Prepare for exporting: data fields alternate with '&' separator fields
export = np.zeros(
    rows,
    dtype=[
        ('var2', 'U6'),
        ('var3', float), ('var4', float),
        ('var5', float), ('var6', float),
        ('amp1', 'U6'), ('var7', float),
        ('amp2', 'U6'), ('var8', float),
        ('amp3', 'U6'), ('var9', float),
        ('amp4', 'U6'), ('var10', float),
        ('amp5', 'U6'), ('var11', float),
        ('amp6', 'U6'), ('var12', float),
        ('amp7', 'U6'), ('var13', float),
        ('amp8', 'U6'), ('var14', float),
    ],
)
columns = [
    num_method_name, diam_2d, diam_1d, diam_mortar, col_2d_estimate,
    ampersend, col_1d_estimate, ampersend, col_mortar_estimate,
    ampersend, col_majorant, ampersend, col_true_error_pressure,
    ampersend, col_true_error_velocity, ampersend, I_eff_pressure,
    ampersend, I_eff_velocity, ampersend, I_eff_combined,
]
for field, column in zip(export.dtype.names, columns):
    export[field] = column

# Formatting string
fmt = "%6s %2.5f %2.5f %2.5f %2.2e %1s %2.2e %1s %2.2e "
fmt += "%1s %2.2e %1s %2.2e %1s %2.2e %1s %2.2f %1s %2.2f %1s %2.2f"

# Headers
header = "num_method h_2d h_1d h_mortar eta_2d & eta_1d & eta_mortar & "
header += "majorant & true_error_p & true_error_u & I_eff_p & I_eff_u & I_eff_pu"

np.savetxt('validation2d_tex.txt',
           export,
           delimiter=',',
           fmt=fmt,
           header=header
           )
| 11,654 | 4,645 |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Alcatel.AOS.get_portchannel
# ----------------------------------------------------------------------
# Copyright (C) 2007-2016 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetportchannel import IGetPortchannel
import re
class Script(BaseScript):
    """Collect port-channel (link aggregation) information from Alcatel AOS."""

    name = "Alcatel.AOS.get_portchannel"
    interface = IGetPortchannel

    # Aggregate line in "show linkagg": captures the aggregate port number
    rx_line = re.compile(r"^\s+(?P<port>\d+)\s+(Static|Dynamic)", re.MULTILINE)
    # Member line in per-aggregate "show linkagg N port" output
    rx_line1 = re.compile(r"\s+(?P<interface>\d+\/\d+)\s+\S+\s+", re.MULTILINE)
    # Member line in global "show linkagg port" output (older firmware)
    rx_line2 = re.compile(
        r"^\s+(?P<interface>\d+\/\d+)\s+\S+\s+\d+\s+\S+\s+(?P<port>\d+)", re.MULTILINE
    )

    def execute(self):
        result = []
        linkagg_output = self.cli("show linkagg")
        # Lazily-fetched member listing, shared across aggregates when the
        # firmware only supports the global "show linkagg port" command
        member_output = ""
        for agg_match in self.rx_line.finditer(linkagg_output):
            port = int(agg_match.group("port"))
            if self.match_version(version__gte="6.3.4"):
                # Newer firmware: query members per aggregate
                member_output = self.cli("show linkagg %i port" % port)
                members = [
                    m.group("interface")
                    for m in self.rx_line1.finditer(member_output)
                ]
            else:
                # Older firmware: fetch the global listing once and filter
                if not member_output:
                    member_output = self.cli("show linkagg port")
                members = [
                    m.group("interface")
                    for m in self.rx_line2.finditer(member_output)
                    if int(m.group("port")) == port
                ]
            result.append(
                {
                    "interface": "%i" % port,
                    "members": members,
                    # <!> TODO: port-channel type detection
                    "type": "L",
                }
            )
        return result
| 1,850 | 560 |
# Generated by Django 2.1.3 on 2019-04-11 09:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the finance app: account turnovers, assets and
    # asset types, each linked to the user model of the custom auth app.

    initial = True

    dependencies = [
        # The user model referenced below lives in the 'djfauth' app
        ('djfauth', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='AccountTurnOver',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Turnover direction; labels translate to: out=支出 (expense),
                # in=收入 (income), exp=消费 (consumption)
                ('ac_type', models.CharField(choices=[('out', '支出'), ('in', '收入'), ('exp', '消费')], max_length=50)),
                ('amount', models.DecimalField(decimal_places=2, default=0, max_digits=20)),
                ('comment', models.CharField(max_length=500)),
                ('create_dt', models.DateField(verbose_name='created date')),
                # auto_now keeps this field updated on every save
                ('update_dt', models.DateTimeField(auto_now=True, null=True, verbose_name='updated date')),
            ],
        ),
        migrations.CreateModel(
            name='Asset',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('as_iden', models.CharField(max_length=200)),
                ('as_organ', models.CharField(max_length=200)),
                ('amount', models.DecimalField(decimal_places=2, default=0, max_digits=20)),
                # Credit-card style metadata; stored as free-form text
                ('bill_date', models.CharField(blank=True, max_length=200, null=True)),
                ('repayment_date', models.CharField(blank=True, max_length=200, null=True)),
                ('credit_limit', models.CharField(blank=True, max_length=200, null=True)),
                ('year_rate', models.CharField(blank=True, max_length=200, null=True)),
                ('comment', models.CharField(blank=True, max_length=500, null=True)),
                ('create_dt', models.DateField(verbose_name='created date')),
                ('update_dt', models.DateTimeField(auto_now=True, null=True, verbose_name='updated date')),
            ],
        ),
        migrations.CreateModel(
            name='AssetType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('assetname', models.CharField(max_length=200)),
                ('detailtype', models.CharField(max_length=200)),
                ('create_dt', models.DateField(verbose_name='created date')),
                ('update_dt', models.DateTimeField(auto_now=True, null=True, verbose_name='updated date')),
            ],
        ),
        # Foreign keys are added after model creation to avoid ordering issues
        migrations.AddField(
            model_name='asset',
            name='as_type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='finance.AssetType'),
        ),
        migrations.AddField(
            model_name='asset',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='djfauth.User'),
        ),
        migrations.AddField(
            model_name='accountturnover',
            name='asset',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='finance.Asset'),
        ),
        migrations.AddField(
            model_name='accountturnover',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='djfauth.User'),
        ),
    ]
| 3,398 | 1,014 |
__author__ = 'hvishwanath'
import yaml
from models import *
class PlanParser(object):
    """Parse a CAMP plan file (YAML) into a CAMPPlan model object."""

    @classmethod
    def parse(cls, planfile):
        """Load *planfile* (a YAML file path) and build a CAMPPlan from it.

        Returns the CAMPPlan produced by ``CAMPPlan.create_from_dict``.
        """
        # The `file()` builtin was removed in Python 3; open the file with a
        # context manager so the handle is always closed.
        with open(planfile) as f:
            data = yaml.safe_load(f)
        return CAMPPlan.create_from_dict(data)
| 226 | 81 |
################################# LICENSE ##################################
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer #
# in the documentation and/or other materials provided with the #
# distribution. #
# * Neither the name of the South African Astronomical Observatory #
# (SAAO) nor the names of its contributors may be used to endorse #
# or promote products derived from this software without specific #
# prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE SAAO ''AS IS'' AND ANY EXPRESS OR #
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE SAAO BE LIABLE FOR ANY #
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS #
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
############################################################################
"""
Module containing generic graphical user interface widgets.
"""
# Matplotlib colormap import
import matplotlib.cm
# General imports
import pyfits
import numpy as np
# Gui library imports
try:
from PyQt4.QtCore import QString
except ImportError:
QString = str
from PyQt4 import QtGui, QtCore
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.patches import CirclePolygon, Rectangle
# Salt imports
import saltsafeio
from salterror import SaltError, SaltIOError
from saltimagetools import find_object, zscale
class PhotometryConfigWidget(QtGui.QWidget):
    """Configure dialog for photometry.

    Has settings for:

    * target position, size
    * target background
        * type (anulus/region)
        * parameters
    * comparison position, size
    * comparison background
        * type (anulus/region)
        * parameters
    """

    def __init__(self, imdisplay, config, imlist=None, number=1, parent=None):
        """Setup widget.

        *imdisplay* a `FitsDisplay` derived fits display widget,
        *imlist* a list of fits image filenames,
        *config* filename used for output configuration file,
        *number* image number to load on startup,
        *parent* parent widget.
        """
        # Set default parameters
        self.imlist=imlist
        self.number=number
        self.config=config
        self.amp={'target' : 1, 'comparison' : 1 }

        # Set default marker
        self.mark_with='circle'

        # Set default search distance for recentering
        self.distance=5

        # Default line style parameters
        self.line={ 'target' : { 'color' : 'g', 'width' : 2 },
                    'comparison' : { 'color' : 'g', 'width' : 2 }}

        # Import gui
        from ui_photometryconfigwidget import Ui_PhotometryConfigWidget

        # Setup widget
        QtGui.QWidget.__init__(self, parent)

        # Bind gui to widget
        self.ui = Ui_PhotometryConfigWidget()
        self.ui.setupUi(self)

        # Destroy widget on close
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)

        # Connect to display window
        self.imdisplay=imdisplay

        # Connect position selected signal from display to event handler
        self.connect(self.imdisplay, QtCore.SIGNAL('positionSelected(float, float)'), self.selectionHandler)

        # Set current display widget for positionSelected signal
        self.xdisplay=[]
        self.ydisplay=[]
        self.rdisplay=[]

        # Keep track of currently displayed objects
        self.display={'target' : {'position' : False,
                                  'annulus' : False,
                                  'region' : False },
                      'comparison' : {'position' : False,
                                      'annulus' : False,
                                      'region' : False }}

        # Keep track of input widgets
        self.parameters=['x','y','r','r1','r2','x1','y1','x2','y2']
        self.input={'target' : { 'x' : self.ui.tgtXLineEdit,
                                 'y' : self.ui.tgtYLineEdit,
                                 'r' : self.ui.tgtRLineEdit,
                                 'r1' : self.ui.tgtR1LineEdit,
                                 'r2' : self.ui.tgtR2LineEdit,
                                 'x1' : self.ui.tgtX1LineEdit,
                                 'y1' : self.ui.tgtY1LineEdit,
                                 'x2' : self.ui.tgtX2LineEdit,
                                 'y2' : self.ui.tgtY2LineEdit},
                    'comparison' : { 'x' : self.ui.cmpXLineEdit,
                                     'y' : self.ui.cmpYLineEdit,
                                     'r' : self.ui.cmpRLineEdit,
                                     'r1' : self.ui.cmpR1LineEdit,
                                     'r2' : self.ui.cmpR2LineEdit,
                                     'x1' : self.ui.cmpX1LineEdit,
                                     'y1' : self.ui.cmpY1LineEdit,
                                     'x2' : self.ui.cmpX2LineEdit,
                                     'y2' : self.ui.cmpY2LineEdit}}

        # Keep track of capture buttons
        self.buttons=['position','radius','annulus','region']
        self.capture={'target' \
            : {'position' : self.ui.captureTgt,
               'radius' : self.ui.captureTgtRadius,
               'annulus' : self.ui.captureTgtAnulusBackground,
               'region' : self.ui.captureTgtRegionBackground},
            'comparison' \
            : {'position' : self.ui.captureCmp,
               'radius' : self.ui.captureCmpRadius,
               'annulus' : self.ui.captureCmpAnulusBackground,
               'region' : self.ui.captureCmpRegionBackground}}

        # Keep track of checkbox recenter widgets
        self.recenter={'target' : self.ui.tgtRecenterCheckBox,
                       'comparison' : self.ui.cmpRecenterCheckBox}
        self.centered={'target' : False,
                       'comparison' : False}

        # Enable blocking of redraws (per object/parameter re-entrancy guard)
        self.block={'target' : { 'x' : False,
                                 'y' : False,
                                 'r' : False,
                                 'r1' : False,
                                 'r2' : False,
                                 'x1' : False,
                                 'y1' : False,
                                 'x2' : False,
                                 'y2' : False},
                    'comparison' : { 'x' : False,
                                     'y' : False,
                                     'r' : False,
                                     'r1' : False,
                                     'r2' : False,
                                     'x1' : False,
                                     'y1' : False,
                                     'x2' : False,
                                     'y2' : False}}

        # Set validator to ensure valid input on lineEdit input widgets
        # (loop variables renamed from `object` to avoid shadowing the builtin)
        self.validator = QtGui.QDoubleValidator(self)
        for obj in ['target','comparison']:
            for key in self.parameters:
                self.input[obj][key].setValidator(self.validator)

        # Set signal mapper for lineEdit updates
        self.drawMapper = QtCore.QSignalMapper(self)

        # Connect lineEdit updates to signal mapper
        for obj in ['target','comparison']:
            for key in self.parameters:
                # Add signal map entry
                self.drawMapper.setMapping(self.input[obj][key],
                                           QString(obj+','+key))
                # Connect to signal mapper
                self.connect(self.input[obj][key], QtCore.SIGNAL('textChanged(QString)'), self.drawMapper, QtCore.SLOT('map()'))

        # Connect signal mapper to draw handler
        self.connect(self.drawMapper, QtCore.SIGNAL('mapped(QString)'),
                     self.textUpdated)

        # Set signal mapper for capture buttons
        self.captureMapper = QtCore.QSignalMapper(self)

        # Connect capture button signals to signal mapper
        for obj in ['target','comparison']:
            for key in self.buttons:
                # Add signal map entry
                self.captureMapper.setMapping(self.capture[obj][key],
                                              QString(obj+','+key))
                # Connect to signal mapper
                self.connect(self.capture[obj][key], QtCore.SIGNAL('clicked()'), self.captureMapper, QtCore.SLOT('map()'))

        # Connect signal mapper to capture handler
        self.connect(self.captureMapper, QtCore.SIGNAL('mapped(QString)'),
                     self.captureHandler)

        # Connect save button
        self.connect(self.ui.saveButton, QtCore.SIGNAL('clicked()'), self.save)

        # If an image list is given
        if self.imlist is not None:
            # Connect image selection spinBox to event handlers
            self.connect(self.ui.imageSpinBox, QtCore.SIGNAL('valueChanged(int)'), self.loadImage)
            self.connect(self.ui.imageSpinBox, QtCore.SIGNAL('valueChanged(int)'), self.redraw)

            # Load first image
            self.setImageNumber(self.number)

        # Hide end selection widgets (not implemented here)
        self.ui.tgtEndPosLabel.hide()
        self.ui.tgtEndXLabel.hide()
        self.ui.tgtEndYLabel.hide()
        self.ui.cmpEndPosLabel.hide()
        self.ui.cmpEndXLabel.hide()
        self.ui.cmpEndYLabel.hide()
        self.ui.tgtXEndLineEdit.hide()
        self.ui.tgtYEndLineEdit.hide()
        self.ui.cmpXEndLineEdit.hide()
        self.ui.cmpYEndLineEdit.hide()
        self.ui.captureTgtEnd.hide()
        self.ui.captureCmpEnd.hide()

    def setImageNumber(self,number):
        """Set the image number."""
        self.ui.imageSpinBox.setValue(number)

    def loadImage(self, number):
        """Loads a new image.

        *number* is the image number to be loaded.

        This function uses `saltsafeio.getexposure` to get the correct
        exposure from a list of fits files containing an arbitrary number
        of extensions.
        """
        # Emit signal
        self.emit(QtCore.SIGNAL("imageNumberUpdated(int)"), number)

        # Load image from file
        self.img=saltsafeio.get_exposure(self.imlist,number)

        # Display image
        self.imdisplay.loadImage(self.img)

        # Redraw canvas
        self.imdisplay.redraw_canvas()

    def mark(self,*args,**kwargs):
        """Draw a position marker using the currently selected marker shape."""
        if self.mark_with=='square':
            self.imdisplay.addSquare(*args,**kwargs)
        elif self.mark_with=='circle':
            self.imdisplay.addCircle(*args,**kwargs)

    def textUpdated(self,key):
        """Handle an edited input field: recenter, propagate locks, redraw."""
        # Get object and parameter from key
        obj,par=str(key).split(',')

        # Check block
        if self.block[obj][par]:
            return

        # Set block to prevent infinite repeat
        self.block[obj][par]=True

        # Recenter on object if requested
        if par=='x' and self.recenter[obj].isChecked() and not self.centered[obj]:
            x=float(self.input[obj]['x'].text())
            y=float(self.input[obj]['y'].text())
            r=float(self.input[obj]['r'].text())
            x,y=find_object(self.img,x,y,self.distance)
            self.input[obj]['x'].setText(str(x))
            self.input[obj]['y'].setText(str(y))
            self.centered[obj]=not(self.centered[obj])

        # Check if object region size locking is on
        if self.ui.lockObjectSizes.isChecked():
            if par=='r':
                r=self.input[obj]['r'].text()
                if obj=='target':
                    self.input['comparison']['r'].setText(r)
                elif obj=='comparison':
                    self.input['target']['r'].setText(r)

        # Check if background size locking is on
        if self.ui.lockBackgroundSize.isChecked():
            if par in ['r1','r2']:
                r=self.input[obj][par].text()
                if obj=='target':
                    self.ui.cmpAnulusRadioButton.setChecked(True)
                    self.input['comparison'][par].setText(r)
                elif obj=='comparison':
                    self.ui.tgtAnulusRadioButton.setChecked(True)
                    self.input['target'][par].setText(r)
            elif par in ['x1','y1','x2','y2']:
                c=self.input[obj][par].text()
                if obj=='target':
                    self.ui.cmpRegionRadioButton.setChecked(True)
                    self.input['comparison'][par].setText(c)
                elif obj=='comparison':
                    self.ui.tgtRegionRadioButton.setChecked(True)
                    self.input['target'][par].setText(c)

        # Check if background region centering
        if self.ui.allignTgtVerticalCenter.isChecked():
            if par in ['y1','y2']:
                y=float(self.input[obj][par].text())
                center=self.img.shape[0]/2.0
                height=abs(y-center)
                self.input[obj]['y1'].setText(str(center+height))
                self.input[obj]['y2'].setText(str(center-height))

        # Draw markers
        self.draw(key)

        # Unset block
        self.block[obj][par]=False

    def draw(self,key):
        """Draws markers for object positions, and backgrounds.

        To be called when any input widget value changes.

        *key* is given by the signal mapper and consists of a string with
        the object and parameter separated by a comma.
        """
        # Get object and parameter from key
        obj,par=str(key).split(',')

        try:
            # Set amplifier
            self.amp[obj]=self.getCurrentAmp()

            # Draw markers
            if par=='x' or par=='y' or par=='r':
                x=float(self.input[obj]['x'].text())
                y=float(self.input[obj]['y'].text())
                r=float(self.input[obj]['r'].text())
                self.display[obj]['position']=True
                self.mark(obj,x,y,r,color=self.line[obj]['color'],lw=self.line[obj]['width'])
            elif par=='r1' or par=='r2':
                # Annulus is selected so remove region marker
                self.imdisplay.removePatch(obj+'_region')
                x=float(self.input[obj]['x'].text())
                y=float(self.input[obj]['y'].text())
                r=float(self.input[obj][par].text())
                # Keep track of the selected background mode
                self.display[obj]['annulus']=True
                self.display[obj]['region']=False
                self.mark(obj+'_'+par,x,y,r,color=self.line[obj]['color'],lw=self.line[obj]['width'])
            elif par=='x1' or par=='y1' or par=='x2' or par=='y2':
                # Region is selected so remove annulus markers
                self.imdisplay.removePatch(obj+'_r1')
                self.imdisplay.removePatch(obj+'_r2')
                x1=float(self.input[obj]['x1'].text())
                y1=float(self.input[obj]['y1'].text())
                x2=float(self.input[obj]['x2'].text())
                y2=float(self.input[obj]['y2'].text())
                # Keep track of the selected background mode
                self.display[obj]['annulus']=False
                self.display[obj]['region']=True
                self.imdisplay.addRectangle(obj+'_region',x1,y1,x2,y2,
                                            color=self.line[obj]['color'],lw=self.line[obj]['width'])

            # Redraw canvas
            self.imdisplay.redraw_canvas(keepzoom=True)
        except ValueError:
            # Incomplete/non-numeric field contents: silently skip drawing
            pass

    def redraw(self, number):
        """Redraws object and background markers for all objects on the
        currently displayed amplifier *number*.
        """
        self.imdisplay.reset()

        # Find wich amplifier is currently displayed
        amp=self.getCurrentAmp()

        # (Re)draw markers
        for obj in ['target','comparison']:
            if self.amp[obj]==amp:
                if self.display[obj]['position']:
                    self.draw(obj+','+'r')
                if self.display[obj]['annulus']:
                    self.draw(obj+','+'r1')
                    self.draw(obj+','+'r2')
                if self.display[obj]['region']:
                    self.draw(obj+','+'y2')

    def getCurrentAmp(self, namps=4):
        """Returns the currently displayed amplifier.

        *namps* is the number of amplifiers on the CCD.
        """
        # Get exposure number
        n=int(self.ui.imageSpinBox.value())

        # Convert exposure number to current amplifier number
        amp=n%namps
        if amp==0:
            amp=namps

        return amp

    def captureHandler(self, key):
        """Called when a capture button is clicked.

        *key* is given by the signal mapper and consists of a string with
        the object and parameter separated by a comma.

        Depending on the *key* input widgets are added to the current
        display lists.

        Subsequent calls to `self.selectionHandler` get displayed in
        the listed widgets.
        """
        # Get object and parameter from key
        obj,par=str(key).split(',')

        # Add input widgets to lists
        if par=='position':
            self.xdisplay=[self.input[obj]['x']]
            self.ydisplay=[self.input[obj]['y']]
            self.rdisplay=[]
        elif par=='radius':
            self.xdisplay=[]
            self.ydisplay=[]
            self.x=float(self.input[obj]['x'].text())
            self.y=float(self.input[obj]['y'].text())
            self.rdisplay=[self.input[obj]['r']]
        elif par=='annulus':
            self.xdisplay=[]
            self.ydisplay=[]
            self.x=float(self.input[obj]['x'].text())
            self.y=float(self.input[obj]['y'].text())
            self.rdisplay=[self.input[obj]['r1'], self.input[obj]['r2']]
        elif par=='region':
            self.xdisplay=[self.input[obj]['x1'], self.input[obj]['x2']]
            self.ydisplay=[self.input[obj]['y1'], self.input[obj]['y2']]
            self.rdisplay=[]

    def selectionHandler(self, x, y):
        """Event handler for click in image display window.

        *x*, *y* is the position (in image pixel coordinates) of the click.

        These positions are inserted into the first input widgets in the
        display lists.

        If a radius is requested this is calculated from the position given
        in (self.x, self.y) which should be set to the current object.
        """
        if len(self.xdisplay)>0:
            display=self.xdisplay.pop(0)
            display.setText(str(x))
        if len(self.ydisplay)>0:
            display=self.ydisplay.pop(0)
            display.setText(str(y))
        if len(self.rdisplay)>0:
            r=np.sqrt((x-self.x)**2+(y-self.y)**2)
            display=self.rdisplay.pop(0)
            display.setText(str(r))

    def setSearchDistance(self, distance):
        """Set search distance used for recentering."""
        self.distance=int(distance)

    def setMarkerType(self, marker):
        """Set marker type to 'circle' or 'square'."""
        if marker in ['circle','square']:
            self.mark_with=marker
        else:
            raise SaltIOError('Unknown marker type '+str(marker))

    def setLineColor(self, object, color):
        """Changes the default line color used for marking."""
        self.line[object]['color']=color

    def setLineWidth(self, object, width):
        """Changes the default line width used for marking."""
        self.line[object]['width']=width

    def save(self):
        """Save configuration.

        The format is::

            For objects that use an anullus:
            object amp x y r r1 r2

            For objects that use a region:
            object amp x y r x1 y1 x2 y2
        """
        if (self.ui.tgtAnulusRadioButton.isChecked() and self.ui.cmpRegionRadioButton.isChecked()) or \
           (self.ui.tgtRegionRadioButton.isChecked() and self.ui.cmpAnulusRadioButton.isChecked()):
            msg='SLOTPREVIEW--SLOTPHOT can not handle different background types'
            raise SaltError(msg)

        # Write values to file
        with open(self.config,'w') as f:
            for i,obj in enumerate(['target','comparison']):
                b_type='region'
                if obj=='target':
                    if self.ui.tgtAnulusRadioButton.isChecked(): b_type='annulus'
                elif obj=='comparison':
                    if self.ui.cmpAnulusRadioButton.isChecked(): b_type='annulus'
                line='%i\t%i\t' % (i+1, self.amp[obj])
                if b_type=='annulus':
                    line+=''.join('%3.2f\t' % float(self.input[obj][key].text()) for key in ['x', 'y', 'r', 'r1', 'r2'])
                else:
                    # Region corners are (x1, y1) and (x2, y2).
                    # BUGFIX: the key list previously read 'y2' in place of
                    # 'y1', so the first corner's y coordinate was never saved.
                    line+=''.join('%3.2f\t' % float(self.input[obj][key].text()) for key in ['x', 'y', 'r', 'x1', 'y1', 'x2', 'y2'])
                # Write string to configfile
                f.write(line.rstrip()+'\n')

        # Exit program
        self.close()
| 22,987 | 6,488 |
"""
Add Two Numbers: Leetcode 2
You are given two non-empty linked lists representing two non-negative integers.
The digits are stored in reverse order, and each of their nodes contains a single digit.
Add the two numbers and return the sum as a linked list.
You may assume the two numbers do not contain any leading zero, except the number 0 itself.
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
    """
    This is how addition works (Elementary Math):
      111   <- carried values
      |||
     7692
    +3723
    -----
    0426   (digits are stored least-significant first)
    -----
    """
    # O(max(m,n)) time | O(max(m,n)) space | m=len(l1), n=len(l2)
    def addTwoNumbers(self, l1, l2):
        """Add two non-negative integers stored as reversed-digit lists.

        :param l1: head of the first linked list (least-significant first)
        :param l2: head of the second linked list
        :return: head of a new linked list holding the digit-wise sum
        """
        p1 = l1
        p2 = l2
        carry = 0
        # Dummy head node; the real result starts at result.next.
        result = ListNode()
        res_curr = result
        # Keep looping while either list has digits OR a carry is pending
        # (e.g. 119 + 119 needs one extra node for the final carry).
        # IDIOM FIX: compare against None with `is not None`, not `!=`.
        while p1 is not None or p2 is not None or carry != 0:
            top = 0
            bottom = 0
            if p1 is not None:
                top = p1.val
                p1 = p1.next
            if p2 is not None:
                bottom = p2.val
                p2 = p2.next
            my_sum = carry + top + bottom  # max is 9 + 9 + 1 = 19
            if my_sum > 9:  # carry into the next digit
                res_curr.next = ListNode(val=my_sum - 10)
                carry = 1
            else:
                res_curr.next = ListNode(val=my_sum)
                carry = 0
            res_curr = res_curr.next
        # Skip the dummy node created during initialization.
        return result.next
| 1,895 | 576 |
import pandas as pd
import sqlite3
from contextlib import closing

# Load the survey CSV and mirror it into a SQLite table named "review".
df = pd.read_csv("buddymove_holidayiq.csv")
# BUG FIX: the connection was previously opened and never closed;
# closing() guarantees release even if to_sql or the query fails.
with closing(sqlite3.connect("buddymove_holidayiq.sqlite3")) as connection:
    df.to_sql("review", connection)
    # Quick sanity check that the data landed.
    print(connection.execute("SELECT * FROM review LIMIT 10").fetchall())
| 243 | 84 |
# -*- coding: utf-8 -*-
import tkinter as tk
import sound
import socket
import threading
class ConnClient():
    '''
    Receives detection data from the Raspberry Pi over a socket connection.
    (Original comment: receives image information from the Raspberry Pi
    via socket communication.)
    '''
    def __init__(self,conn, addr):
        # Accepted socket and peer address, as returned by socket.accept().
        self.conn_socket = conn
        self.addr = addr
        # Stages of the received payload: raw bytes, decoded text,
        # comma-split fields, and the parsed integer list.
        self.recvdata = 0
        self.recvdata1 = 0
        self.recvdata2 = 0
        self.data_list=0
    def run(self):
        # Single recv() with a large buffer; the payload is expected to be a
        # comma-separated list of integers.  NOTE(review): recv() may return
        # a partial message — this assumes the whole payload arrives in one
        # chunk; confirm against the sender.
        try:
            self.recvdata = self.conn_socket.recv(2359296)
            self.recvdata1 = self.recvdata.decode('utf-8')
            self.recvdata2 = self.recvdata1.split(",")
            self.data_list = [int(s) for s in self.recvdata2]
        except socket.error:
            print("connect error")
    def stop(self):
        # Close the accepted connection socket.
        self.conn_socket.close()
def main():
    """Accept connections forever and feed each received payload to the game.

    Blocks on accept(); intended to run on a daemon thread (see buffer()).
    """
    global recvlist
    s_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s_socket.bind((HOSTNAME, PORT))
    s_socket.listen(CLIENTNUM)
    while (1):
        conn, addr = s_socket.accept()
        print("Conneted by" + str(addr))
        # One ConnClient per connection; run() blocks until data arrives.
        connClientThread = ConnClient(conn, addr)
        connClientThread.run()
        recvlist = connClientThread.data_list
        print(recvlist)
        # First field of the payload is the bull-hit flag.
        bullsystem(recvlist[0])
def bullsystem(flag):
    """Update score, throw log and on-screen text from the latest payload.

    recvlist layout, as indexed below: [bull_flag, photoorder, round_total,
    first_throw, second_throw, third_throw] — inferred from usage here;
    confirm against the Raspberry Pi sender.

    :param flag: 1 when the throw hit the bull, anything else otherwise
    """
    global throw_number, score, round_total,recvlist
    photoorder = recvlist[1]
    throw_number += 1
    round_total = recvlist[2]
    first_throw = recvlist[3]
    second_throw = recvlist[4]
    third_throw = recvlist[5]
    # Show the three per-throw values next to the board image.
    canvas.itemconfig(on_canvas_text1, text=str(first_throw))
    canvas.itemconfig(on_canvas_text2, text=str(second_throw))
    canvas.itemconfig(on_canvas_text3, text=str(third_throw))
    if flag == 1:
        # Bull hit: play the fanfare and award 50 points.
        play_sounds.sound1()
        score += 50
        lb.insert(tk.END, str(throw_number)+ "BULL " + str(score))
        canvas.itemconfig(
            on_canvas_text,
            text=str(score)
        )
    else:
        lb.insert(tk.END, str(throw_number)+"NO BULL"+ str(score))
    # After the third photo of a round, show the milestone image.
    if photoorder == 3 and round_total > 0:
        changeimg()
def memo():
    """Append the entry-box text to the log listbox, or prompt when empty."""
    text = entry.get()
    if not text:
        # Prompt the user to type something ("入力してね" = "please enter").
        lb.insert(tk.END, "入力してね")
        return
    lb.insert(tk.END, text)
    entry.delete(0, tk.END)
def changeimg():
    """Show the milestone image for the current round total, then schedule
    the sound effect and the revert back to the normal board."""
    global canvas, on_canvas, score, round_total
    # Slide every score/throw text item off-screen while the image shows.
    for text_item in (on_canvas_text, on_canvas_text1,
                      on_canvas_text2, on_canvas_text3):
        canvas.move(text_item, 1000, 1000)
    # Map a milestone round total to its celebration image index.
    milestone_image_index = {50: 1, 100: 2, 150: 3}.get(round_total)
    if milestone_image_index is not None:
        canvas.itemconfig(on_canvas, image=images[milestone_image_index])
    root.after(3900, play_sounds.sound2)
    root.after(7000, rechangeimg)
def rechangeimg():
    """Restore the default board image and move the text items back on-screen."""
    global root, canvas
    canvas.itemconfig(on_canvas, image=images[0])
    # Undo the +1000/+1000 shift applied by changeimg().
    for text_item in (on_canvas_text, on_canvas_text1,
                      on_canvas_text2, on_canvas_text3):
        canvas.move(text_item, -1000, -1000)
def buffer():
    """Run the socket-server loop on a daemon thread so the GUI stays responsive."""
    # FIX: Thread.setDaemon() is deprecated (since Python 3.10); pass the
    # daemon flag to the constructor instead.
    th_body = threading.Thread(target=main, name='main', daemon=True)
    th_body.start()
def rungui():
    """Build the Tk UI (board canvas, log listbox, memo entry, menus) and
    enter the main loop.  All widget handles are published as globals so
    bullsystem()/changeimg() can update them from the socket thread."""
    global root, canvas, on_canvas, images, lb, entry, on_canvas_text, score
    global on_canvas_text1, on_canvas_text2, on_canvas_text3
    # main window
    root = tk.Tk()
    root.geometry("1140x675")
    root.title("DARTS BULL GAME")
    font = ("Helevetica", 14)
    font_log = ("Helevetica", 11)
    # menubar
    menubar = tk.Menu(root)
    root.config(menu=menubar)
    # start menu: launches the socket server thread ("開始する" = "start")
    startmenu = tk.Menu(menubar)
    menubar.add_cascade(label="BULL GAME", menu=startmenu)
    startmenu.add_command(label="開始する", command=lambda: buffer())
    # canvas for the board image, score and throw values
    canvas = tk.Canvas(
        root,
        width=960,
        height=600,
        relief=tk.RIDGE,
        bd=2
    )
    canvas.place(x=175, y=0)
    # board / milestone images; index 0 is the default board
    images.append(tk.PhotoImage(file="501.png"))
    images.append(tk.PhotoImage(file="onebull.png"))
    images.append(tk.PhotoImage(file="lowton.png"))
    images.append(tk.PhotoImage(file="hattrick.png"))
    on_canvas = canvas.create_image(
        0,
        0,
        image=images[0],
        anchor=tk.NW
    )
    # big score text plus the three per-throw values
    on_canvas_text = canvas.create_text(
        480, 300, text=str(score), font=("Helvetica", 250, "bold")
    )
    on_canvas_text1 = canvas.create_text(
        850, 145, text=0, font=("Helvetica", 40, "bold"), fill='white')
    on_canvas_text2 = canvas.create_text(
        850, 195, text=0, font=("Helvetica", 40, "bold"), fill='white')
    on_canvas_text3 = canvas.create_text(
        850, 245, text=0, font=("Helvetica", 40, "bold"), fill='white')
    # response_area: gray strip behind the entry box
    response_area = tk.Label(
        root,
        width=106,
        height=4,
        bg="gray",
        font=font,
        relief=tk.RIDGE,
        bd=2
    )
    response_area.place(x=176, y=600)
    # entrybox for free-text memos
    entry = tk.Entry(
        root,
        width=75,
        font=font
    )
    entry.place(x=230, y=630)
    entry.focus_set()
    # listbox used as the throw/score log
    lb = tk.Listbox(
        root,
        width=20,
        height=43,
        font=font_log
    )
    # scrollbar for the log listbox
    sb1 = tk.Scrollbar(
        root,
        orient=tk.VERTICAL,
        command=lb.yview
    )
    # link listbox scrolling to the scrollbar
    lb.configure(yscrollcommand=sb1.set)
    lb.grid(row=0, column=0)
    sb1.grid(row=0, column=1, sticky=tk.NS)
    # START button — same action as the menu entry
    button = tk.Button(
        root,
        bg='black',
        command=lambda: buffer(),
        text="START",
        width=19,
    )
    button.place(x=0, y=655)
    # MEMO button appends the entry text to the log
    button2 = tk.Button(
        root,
        width=15,
        text="MEMO",
        command=lambda: memo())
    button2.place(x=950, y=630)
    # mainloop
    root.mainloop()
if __name__ == "__main__":
    # Widget/canvas-item handles are created in rungui(); initialised here
    # so the module-level names exist before the GUI is built.
    lb = None
    on_canvas = None
    on_canvas_text = None
    on_canvas_text1 = None
    on_canvas_text2 = None
    on_canvas_text3 = None
    images = []
    entry = None
    response_area = None
    # Game state shared with the socket thread.
    score = 0
    throw_number = 0
    play_sounds = sound.Sounds()
    # Socket-server settings for the Raspberry Pi connection.
    HOSTNAME = "192.168.0.3"
    PORT = 12345
    CLIENTNUM = 1
    rungui()
| 6,539 | 2,517 |
import graphene
class TokenData(graphene.ObjectType):
    """GraphQL object carrying a token pair.

    Both fields are optional so a response can omit either token.
    """
    access_token = graphene.String(required=False)
    refresh_token = graphene.String(required=False)
| 159 | 48 |
from . import moving_average
from .. import indicators
class BollingerBands:
    """Bollinger Bands: a moving average plus/minus a multiple of the
    running standard deviation of the same values."""

    def __init__(self, values, multiplier=2, moving_average_class=moving_average.SimpleMovingAverage):
        self.moving_average_class = moving_average_class
        self.multiplier = multiplier
        self.moving_average = self.moving_average_class(values)
        self.standard_deviation = indicators.StandardDeviation(values)

    def add(self, value):
        """Feed a new observation into both underlying indicators."""
        for indicator in (self.moving_average, self.standard_deviation):
            indicator.add(value)

    @property
    def _band_offset(self):
        # Distance from the moving average to either band.
        return self.standard_deviation.value * self.multiplier

    @property
    def upper(self):
        """Moving average plus the band offset."""
        return self.moving_average.value + self._band_offset

    @property
    def lower(self):
        """Moving average minus the band offset."""
        return self.moving_average.value - self._band_offset

    @property
    def bandwidth(self):
        """Distance between the upper and lower bands."""
        return self.upper - self.lower
| 857 | 266 |
# Copyright (c) 2016 Red Hat
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import uuidutils
import testtools
from neutron.common import utils as common_utils
from neutron.services.trunk.drivers.openvswitch.agent import trunk_manager
from neutron.tests import base
NATIVE_OVSDB_CONNECTION = (
'neutron.agent.ovsdb.impl_idl.OvsdbIdl.ovsdb_connection')
class TrunkParentPortTestCase(base.BaseTestCase):
    """Tests for TrunkParentPort's nested OVSDB transaction handling."""
    def setUp(self):
        super(TrunkParentPortTestCase, self).setUp()
        # Mock out connecting to ovsdb
        mock.patch(NATIVE_OVSDB_CONNECTION).start()
        trunk_id = uuidutils.generate_uuid()
        port_id = uuidutils.generate_uuid()
        trunk_mac = common_utils.get_random_mac('fa:16:3e:00:00:00'.split(':'))
        self.trunk = trunk_manager.TrunkParentPort(
            trunk_id, port_id, trunk_mac)
    def test_multiple_transactions(self):
        # Nested ovsdb_transaction() contexts must reuse the outermost
        # transaction and commit only once, when the outer context exits.
        def method_inner(trunk):
            with trunk.ovsdb_transaction() as txn:
                return id(txn)
        def method_outer(trunk):
            with trunk.ovsdb_transaction() as txn:
                return method_inner(trunk), id(txn)
        with self.trunk.ovsdb_transaction() as txn1:
            mock_commit = mock.patch.object(txn1, 'commit').start()
            txn_inner_id, txn_outer_id = method_outer(self.trunk)
            # Still inside the outer context: nothing committed yet.
            self.assertFalse(mock_commit.called)
        # Outer context exited: the single commit happens exactly now.
        self.assertTrue(mock_commit.called)
        self.assertTrue(id(txn1) == txn_inner_id == txn_outer_id)
    def test_transaction_raises_error(self):
        # An exception inside the context must clear the cached transaction
        # so the next ovsdb_transaction() starts a fresh one.
        class MyException(Exception):
            pass
        with testtools.ExpectedException(MyException):
            with self.trunk.ovsdb_transaction() as txn1:
                mock.patch.object(txn1, 'commit').start()
                raise MyException()
        self.assertIsNone(self.trunk._transaction)
        with self.trunk.ovsdb_transaction() as txn2:
            mock.patch.object(txn2, 'commit').start()
        self.assertIsNot(txn1, txn2)
| 2,566 | 822 |
"""
Defines the GraphQL query resolvers and how they interact with the todo use cases.
"""
from app.schema.types.todo import TodoListResponseField, TodoResponseField
from app.usecases.todo import read_all_todos, read_todo_by_id
def resolve_list_todos(self, info) -> TodoListResponseField:
    """Resolve the todo-list query, packing any failure into the response."""
    todos, is_success, error_message = None, False, None
    try:
        todos = read_all_todos()
        is_success = True
    except Exception as e:
        # Surface the error message instead of raising through GraphQL.
        error_message = str(e)
    return TodoListResponseField(
        todos=todos, is_success=is_success, error_message=error_message
    )
def resolve_get_todo(self, info, todo_id: str) -> TodoResponseField:
    """Resolve a single todo by id, reporting a not-found message on failure."""
    todo, is_success = read_todo_by_id(todo_id)
    if is_success:
        error_message = None
    else:
        error_message = "This element does not exist."
    return TodoResponseField(
        todo=todo, is_success=is_success, error_message=error_message
    )
| 873 | 281 |
from google.appengine.ext import db
from models.Trail import Trail
class EntryRating(db.Model):
    '''
    Stores the ratings users give to locations.

    Attributes:
        trail: reference to the rated Trail entity
        user: the user who submitted the rating (required)
        rating: the numeric rating value (required)
    '''
    trail = db.ReferenceProperty(Trail)
    user = db.UserProperty(required = True)
    rating = db.RatingProperty(required = True)

    def __str__(self):
        # BUG FIX: previously formatted self.title, which this model does not
        # define and would raise AttributeError.  The trail reference is the
        # closest matching attribute — presumably Trail's str() names the
        # trail; TODO confirm Trail.__str__.
        return "%s: %s - %f" % (self.user.nickname(), self.trail, self.rating)
| 469 | 144 |
import json
from datetime import datetime
from decimal import Decimal
from django import http
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render
# Create your views here.
from django.views import View
from django_redis import get_redis_connection
from apps.goods.models import SKU
from apps.orders.models import OrderInfo, OrderGoods
from apps.users.models import Address, User
from meiduo_mall.settings.dev import logger
from utils.response_code import RETCODE
class OrderSettlementView(LoginRequiredMixin, View):
    """Render the order-confirmation page for the user's selected cart items."""

    def get(self, request):
        user = request.user
        # Shipping addresses; a lookup failure simply means none are shown.
        try:
            addresses = Address.objects.filter(user=user, is_deleted=False)
        except Exception as e:
            addresses = None
        # Pull this user's cart hash from redis and keep only selected items.
        redis_client = get_redis_connection('carts')
        carts_data = redis_client.hgetall(user.id)
        carts_dict = {}
        for raw_key, raw_value in carts_data.items():
            item = json.loads(raw_value.decode())
            if item["selected"]:
                carts_dict[int(raw_key.decode())] = item
        skus = SKU.objects.filter(id__in=carts_dict.keys())
        # Annotate each SKU with its cart count/amount and accumulate totals.
        total_count = 0
        total_amount = Decimal('0.00')
        for sku in skus:
            sku.count = carts_dict[sku.id]['count']
            sku.amount = sku.price * sku.count
            total_count += sku.count
            total_amount += sku.amount
        freight = Decimal('10.00')
        context = {
            'addresses': addresses,
            'skus': skus,
            'total_count': total_count,
            'total_amount': total_amount,
            'freight': freight,
            'payment_amount': total_amount + freight,
            'default_address_id': user.default_address_id
        }
        return render(request, 'place_order.html', context)
class OrderCommitView(LoginRequiredMixin, View):
    """Create an order from the selected cart items.

    Stock is decremented with optimistic locking: the conditional UPDATE is
    keyed on the stock value read earlier and retried on conflict.
    """

    def post(self, request):
        # FIX: parse the request body once — it was previously decoded and
        # json.loads()'d three separate times.
        json_dict = json.loads(request.body.decode())
        address_id = json_dict['address_id']
        pay_method = json_dict['pay_method']
        user = request.user
        # Validate the address and payment method.
        try:
            address = Address.objects.get(id=address_id)
        except Address.DoesNotExist:
            return http.HttpResponseForbidden('WUXIAO')
        if pay_method not in [OrderInfo.PAY_METHODS_ENUM['CASH'], OrderInfo.PAY_METHODS_ENUM['ALIPAY']]:
            return http.HttpResponseForbidden('不支持')
        # Order id: timestamp + zero-padded 9-digit user id.
        order_id = datetime.now().strftime('%Y%m%d%H%M%S') + ('%09d' % user.id)
        # Wrap order creation and stock updates in one DB transaction.
        from django.db import transaction
        with transaction.atomic():
            # -------- transaction savepoint --------
            save_id = transaction.savepoint()
            try:
                order = OrderInfo.objects.create(
                    order_id=order_id,
                    user=user,
                    address=address,
                    total_count=0,
                    total_amount=Decimal('0.00'),
                    freight=Decimal("10.00"),
                    pay_method=pay_method,
                    # Alipay orders start unpaid; cash-on-delivery starts unshipped.
                    status=OrderInfo.ORDER_STATUS_ENUM['UNPAID'] if pay_method == OrderInfo.PAY_METHODS_ENUM['ALIPAY'] else
                    OrderInfo.ORDER_STATUS_ENUM['UNSEND']
                )
                # Collect the selected cart items from redis.
                redis_client = get_redis_connection('carts')
                carts_data = redis_client.hgetall(user.id)
                carts_dict = {}
                for key, value in carts_data.items():
                    sku_id = int(key.decode())
                    sku_dict = json.loads(value.decode())
                    if sku_dict['selected']:
                        carts_dict[sku_id] = sku_dict
                sku_ids = carts_dict.keys()
                for sku_id in sku_ids:
                    # Optimistic-lock retry loop: re-read stock and attempt a
                    # conditional UPDATE until it succeeds or stock runs out.
                    while True:
                        sku = SKU.objects.get(id=sku_id)
                        original_stock = sku.stock
                        original_sales = sku.sales
                        # Check the requested quantity against current stock.
                        cart_count = carts_dict[sku_id]['count']
                        if cart_count > sku.stock:
                            transaction.savepoint_rollback(save_id)
                            return http.JsonResponse({'code': RETCODE.STOCKERR, 'errmsg': '库存不足'})
                        new_stock = original_stock - cart_count
                        new_sales = original_sales + cart_count
                        # Conditional update succeeds only if stock is unchanged.
                        result = SKU.objects.filter(id=sku_id, stock=original_stock).update(stock=new_stock, sales=new_sales)
                        if result == 0:
                            # Lost the race — re-read and retry.
                            continue
                        # NOTE(review): this save() re-writes stock/sales that
                        # the conditional update just changed — confirm intent.
                        sku.stock -= cart_count
                        sku.sales += cart_count
                        sku.save()
                        sku.spu.sales += cart_count
                        sku.spu.save()
                        # Create the order-goods row for this SKU.
                        OrderGoods.objects.create(
                            order_id=order_id,
                            sku=sku,
                            count=cart_count,
                            price=sku.price,
                        )
                        # Accumulate totals (freight added after the loop).
                        order.total_count += cart_count
                        order.total_amount += sku.price * cart_count
                        # This SKU is committed — move to the next one.
                        break
                # Add the freight to the final amount.
                order.total_amount += order.freight
                order.save()
            except Exception as e:
                logger.error(e)
                transaction.savepoint_rollback(save_id)
                return http.JsonResponse({'code': RETCODE.STOCKERR, 'errmsg': '库存不足'})
            transaction.savepoint_commit(save_id)
        # Cart cleanup intentionally left disabled:
        # redis_client.hdel(user.id, *carts_dict)
        return http.JsonResponse({'code': RETCODE.OK, 'errmsg': '下单成功', 'order_id': order.order_id})
class OrderSuccessView(View):
    """Render the order-success page straight from the query parameters."""

    def get(self, request):
        # Echo the order details passed along in the redirect query string.
        context = {
            key: request.GET.get(key)
            for key in ("order_id", "pay_method", "payment_amount")
        }
        return render(request, 'order_success.html', context)
class OrderShowView(LoginRequiredMixin,View):
    """Render the user's order-history page.

    NOTE(review): much of this handler looks exploratory/unfinished —
    page_orders is created empty and never populated before being passed to
    the template, the per-order loop computes values it never uses, and
    page_num is overwritten with 1 (pagination not implemented).
    """
    def get(self,request,page_num):
        # Identify the user from the username cookie rather than request.user.
        username = request.COOKIES.get('username')
        user = User.objects.get(username=username)
        user_id = user.id
        order_data = OrderInfo.objects.all()
        goods_data = OrderGoods.objects.all()
        order_ids = order_data.filter(user_id=user_id).values('order_id')
        # order_ids = OrderInfo.objects.filter(user_id=user_id)
        page_orders = {}
        # list of all order ids (deduplicated below)
        order_list = []
        order_id_count = goods_data.values('order_id', 'count')
        order_id_set = set()
        for order_data_co in order_id_count:
            a = order_data_co['order_id']
            order_list.append(a)
        order_list =list(set(order_list))
        print(order_list)
        for order_id in order_ids:
            order_id = order_id['order_id'] # order number
            time_old = order_data.filter(order_id=order_id).values('create_time') # creation time
            time = str(time_old[0]['create_time'])
            time_new = time[0:16] # timestamp truncated to minutes
            freight = time_old.values('freight')[0]['freight'] # freight
            """<QuerySet [{'address_id': 1, 'user_id': 19, 'total_count': 1,
            'order_id': '20190927003440000000019',
            'status': 1, 'pay_method': 2,
            'create_time': datetime.datetime(2019, 9, 27, 0, 34, 40, 214624, tzinfo=<UTC>),
            'update_time': datetime.datetime(2019, 9, 27, 0, 34, 40, 235034, tzinfo=<UTC>),
            'freight': Decimal('10.00'), 'total_amount': Decimal('6698.00')}]>
            """
            # if total_amount-freight == 0.00 or total_amount == 0.00:
            #     continue
            #
            # page_orders = {}
            # for Goods in goods_data:
            #     page_orders.setdefault(order_id,[time,freight,]).append(Goods)
        page_num = 1
        """
        下单时间 订单号
        商品信息 数量 单价 总价 运费 支付方式 订单状态 """
        context = {
            "page_orders": page_orders,
            # # # total page count
            # # 'total_page': total_page,
            # # # current page
            'page_num': page_num,
        }
        return render(request,'user_center_order.html',context)
| 8,884 | 2,829 |
import json
import logging
import os
import ssl
import webbrowser
from multiprocessing import Event, Queue
from urllib.request import urlopen
import certifi
from PySide2 import QtCore, QtGui, QtWidgets
from packaging import version
from pie.core import IndexDB, IndexingHelper, MediaProcessor
from pie.domain import IndexingTask, Settings
from pie.log_window import LogWindow
from pie.preferences_window import PreferencesWindow
from pie.util import MiscUtils, QWorker
class TrayIcon(QtWidgets.QSystemTrayIcon):
    """System-tray controller for the app: owns the context menu, launches
    indexing/compression jobs on a Qt thread pool, and opens the
    preferences and log windows."""

    __APP_VER = "1.0.2"
    __logger = logging.getLogger('TrayIcon')

    def __init__(self, log_queue: Queue):
        super().__init__(QtGui.QIcon(MiscUtils.get_app_icon_path()))
        self.log_queue = log_queue
        self.preferences_window: PreferencesWindow = None
        self.log_window: LogWindow = None
        self.indexing_stop_event: Event = None
        self.observer = None
        self.indexDB = IndexDB()
        self.threadpool: QtCore.QThreadPool = QtCore.QThreadPool()
        self.__logger.debug("QT multithreading with thread pool size: %s", self.threadpool.maxThreadCount())
        self.setToolTip("Batch Media Compressor")
        self.activated.connect(self.trayIcon_activated)
        # Tray context menu: processing controls, maintenance, windows, misc.
        tray_menu = QtWidgets.QMenu('Main Menu')
        self.startIndexAction = tray_menu.addAction('Start Processing', self.startIndexAction_triggered)
        self.stopIndexAction = tray_menu.addAction('Stop Processing', self.stopIndexAction_triggered)
        self.stopIndexAction.setEnabled(False)
        tray_menu.addSeparator()
        self.clearIndexAction = tray_menu.addAction('Clear Indexed Files', self.clearIndexAction_triggered)
        # BUG FIX: label typo — previously read 'Clear Ouput Directories'.
        self.clearOutputDirsAction = tray_menu.addAction('Clear Output Directories', self.clearOutputDirsAction_triggered)
        tray_menu.addSeparator()
        self.editPrefAction = tray_menu.addAction('Edit Preferences', self.editPreferencesAction_triggered)
        self.viewLogsAction = tray_menu.addAction('View Logs', self.viewLogsAction_triggered)
        tray_menu.addSeparator()
        self.updateCheckAction = tray_menu.addAction('Check for Updates', self.updateCheckAction_triggered)
        self.coffeeAction = tray_menu.addAction('Buy me a Coffee', self.coffeeAction_triggered)
        tray_menu.addSeparator()
        tray_menu.addAction('Quit', self.quitMenuAction_triggered)
        self.setContextMenu(tray_menu)
        self.apply_process_changed_setting()
        # Optionally kick off a silent update check in the background.
        if self.indexDB.get_settings().auto_update_check:
            self.update_check_worker = QWorker(self.auto_update_check)
            self.threadpool.start(self.update_check_worker)

    def trayIcon_activated(self, reason):
        """Clicking the tray icon is a no-op; the context menu drives the app."""
        pass

    def startIndexAction_triggered(self):
        """Start the indexing/compression job on a worker thread."""
        if self.indexDB.get_settings().auto_show_log_window:
            self.show_view_logs_window()
        self.background_processing_started()
        self.indexing_stop_event = Event()
        self.indexing_worker = QWorker(self.start_indexing)
        self.indexing_worker.signals.finished.connect(self.background_processing_finished)
        self.threadpool.start(self.indexing_worker)
        self.stopIndexAction.setEnabled(True)

    def stopIndexAction_triggered(self):
        """Confirm with the user, then signal the running job to stop."""
        response: QtWidgets.QMessageBox.StandardButton = QtWidgets.QMessageBox.question(
            None, "Confirm Action", "Are you sure you want to stop the current task?",
            QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
        )
        if QtWidgets.QMessageBox.Yes == response:
            self.stopIndexAction.setEnabled(False)
            self.stop_async_tasks()

    def clearIndexAction_triggered(self):
        """Confirm, then clear the index AND delete all output files."""
        response: QtWidgets.QMessageBox.StandardButton = QtWidgets.QMessageBox.question(
            None, "Confirm Action", "Forget indexed files and delete all output files?",
            QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
        )
        if QtWidgets.QMessageBox.Yes == response:
            self.background_processing_started()
            self.deletion_worker = QWorker(self.start_deletion, True)
            self.deletion_worker.signals.finished.connect(self.background_processing_finished)
            self.threadpool.start(self.deletion_worker)

    def clearOutputDirsAction_triggered(self):
        """Confirm, then delete output files only (the index is kept)."""
        response: QtWidgets.QMessageBox.StandardButton = QtWidgets.QMessageBox.question(
            None, "Confirm Action", "Delete all output files?",
            QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
        )
        if QtWidgets.QMessageBox.Yes == response:
            self.background_processing_started()
            self.deletion_worker = QWorker(self.start_deletion, False)
            self.deletion_worker.signals.finished.connect(self.background_processing_finished)
            self.threadpool.start(self.deletion_worker)

    def start_deletion(self, clearIndex: bool):
        """Worker: optionally clear the index, then wipe both output dirs."""
        MiscUtils.debug_this_thread()
        with IndexDB() as indexDB:
            if clearIndex:
                indexDB.clear_indexed_files()
                self.__logger.info("Index cleared")
            settings: Settings = indexDB.get_settings()
            MiscUtils.recursively_delete_children(settings.output_dir)
            MiscUtils.recursively_delete_children(settings.unknown_output_dir)
            self.__logger.info("Output directories cleared")

    def editPreferencesAction_triggered(self):
        """Show the (lazily created) preferences window."""
        if self.preferences_window is None:
            self.preferences_window = PreferencesWindow(self.apply_process_changed_setting)
        self.preferences_window.show()

    def viewLogsAction_triggered(self):
        self.show_view_logs_window()

    def show_view_logs_window(self):
        """Show the (lazily created) log window."""
        if self.log_window is None:
            self.log_window = LogWindow(self.threadpool)
        self.log_window.show()

    def updateCheckAction_triggered(self):
        # Manual check: report "no updates found" explicitly.
        self.check_for_updates(True)

    def auto_update_check(self):
        """Worker: silent startup update check (no dialog if up to date)."""
        MiscUtils.debug_this_thread()
        self.check_for_updates(False)

    def check_for_updates(self, display_not_found: bool):
        """Compare the latest GitHub release tag against __APP_VER and offer
        to open the releases page when a newer version exists.

        :param display_not_found: show a dialog even when no update exists
        """
        api_url = "https://api.github.com/repos/sabaatworld/batch-media-compressor/releases/latest"
        releases_url = "https://github.com/sabaatworld/batch-media-compressor/releases"
        update_found = False
        try:
            ssl_context = ssl.create_default_context(cafile=certifi.where())
            response = urlopen(api_url, context=ssl_context)
            response_string = response.read().decode('utf-8')
            response_json = json.loads(response_string)
            tag_name: str = response_json["tag_name"]
            if tag_name is not None:
                release_version = version.parse(tag_name.replace("v", ""))
                current_version = version.parse(self.__APP_VER)
                self.__logger.info("Updated Check successful: Current Version: %s, Latest Release: %s", str(current_version), str(release_version))
                if current_version < release_version:
                    update_found = True
        except Exception:
            # BUG FIX: was a bare 'except:', which also swallows SystemExit
            # and KeyboardInterrupt; network/parse failures are logged here.
            self.__logger.exception("Failed to check for updates")
        if update_found:
            if QtWidgets.QMessageBox.information(
                None, "Update Check",
                "New version available. Do you wish to download the latest release now?\n\nCurrent Verion: {}\nNew Version: {}".format(str(current_version), str(release_version)),
                QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
            ) == QtWidgets.QMessageBox.Yes:
                webbrowser.open(releases_url)
        elif display_not_found:
            QtWidgets.QMessageBox.information(None, "Update Check", "No updates found.\n\nIf you think this is an error, please check your internet connection and try again.", QtWidgets.QMessageBox.Ok)

    def coffeeAction_triggered(self):
        webbrowser.open('https://paypal.me/sabaat')

    def quitMenuAction_triggered(self):
        QtWidgets.QApplication.quit()

    def start_indexing(self):
        """Worker: scan, index and process media, honouring the stop event."""
        MiscUtils.debug_this_thread()
        with IndexDB() as indexDB:
            indexing_task = IndexingTask()
            indexing_task.settings = indexDB.get_settings()
            if self.settings_valid(indexing_task.settings):
                misc_utils = MiscUtils(indexing_task)
                misc_utils.create_root_marker()
                indexing_helper = IndexingHelper(indexing_task, self.log_queue, self.indexing_stop_event)
                (scanned_files, _) = indexing_helper.scan_dirs()
                indexing_helper.remove_slate_files(indexDB, scanned_files)
                indexing_helper.lookup_already_indexed_files(indexDB, scanned_files)
                # Re-check the stop event between each expensive phase.
                if not self.indexing_stop_event.is_set():
                    indexing_helper.create_media_files(scanned_files)
                if not self.indexing_stop_event.is_set():
                    media_processor = MediaProcessor(indexing_task, self.log_queue, self.indexing_stop_event)
                    media_processor.save_processed_files(indexDB)
                if not self.indexing_stop_event.is_set():
                    misc_utils.cleanEmptyOutputDirs()

    def settings_valid(self, settings: Settings) -> bool:
        """Validate the three configured directories; log and return False
        on the first problem found."""
        error_msg: str = None
        if settings.monitored_dir is None:
            error_msg = "Directory to scan not configured"
        elif not os.path.isdir(settings.monitored_dir):
            error_msg = "Directory to scan is invalid"
        elif settings.output_dir is None:
            error_msg = "Media with Capture Date directory not configured"
        elif not os.path.isdir(settings.output_dir):
            error_msg = "Media with Capture Date directory is invalid"
        elif settings.unknown_output_dir is None:
            error_msg = "Media without Capture Date directory not configured"
        elif not os.path.isdir(settings.unknown_output_dir):
            error_msg = "Media without Capture Date directory is invalid"
        if error_msg is not None:
            self.__logger.error("Cannot start processing: %s. Please update preferences and try again.", error_msg)
            return False
        else:
            return True

    def background_processing_started(self):
        """Disable menu actions that must not run concurrently with a job."""
        self.startIndexAction.setEnabled(False)
        self.clearIndexAction.setEnabled(False)
        self.clearOutputDirsAction.setEnabled(False)
        self.editPrefAction.setEnabled(False)
        if self.preferences_window is not None:
            self.preferences_window.hide()

    def background_processing_finished(self):
        """Re-enable menu actions once the background job ends."""
        self.startIndexAction.setEnabled(True)
        self.stopIndexAction.setEnabled(False)
        self.clearIndexAction.setEnabled(True)
        self.clearOutputDirsAction.setEnabled(True)
        self.editPrefAction.setEnabled(True)

    def stop_async_tasks(self):
        # Cooperative cancellation: workers poll this event.
        if self.indexing_stop_event:
            self.indexing_stop_event.set()

    def cleanup(self):
        """Release child windows and the DB connection before exit."""
        if self.preferences_window is not None:
            self.preferences_window.cleanup()
        if self.log_window is not None:
            self.log_window.cleanup()
        self.indexDB.disconnect_db()

    def apply_process_changed_setting(self):
        # Hook invoked after preferences change; currently nothing to apply.
        pass
| 11,159 | 3,185 |
from flask import jsonify
from app.api import api
from app.api.helpers import require_api_key_auth
from app.api.services.reports import briefs_service
@api.route('/reports/brief/published', methods=['GET'])
@require_api_key_auth
def get_published_briefs():
    """Return all published briefs together with a total count."""
    items = briefs_service.get_published_briefs()
    return jsonify({
        'items': items,
        'total': len(items)
    })
| 392 | 130 |
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import math
from copy import copy
import numpy as np
def merge_vertices_and_normals(vertices, normals):
    """Interleave per-vertex data: concatenate each vertex with its normal.

    :param vertices: sequence of per-vertex coordinate lists
    :param normals: sequence of per-vertex normal lists (same length)
    :return: list where entry i is ``vertices[i] + normals[i]``
    """
    # IDIOM FIX: zip pairs the two sequences directly instead of indexing
    # by position (range(len(...))).
    return [vertex + normal for vertex, normal in zip(vertices, normals)]
def construct_triangle_sphere(slices, stacks, diameter):
    """Build a UV-sphere triangle mesh.

    src: http://jacksondunstan.com/articles/1904

    :param slices: longitudinal subdivisions
    :param stacks: latitudinal subdivisions
    :param diameter: sphere diameter
    :return: (interleaved vertex+normal data, triangle index list)
    """
    stepTheta = (2.0 * math.pi) / slices
    stepPhi = math.pi / stacks
    verticesPerStack = slices + 1
    positions = []
    normals = []
    triangles = []
    # Pre-compute half the sin/cos of thetas.
    # IDIOM FIX: loop variable renamed from 'slice', which shadows the builtin.
    halfCosThetas = []
    halfSinThetas = []
    curTheta = 0
    for slice_index in range(verticesPerStack):
        halfCosThetas.append(math.cos(curTheta) * 0.5)
        halfSinThetas.append(math.sin(curTheta) * 0.5)
        curTheta += stepTheta
    # Generate positions, sweeping phi from the south pole upward.
    curPhi = math.pi
    for stack in range(stacks + 1):
        curY = math.cos(curPhi) * 0.5 * diameter
        sinCurPhi = math.sin(curPhi)
        for slice_index in range(verticesPerStack):
            point = [halfCosThetas[slice_index] * sinCurPhi * diameter, curY,
                     halfSinThetas[slice_index] * sinCurPhi * diameter]
            positions.append(point)
            # NOTE(review): normals are the raw (unnormalized) positions —
            # presumably normalized downstream or by the renderer; confirm.
            normals.append([point[0], point[1], point[2]])
        curPhi -= stepPhi
    # Generate triangles: two per quad between consecutive stacks.
    lastStackFirstVertexIndex = 0
    curStackFirstVertexIndex = verticesPerStack
    for stack in range(stacks):
        for slice_index in range(slices):
            # Bottom tri of the quad
            a = lastStackFirstVertexIndex + slice_index + 1
            b = curStackFirstVertexIndex + slice_index
            c = lastStackFirstVertexIndex + slice_index
            triangles.append([a, b, c])
            # Top tri of the quad
            a = lastStackFirstVertexIndex + slice_index + 1
            b = curStackFirstVertexIndex + slice_index + 1
            c = curStackFirstVertexIndex + slice_index
            triangles.append([a, b, c])
        lastStackFirstVertexIndex += verticesPerStack
        curStackFirstVertexIndex += verticesPerStack
    data = merge_vertices_and_normals(positions, normals)
    return data, triangles
def construct_quad_box(width, height, depth):
    """Build a quad-based box mesh centred on the origin.

    Each row is [x, y, z, nx, ny, nz]; four rows per face, six faces.
    (FIX: removed a leftover debug print of the box dimensions and a
    commented-out dead data row.)

    :param width: extent along X
    :param height: extent along Y
    :param depth: extent along Z
    :return: float32 array of shape (24, 6)
    """
    data = np.array([
        # north face (-Z)
        [-width / 2, -height / 2, -depth / 2, 0, 0, -1],
        [-width / 2, height / 2, -depth / 2, 0, 0, -1],
        [width / 2, height / 2, -depth / 2, 0, 0, -1],
        [width / 2, -height / 2, -depth / 2, 0, 0, -1],
        # west face (-X)
        [-width / 2, -height / 2, -depth / 2, -1, 0, 0],
        [-width / 2, height / 2, -depth / 2, -1, 0, 0],
        [-width / 2, height / 2, depth / 2, -1, 0, 0],
        [-width / 2, -height / 2, depth / 2, -1, 0, 0],
        # south face (+Z)
        [-width / 2, -height / 2, depth / 2, 0, 0, 1],
        [-width / 2, height / 2, depth / 2, 0, 0, 1],
        [width / 2, height / 2, depth / 2, 0, 0, 1],
        [width / 2, -height / 2, depth / 2, 0, 0, 1],
        # east face (+X)
        [width / 2, -height / 2, -depth / 2, 1, 0, 0],
        [width / 2, height / 2, -depth / 2, 1, 0, 0],
        [width / 2, height / 2, depth / 2, 1, 0, 0],
        [width / 2, -height / 2, depth / 2, 1, 0, 0],
        # bottom face
        [-width / 2, -height / 2, -depth / 2, 0, -1, 0],
        [-width / 2, -height / 2, depth / 2, 0, -1, 0],
        [width / 2, -height / 2, depth / 2, 0, -1, 0],
        [width / 2, -height / 2, -depth / 2, 0, -1, 0],
        # top face
        [-width / 2, height / 2, -depth / 2, 0, 1, 0],
        [-width / 2, height / 2, depth / 2, 0, 1, 0],
        [width / 2, height / 2, depth / 2, 0, 1, 0],
        [width / 2, height / 2, -depth / 2, 0, 1, 0]
    ], 'f')
    return data
def construct_quad_box_based_on_height(width, height, depth):
    """Build a quad-based box mesh whose base sits on the y=0 plane.

    Each row is [x, y, z, nx, ny, nz]; four rows per face, six faces.
    Returns a float32 array of shape (24, 6).
    """
    hw = width / 2
    hd = depth / 2
    data = np.array([
        # north (-Z) face
        [-hw, 0.0, -hd, 0, 0, -1],
        [-hw, height, -hd, 0, 0, -1],
        [hw, height, -hd, 0, 0, -1],
        [hw, 0.0, -hd, 0, 0, -1],
        # west (-X) face
        [-hw, 0.0, -hd, -1, 0, 0],
        [-hw, height, -hd, -1, 0, 0],
        [-hw, height, hd, -1, 0, 0],
        [-hw, 0.0, hd, -1, 0, 0],
        # south (+Z) face
        [-hw, 0.0, hd, 0, 0, 1],
        [-hw, height, hd, 0, 0, 1],
        [hw, height, hd, 0, 0, 1],
        [hw, 0.0, hd, 0, 0, 1],
        # east (+X) face
        [hw, 0.0, -hd, 1, 0, 0],
        [hw, height, -hd, 1, 0, 0],
        [hw, height, hd, 1, 0, 0],
        [hw, 0.0, hd, 1, 0, 0],
        # bottom face (normals +Y, matching the original data)
        [-hw, 0.0, -hd, 0, 1, 0],
        [-hw, 0.0, hd, 0, 1, 0],
        [hw, 0.0, hd, 0, 1, 0],
        [hw, 0.0, -hd, 0, 1, 0],
        # top face (normals -Y, matching the original data)
        [-hw, height, -hd, 0, -1, 0],
        [-hw, height, hd, 0, -1, 0],
        [hw, height, hd, 0, -1, 0],
        [hw, height, -hd, 0, -1, 0]
    ], 'f')
    return data
def construct_triangle_cylinder(slices, radius, length):
    """Build a triangulated cylinder centred on the z axis.

    Caps at z = +length/2 and z = -length/2, with `slices` segments around
    the circumference. Returns (vertex_data, triangles) where vertex_data is
    the merged position+normal rows and `triangles` is a list of index
    triples into it.

    References:
    http://monsterden.net/software/ragdoll-pyode-tutorial
    http://wiki.unity3d.com/index.php/ProceduralPrimitives

    NOTE(review): the cap at +z mixes normals — its centre vertex gets
    (0, 0, -1) while its rim vertices get (0, 0, 1); one of the two looks
    wrong for a flat cap. Also, the side and -z-cap triangle loops continue
    from the running v_idx of the previous section rather than from the first
    vertex of their own section — verify the fan/strip indexing against the
    referenced tutorials before changing anything.
    """
    half_length = length / 2.0
    vertices = []
    normals = []
    triangles = []
    v_idx = 0
    #bottom
    # Cap at +z: one centre vertex followed by slices+1 rim vertices.
    vertices.append([0, 0, half_length])
    normals.append([0, 0, -1])
    for i in range(0, slices+1):
        angle = i / float(slices) * 2.0 * np.pi
        ca = np.cos(angle)
        sa = np.sin(angle)
        vertices.append([radius * ca, radius * sa, half_length])
        normals.append([0, 0, 1])
    # Triangle fan from the centre vertex over the rim.
    for idx in range(0, slices):
        triangles.append([0, v_idx+1, v_idx+2])
        v_idx += 1
    #sides
    # Paired +z/-z vertices with outward-facing radial normals.
    for i in range(0, slices+1):
        angle = i / float(slices) * 2.0 * np.pi
        ca = np.cos(angle)
        sa = np.sin(angle)
        vertices.append([radius * ca, radius * sa, half_length])
        vertices.append([radius * ca, radius * sa, -half_length])
        normals.append([ca, sa, 0])
        normals.append([ca, sa, 0])
    # Triangle strip over the paired side vertices.
    for idx in range(0, slices*2):
        triangles.append([v_idx, v_idx + 1, v_idx + 2])
        v_idx += 1
    #top
    # Cap at -z: centre vertex plus rim, all facing -z.
    start = len(vertices)
    vertices.append([0, 0, -half_length])
    normals.append([0, 0, -1])
    for i in range(0, slices+1):
        angle = i / float(slices) * 2.0 * np.pi
        ca = np.cos(angle)
        sa = np.sin(angle)
        vertices.append([radius * ca, radius * sa, -half_length])
        normals.append([0, 0, -1])
    # Triangle fan for the -z cap.
    for idx in range(0, slices):
        triangles.append([start, v_idx+1, v_idx + 2])
        v_idx += 1
    return merge_vertices_and_normals(vertices, normals), triangles
def construct_triangle_capsule(slices, stacks, diameter, length, direction="z"):
    """Build a capsule mesh by splitting a sphere into two halves and pulling
    them apart along the y axis, then rotating into the requested axis.

    Args:
        slices, stacks: sphere tessellation parameters.
        diameter: sphere diameter.
        length: separation between the two hemisphere centres.
        direction: capsule axis, "x", "y" or "z" (default "z").

    Returns:
        (data, triangles): transformed vertex rows and triangle index list.
    """
    data, triangles = construct_triangle_sphere(slices, stacks, diameter)
    data = np.array(data)
    # Shift the first half of the vertex rows down and the second half up
    # along y — presumably the sphere builder emits one hemisphere per half;
    # confirm against construct_triangle_sphere.
    mid = int(len(data) / 2.0)
    offset = length / 2
    data[:mid, 1] -= offset
    data[mid:, 1] += offset
    if direction == "x":
        rotation = np.array([[0, 1, 0],
                             [1, 0, 0],
                             [0, 0, -1]])
        data = transform_vertex_data(data, rotation)
    elif direction == "z":
        rotation = np.array([[1, 0, 0],
                             [0, 0, -1],
                             [0, 1, 0]])
        data = transform_vertex_data(data, rotation)
    return data, triangles
def transform_vertex_data(data, m):
    """Apply the 3x3 matrix `m` to each vertex row.

    Every row is treated as [x, y, z, nx, ny, nz]; the matrix is applied to
    the position (first three components) and the normal (last three)
    independently.

    Args:
        data: iterable of length-6 numeric rows.
        m: 3x3 transformation matrix.

    Returns:
        list of transformed rows as float numpy arrays.
    """
    result = []
    for row in data:
        out = np.zeros(6)
        out[:3] = np.dot(m, row[:3])[:3]
        out[3:] = np.dot(m, row[3:])[:3]
        result.append(out)
    return result
| 8,903 | 3,508 |
# MIT LICENSE Copyright (c) 2018 David Longnecker
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# Module-level enum table; initialization code runs only once per import.
_enum_counter = 0
_enum_strs = []

# Interning keeps value assignment in one place, so enum values can be
# reordered at will by reordering the _intern calls below.
def _intern(string):
    """Register `string` in the enum table and return its ordinal value."""
    global _enum_counter
    _enum_strs.append(string)
    value = _enum_counter
    _enum_counter += 1
    return value
# Reverse lookup: enum value -> interned token-type name.
def get_tokentype_str(toktype):
    """Return the name interned for `toktype`, or None when out of range."""
    if 0 <= toktype < len(_enum_strs):
        return _enum_strs[toktype]
    return None
# These must appear in _strict_ order corresponding to opcode value.
# -- loads/stores and stack manipulation --
t_op_nop = _intern('op:nop')
t_op_ldl = _intern('op:ldl')
t_op_stl = _intern('op:stl')
t_op_ldg = _intern('op:ldg')
t_op_stg = _intern('op:stg')
t_op_lfd = _intern('op:lfd')
t_op_sfd = _intern('op:sfd')
t_op_ldsc = _intern('op:ldsc')
t_op_pop = _intern('op:pop')
t_op_swp = _intern('op:swp')
t_op_dup = _intern('op:dup')
# -- push / parameter opcodes, one per operand width --
t_op_psh_b = _intern('op:psh_b')
t_op_psh_s = _intern('op:psh_s')
t_op_psh_d = _intern('op:psh_d')
t_op_psh_q = _intern('op:psh_q')
t_op_psh_f = _intern('op:psh_f')
t_op_psh_a = _intern('op:psh_a')
t_op_psh_nil = _intern('op:psh_nil')
t_op_par_b = _intern('op:par_b')
t_op_par_s = _intern('op:par_s')
t_op_par_d = _intern('op:par_d')
t_op_par_q = _intern('op:par_q')
t_op_par_f = _intern('op:par_f')
t_op_par_a = _intern('op:par_a')
# -- arrays --
t_op_lai = _intern('op:lai')
t_op_sai = _intern('op:sai')
t_op_alen = _intern('op:alen')
# -- bitwise and arithmetic --
t_op_and = _intern('op:and')
t_op_or = _intern('op:or')
t_op_xor = _intern('op:xor')
t_op_not = _intern('op:not')
t_op_shl = _intern('op:shl')
t_op_shr = _intern('op:shr')
t_op_add_q = _intern('op:add_q')
t_op_sub_q = _intern('op:sub_q')
t_op_mul_q = _intern('op:mul_q')
t_op_div_q = _intern('op:div_q')
t_op_mod_q = _intern('op:mod_q')
t_op_neg_q = _intern('op:neg_q')
t_op_add_f = _intern('op:add_f')
t_op_sub_f = _intern('op:sub_f')
t_op_mul_f = _intern('op:mul_f')
t_op_div_f = _intern('op:div_f')
t_op_mod_f = _intern('op:mod_f')
t_op_neg_f = _intern('op:neg_f')
# -- casts, comparisons, control flow --
t_op_cst_qf = _intern('op:cst_qf')
t_op_cst_fq = _intern('op:cst_fq')
t_op_cmp_q = _intern('op:cmp_q')
t_op_cmp_f = _intern('op:cmp_f')
t_op_refcmp = _intern('op:refcmp')
t_op_jmp_eqz = _intern('op:jmp_eqz')
t_op_jmp_nez = _intern('op:jmp_nez')
t_op_jmp_ltz = _intern('op:jmp_ltz')
t_op_jmp_lez = _intern('op:jmp_lez')
t_op_jmp_gtz = _intern('op:jmp_gtz')
t_op_jmp_gez = _intern('op:jmp_gez')
t_op_jmp = _intern('op:jmp')
t_op_typeof = _intern('op:typeof')
t_op_call = _intern('op:call')
t_op_ret = _intern('op:ret')
t_op_leave = _intern('op:leave')
t_op_break = _intern('op:break')
t_op_throw = _intern('op:throw')
# Additional values to be used in the lexer/parser!
t_eof = _intern('eof')
t_unknown = _intern('unknown')
# Recognized whitespace tokens.
t_comment = _intern('comment')
t_spaces = _intern('spaces')
# LL(1) formatting characters.
t_newline = _intern('newline')
t_tab = _intern('tab')
# LL(1) braces and brackets.
t_lparen = _intern('lparen')
t_rparen = _intern('rparen')
t_lbrace = _intern('lbrace')
t_rbrace = _intern('rbrace')
t_lbracket = _intern('lbracket')
t_rbracket = _intern('rbracket')
# LL(1) comparison operators.
t_less = _intern('less')
t_greater = _intern('greater')
# LL(1) punctuation characters.
t_semicolon = _intern('semicolon')
t_comma = _intern('comma')
t_period = _intern('period')
t_colon = _intern('colon')
# LL(1) operators and meta symbols.
t_assign = _intern('assign')
t_star = _intern('star')
t_fslash = _intern('fslash')
t_percent = _intern('percent')
t_amper = _intern('amper')
t_at = _intern('at')
t_dollar = _intern('dollar')
# Literal values.
t_int = _intern('int')
t_str = _intern('str')
t_flt = _intern('flt')
t_hex = _intern('hex')
t_bin = _intern('bin')
# There's gonna be a whole lotta these!
t_symbol = _intern('symbol')
# Additional assembler keywords.
t_method = _intern('kw:method')
t_object = _intern('kw:object')
t_try = _intern('kw:try')
t_except = _intern('kw:except')
t_void = _intern('kw:void')
# Relies on opcode tokens being interned first!
def get_opcode_str(op):
    """Return the mnemonic for opcode value `op`, or 'unknown' when out of range.

    Bug fix: the upper bound previously referenced `t_op_eox`, a name that is
    never defined anywhere in this module (so any out-of-range check raised
    NameError at runtime). `t_op_throw` is the last interned opcode, so it is
    the correct inclusive upper bound.
    """
    if op < t_op_nop or op > t_op_throw:
        return 'unknown'
    return get_tokentype_str(op)
# Tokens recognised as assembler keywords.
_keywords = [
    t_method,
    t_object,
    t_try,
    t_except,
    t_void
]
# Tokens skipped as insignificant whitespace.
_whitespace = [
    t_comment,
    t_spaces,
    t_newline,
    t_tab
]
# Literal-value token types.
_literals = [
    t_int,
    t_str,
    t_flt,
    t_hex,
    t_bin
]
# Every opcode token, in opcode order (mirrors the interning order above).
_instruction = [
    t_op_nop,
    t_op_ldl,
    t_op_stl,
    t_op_ldg,
    t_op_stg,
    t_op_lfd,
    t_op_sfd,
    t_op_ldsc,
    t_op_pop,
    t_op_swp,
    t_op_dup,
    t_op_psh_b,
    t_op_psh_s,
    t_op_psh_d,
    t_op_psh_q,
    t_op_psh_f,
    t_op_psh_a,
    t_op_psh_nil,
    t_op_par_b,
    t_op_par_s,
    t_op_par_d,
    t_op_par_q,
    t_op_par_f,
    t_op_par_a,
    t_op_lai,
    t_op_sai,
    t_op_alen,
    t_op_and,
    t_op_or,
    t_op_xor,
    t_op_not,
    t_op_shl,
    t_op_shr,
    t_op_add_q,
    t_op_sub_q,
    t_op_mul_q,
    t_op_div_q,
    t_op_mod_q,
    t_op_neg_q,
    t_op_add_f,
    t_op_sub_f,
    t_op_mul_f,
    t_op_div_f,
    t_op_mod_f,
    t_op_neg_f,
    t_op_cst_qf,
    t_op_cst_fq,
    t_op_cmp_q,
    t_op_cmp_f,
    t_op_refcmp,
    t_op_jmp_eqz,
    t_op_jmp_nez,
    t_op_jmp_ltz,
    t_op_jmp_lez,
    t_op_jmp_gtz,
    t_op_jmp_gez,
    t_op_jmp,
    t_op_typeof,
    t_op_call,
    t_op_ret,
    t_op_leave,
    t_op_break,
    t_op_throw
]
# Conditional and unconditional jump opcodes.
_jump = [
    t_op_jmp_eqz,
    t_op_jmp_nez,
    t_op_jmp_ltz,
    t_op_jmp_lez,
    t_op_jmp_gtz,
    t_op_jmp_gez,
    t_op_jmp
]
# Opcodes whose operand refers to an interned value/symbol.
_interned_arg = [
    t_op_psh_a,
    t_op_par_a,
    t_op_call,
    t_op_ldsc,
    t_op_psh_q,
    t_op_psh_f
]
# Immediate-operand widths, one list per encoding.
_has_immediate_u8 = [
    t_op_ldl,
    t_op_stl
]
_has_immediate_u16 = [
    t_op_ldg,
    t_op_stg,
    t_op_lfd,
    t_op_sfd
]
_has_immediate_u32 = _jump + [
    t_op_psh_a,
    t_op_par_a,
    t_op_call,
    t_op_ldsc,
    t_op_psh_q,
    t_op_psh_f
]
_has_immediate_u64 = []
_has_immediate_i8 = [ t_op_psh_b ]
_has_immediate_i16 = [ t_op_psh_s ]
_has_immediate_i32 = [ t_op_psh_d ]
_has_immediate_i64 = []
_has_immediate_f32 = []
_has_immediate_f64 = []
# Union of every opcode that carries an immediate operand of any width.
_has_immediate = (
    _has_immediate_u8 +
    _has_immediate_u16 +
    _has_immediate_u32 +
    _has_immediate_u64 +
    _has_immediate_i8 +
    _has_immediate_i16 +
    _has_immediate_i32 +
    _has_immediate_i64 +
    _has_immediate_f32 +
    _has_immediate_f64
)
# Tokens that can have varying values.
_non_static = _literals + [t_symbol] + [t_comment] + [t_spaces]
# ---- Membership predicates over the token classes defined above ----
def is_keyword(v):
    return v in _keywords
def is_literal(v):
    return v in _literals
def is_non_static(v):
    return v in _non_static
def is_whitespace(v):
    return v in _whitespace
def is_instruction(v):
    return v in _instruction
def is_jump(v):
    return v in _jump
def has_interned_arg(v):
    return v in _interned_arg
# One predicate per immediate-operand width, plus a catch-all at the end.
def has_immediate_u8(v):
    return v in _has_immediate_u8
def has_immediate_u16(v):
    return v in _has_immediate_u16
def has_immediate_u32(v):
    return v in _has_immediate_u32
def has_immediate_u64(v):
    return v in _has_immediate_u64
def has_immediate_i8(v):
    return v in _has_immediate_i8
def has_immediate_i16(v):
    return v in _has_immediate_i16
def has_immediate_i32(v):
    return v in _has_immediate_i32
def has_immediate_i64(v):
    return v in _has_immediate_i64
def has_immediate_f32(v):
    return v in _has_immediate_f32
def has_immediate_f64(v):
    return v in _has_immediate_f64
def has_immediate(v):
    return v in _has_immediate
| 9,212 | 4,128 |
# Copyright 2021 CR.Sparse Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce
import jax.numpy as jnp
from .lop import Operator
def reshape(in_shape, out_shape):
    """Returns a linear operator which reshapes vectors from model space to data space

    Args:
        in_shape (tuple): Shape of arrays in the model space
        out_shape (tuple): Shape of arrays in the data space

    Returns:
        (Operator): A reshaping linear operator

    Raises:
        AssertionError: if the two shapes describe different element counts
    """
    n_in = jnp.prod(jnp.array(in_shape))
    n_out = jnp.prod(jnp.array(out_shape))
    assert n_in == n_out, "Input and output size must be equal"
    forward = lambda x: jnp.reshape(x, out_shape)
    adjoint = lambda x: jnp.reshape(x, in_shape)
    return Operator(times=forward, trans=adjoint, shape=(out_shape, in_shape))
def arr2vec(shape):
    """Returns a linear operator which flattens arrays to vectors

    Args:
        shape (tuple): Shape of arrays in the model space

    Returns:
        (Operator): An array to vec linear operator
    """
    n = reduce((lambda a, b: a * b), shape)
    flatten = lambda x: jnp.reshape(x, (n,))
    unflatten = lambda x: jnp.reshape(x, shape)
    return Operator(times=flatten, trans=unflatten, shape=((n,), shape))
| 1,803 | 570 |
# Environment-specific Django settings layered on top of the shared base.
from .base import *

# Extra apps for this deployment: debug toolbar, Zappa helpers (S3-backed
# SQLite database backend) and django-storages (static files on S3).
INSTALLED_APPS += [
    'debug_toolbar',
    'zappa_django_utils',
    'storages',
]
MIDDLEWARE += [
    'debug_toolbar.middleware.DebugToolbarMiddleware',
]
# IPs for which the debug toolbar is displayed.
INTERNAL_IPS = [
    '127.0.0.1',
]
# SQLite database file persisted in an S3 bucket (zappa_django_utils backend).
DATABASES = {
    'default': {
        'ENGINE': 'zappa_django_utils.db.backends.s3sqlite',
        'NAME': 'sqlite.db',
        'BUCKET': 'innuylambda'
    }
}
# NOTE(review): '*' accepts any Host header — common behind API Gateway /
# Lambda, but confirm this is intended for this deployment.
ALLOWED_HOSTS = ['*']
# Static files are collected to and served directly from S3.
AWS_STORAGE_BUCKET_NAME = 'innuylambda-static'
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
STATIC_URL = "https://%s/" % AWS_S3_CUSTOM_DOMAIN
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
import torch
import torch.nn as nn
import torchvision
class UNET(nn.Module):
    """U-Net encoder/decoder producing a single-channel sigmoid mask.

    Contracting path: conv blocks of 16, 32, 64 and 128 channels, each
    followed by 2x2 max-pooling and dropout; a 256-channel bottleneck; then
    an expanding path of transposed convolutions whose outputs are
    concatenated with the matching contracting features (skip connections).
    A final 1x1 convolution plus sigmoid yields per-pixel probabilities.
    """

    # NOTE(review): not referenced inside this class — presumably indices used
    # by external code to locate pooling layers; confirm before removing.
    THIRD_POOLING_INDEX = 16
    FORTH_POOLING_INDEX = 23

    def __init__(self, n_class = 1):
        """Build all layers. NOTE(review): `n_class` is currently unused —
        the output head is a fixed 1-channel conv; confirm intent."""
        super(UNET, self).__init__()
        # Contracting Path
        self.c1 = UNET.get_conv2d_block(3, 16, 3, 1)
        self.p1 = nn.MaxPool2d(2)
        self.d1 = nn.Dropout2d()
        self.c2 = UNET.get_conv2d_block(16, 32, 3, 1)
        self.p2 = nn.MaxPool2d(2)
        self.d2 = nn.Dropout2d()
        self.c3 = UNET.get_conv2d_block(32, 64, 3, 1)
        self.p3 = nn.MaxPool2d(2)
        self.d3 = nn.Dropout2d()
        self.c4 = UNET.get_conv2d_block(64, 128, 3, 1)
        self.p4 = nn.MaxPool2d(2)
        self.d4 = nn.Dropout2d()
        # Bottleneck
        self.c5 = UNET.get_conv2d_block(128, 256, 3, 1)
        # Expanding path: upsample, dropout, then conv block on the
        # concatenation of the upsampled features and the skip connection.
        self.u6 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2, padding=0)
        self.d6 = nn.Dropout2d()
        self.c6 = UNET.get_conv2d_block(256, 128, 3, 1)
        self.u7 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2, padding=0)
        self.d7 = nn.Dropout2d()
        self.c7 = UNET.get_conv2d_block(128, 64, 3, 1)
        self.u8 = nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2, padding=0)
        self.d8 = nn.Dropout2d()
        self.c8 = UNET.get_conv2d_block(64, 32, 3, 1)
        self.u9 = nn.ConvTranspose2d(32, 16, kernel_size=2, stride=2, padding=0)
        self.d9 = nn.Dropout2d()
        self.c9 = UNET.get_conv2d_block(32, 16, 3, 1)
        # 1x1 conv head + sigmoid (Keras equivalent: Conv2D(1, (1, 1), activation='sigmoid'))
        self.c10 = nn.Conv2d(16, 1, 1)
        self.activation = nn.Sigmoid()

    def forward(self, batch):
        """Run the full network.

        Args:
            batch: tensor of shape (N, 3, H, W); H and W must be divisible
                by 16 so the four pooling/upsampling stages line up for the
                skip concatenations.

        Returns:
            Sigmoid activations of shape (N, 1, H, W).
        """
        # Contracting path; keep each block's output for its skip connection.
        c1_output = self.c1(batch)
        h = self.d1(self.p1(c1_output))
        c2_output = self.c2(h)
        h = self.d2(self.p2(c2_output))
        c3_output = self.c3(h)
        h = self.d3(self.p3(c3_output))
        c4_output = self.c4(h)
        h = self.d4(self.p4(c4_output))
        h = self.c5(h)
        # Expanding path: upsample, concatenate the skip features on the
        # channel axis, dropout, conv block.
        h = self.c6(self.d6(torch.cat((self.u6(h), c4_output), dim=1)))
        h = self.c7(self.d7(torch.cat((self.u7(h), c3_output), dim=1)))
        h = self.c8(self.d8(torch.cat((self.u8(h), c2_output), dim=1)))
        h = self.c9(self.d9(torch.cat((self.u9(h), c1_output), dim=1)))
        return self.activation(self.c10(h))

    @staticmethod
    def get_conv2d_block(input_size, output_size, kernel_size, padding):
        """Return a (conv -> batchnorm -> ReLU) x2 block.

        Bug fix: both ReLU modules were previously registered under the same
        name ('relu0'); nn.Sequential.add_module silently overwrites an
        existing name, so the block ended up with only five modules and no
        activation after the second batchnorm. The activations are now
        registered as 'relu_0' and 'relu_1'.
        """
        conv2d_block = nn.Sequential()
        conv2d = nn.Conv2d(input_size, output_size, kernel_size = kernel_size, padding=padding)
        conv2d_block.add_module('conv_0', conv2d)
        conv2d_block.add_module('batchnorm_0', nn.BatchNorm2d(output_size))
        conv2d_block.add_module('relu_0', nn.ReLU())
        conv2d_2 = nn.Conv2d(output_size, output_size, kernel_size=kernel_size, padding=padding)
        conv2d_block.add_module('conv_1', conv2d_2)
        conv2d_block.add_module('batchnorm_1', nn.BatchNorm2d(output_size))
        conv2d_block.add_module('relu_1', nn.ReLU())
        return conv2d_block
import frappe
import os
import json
import datetime
import uuid
@frappe.whitelist(allow_guest=True)
def clustering_and_scheduling():
    """HTTP endpoint: create a DRS document for every trip in the payload."""
    trips = json.loads(frappe.local.request.values["inputData"])
    for entry in trips:
        build_trip(entry)
def build_trip(dat):
    """Create and commit one DRS document from a raw trip payload dict."""
    doc = frappe.get_doc({
        "doctype": "DRS",
        "trip_name": dat["tripName"],
        "status": "Clustering And Scheduling",
        "driver_name": dat["driverName"],
        "vehicle": dat["vehicle"],
        "shipment_details": parse_shipment_details(dat["shipmentDetails"]),
    })
    doc.insert()
    frappe.db.commit()
def parse_shipment_details(shdetails):
    """Map raw shipment payload dicts to DRS child-table row dicts.

    Args:
        shdetails: list of dicts with keys latitude, longitude,
            clientShipmentId and deliveryOrder.

    Returns:
        list of row dicts keyed by the child table's field names.
    """
    result = []
    for shipment in shdetails:
        result.append({
            "latitude": shipment["latitude"],
            "longitude": shipment["longitude"],
            "awb": shipment["clientShipmentId"],
            "delivery_order": shipment["deliveryOrder"],
            # Bug fix: the key was previously capitalised ("Status"), which
            # does not match the lowercase `status` field written elsewhere
            # (e.g. tx.status = "Loaded"), so the initial value never landed.
            "status": "Unknown",
        })
    return result
@frappe.whitelist(allow_guest=True)
def dispatch_start_trip():
    """HTTP endpoint: mark the trip named in the payload as started."""
    payload = json.loads(frappe.local.request.values["inputData"])
    start_trip(payload)
def start_trip(trip):
    """Set the DRS document matching trip["tripName"] to status "Start Trip"."""
    matches = frappe.get_list("DRS", fields=["*"],
                              filters={"trip_name": trip["tripName"]})
    doc = frappe.get_doc("DRS", matches[0]["name"])
    doc.status = "Start Trip"
    doc.save()
@frappe.whitelist(allow_guest=True)
def load_items():
    """HTTP endpoint: mark a shipment (by clientShipmentId) as loaded."""
    payload = json.loads(frappe.local.request.values["inputData"])
    matches = frappe.get_list("Shipment Details", fields=["*"],
                              filters={"awb": payload["clientShipmentId"]})
    doc = frappe.get_doc("Shipment Details", matches[0]["name"])
    doc.status = "Loaded"
    doc.save()
@frappe.whitelist(allow_guest=True)
def pickup():
    """HTTP endpoint: mark a shipment (by clientShipmentId) as picked up."""
    payload = json.loads(frappe.local.request.values["inputData"])
    matches = frappe.get_list("Shipment Details", fields=["*"],
                              filters={"awb": payload["clientShipmentId"]})
    doc = frappe.get_doc("Shipment Details", matches[0]["name"])
    doc.status = "Picked Up"
    doc.save()
@frappe.whitelist(allow_guest=True)
def delivery_notification():
    """HTTP endpoint: record a successful delivery for one shipment."""
    payload = json.loads(frappe.local.request.values["inputData"])
    set_deliverd(payload, "Delivered")
def set_deliverd(parcel, status):
    """Update a shipment's status and last reported coordinates.

    Args:
        parcel: dict with clientShipmentId, latitude and longitude.
        status: status string to store on the Shipment Details document.
    """
    matches = frappe.get_list("Shipment Details", fields=["*"],
                              filters={"awb": parcel["clientShipmentId"]})
    doc = frappe.get_doc("Shipment Details", matches[0]["name"])
    doc.status = status
    doc.latitude = parcel["latitude"]
    doc.longitude = parcel["longitude"]
    doc.save()
@frappe.whitelist(allow_guest=True)
def not_deliverd_notification():
    """HTTP endpoint: record a failed delivery attempt for one shipment."""
    payload = json.loads(frappe.local.request.values["inputData"])
    set_deliverd(payload, "Not Delivered")
@frappe.whitelist(allow_guest=True)
def partial_delivery_notification():
    """HTTP endpoint: record a partial delivery for one shipment."""
    payload = json.loads(frappe.local.request.values["inputData"])
    set_deliverd(payload, "Partial Delivery")
@frappe.whitelist(allow_guest=True)
def arrival_end_trip():
    """HTTP endpoint: mark the trip named in the payload as ended."""
    payload = json.loads(frappe.local.request.values["inputData"])
    matches = frappe.get_list("DRS", fields=["*"],
                              filters={"trip_name": payload["tripName"]})
    doc = frappe.get_doc("DRS", matches[0]["name"])
    doc.status = "End Trip"
    doc.save()
#---------------------------------------------THROW-----------------------------
@frappe.whitelist(allow_guest=True)
def clear_all_cache():
    """HTTP endpoint: flush frappe's cache and return a confirmation string."""
    frappe.clear_cache()
    # Fixed typo in the response text ("cleard" -> "cleared").
    return "cache cleared"
#@frappe.whitelist(allow_guest=True)
#def arrival_end_trip():
# open(os.path.expanduser("~/erp_data/arrival_end_trip.json"),
# "a").write(frappe.local.request.data + "\n")
@frappe.whitelist(allow_guest=True)
def accept():
    """HTTP endpoint: append the raw request body to ~/erp_data/accept.json.

    Bug fix: the handle returned by open() was never closed; the write now
    happens inside a `with` block so the file is flushed and closed
    deterministically.
    """
    # NOTE(review): frappe.local.request.data may be bytes depending on the
    # framework version, in which case `+ "\n"` would raise — confirm.
    with open(os.path.expanduser("~/erp_data/accept.json"), "a") as log:
        log.write(frappe.local.request.data + "\n")
@frappe.whitelist(allow_guest=True)
def reject():
    """HTTP endpoint: append the raw request body to ~/erp_data/reject.json.

    Bug fix: the handle returned by open() was never closed; the write now
    happens inside a `with` block so the file is flushed and closed
    deterministically.
    """
    with open(os.path.expanduser("~/erp_data/reject.json"), "a") as log:
        log.write(frappe.local.request.data + "\n")
@frappe.whitelist(allow_guest=True)
def clustering_updates():
    """HTTP endpoint: append the raw request body to
    ~/erp_data/clustering_updates.json.

    Bug fix: the handle returned by open() was never closed; the write now
    happens inside a `with` block so the file is flushed and closed
    deterministically.
    """
    with open(os.path.expanduser("~/erp_data/clustering_updates.json"), "a") as log:
        log.write(frappe.local.request.data + "\n")
#CombinedMultiDict([ImmutableMultiDict([]), ImmutableMultiDict([('inputData', u'[{"tripName":"TRIP-32","deliveryMediumName":"MEHUL","driverName":"","vehicle":"","shipmentDetails":[{"latitude":19.199272,"longitude":72.857732,"clientShipmentId":"222222201","deliveryOrder":4},{"latitude":19.199272,"longitude":72.857732,"clientShipmentId":"112000003","deliveryOrder":5},{"latitude":19.1076375,"longitude":72.8655789,"clientShipmentId":"test_order","deliveryOrder":6}]}]')])])
| 4,538 | 1,680 |
from __future__ import annotations
from typing import List, TypeVar, Dict
from sqlalchemy.engine import base
from sqlalchemy.ext.declarative import declarative_base
from logic_bank.exec_row_logic.logic_row import LogicRow
class RowSets():
    """
    Row bookkeeping for one transaction.

    * processed_rows: maps each row instance to its LogicRow (no duplicates);
      consulted later to drive commit events/constraints.
    * submitted_row: rows handed over directly by the client; adjustments
      skip rows found here.

    Relies on SQLAlchemy returning the same instance for repeated queries.
    """

    def __init__(self):
        self.processed_rows = {}  # type: Dict[base, 'LogicRow']
        self.submitted_row = set()

    def add_processed(self, logic_row: 'LogicRow'):
        """
        Record a row for commit-time events/constraints (first sighting wins).
        """
        self.processed_rows.setdefault(logic_row.row, logic_row)

    def add_submitted(self, row: base):
        """Remember a row the client submitted directly."""
        self.submitted_row.add(row)

    def is_submitted(self, row: base) -> bool:
        """Return True iff the client submitted this row directly."""
        return row in self.submitted_row

    def remove_submitted(self, logic_row: LogicRow):
        """Drop the client-submitted marker for this row, if present."""
        self.submitted_row.discard(logic_row.row)
| 1,365 | 414 |
"""
A collection of array-based algorithms
1. Find maximum sub-array
- https://en.wikipedia.org/wiki/Maximum_subarray_problem
Author: Matthew R. DeVerna
"""
from .utils import check_array
def max_subarray_kadane(given_array):
    """
    Find a contiguous subarray with the largest sum.

    Note: This algorithm is implemented with Kadane's algorithm with a slight
    change (we do not add 1 to the best_end)
    - https://en.wikipedia.org/wiki/Maximum_subarray_problem#Kadane's_algorithm

    Complexity:
    ----------
    - O(n)

    Parameters:
    ----------
    - given_array (list) : a numerical sequence

    Returns:
    ----------
    - best_sum (int) : the total sum between `best_start` and `best_end`
    - best_start (int) : the first index in the largest sub-array (inclusive)
    - best_end (int) : the last index in the largest sub-array (inclusive)

    For an empty input the function returns (float('-inf'), None, None).

    Exceptions:
    ----------
    - TypeError

    Example:
    ----------
    lst = [-45, -78, -2, -60, 27, 21, 71, 80, 22, 59]
    max_subarray_kadane(lst)
    # Output
    (280, 4, 9)

    Where 280 is the sum between lst[4] (27, inclusive) and lst[9] (59, inclusive)
    """
    # Ensure array is a list and contains only numeric values
    check_array(given_array)
    best_sum = float('-inf')
    best_start = best_end = None
    current_sum = 0
    for current_end, x in enumerate(given_array):
        if current_sum <= 0:
            # Start a new sequence at the current element
            current_start = current_end
            current_sum = x
        else:
            # Extend the existing sequence with the current element
            current_sum += x
        if current_sum > best_sum:
            # New best window: remember its sum and boundaries.
            best_sum = current_sum
            best_start = current_start
            best_end = current_end
    return best_sum, best_start, best_end
########################################################################################
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################################
from Logger import *
class ContractUtils:
    """Price helpers for option contracts against the algorithm context
    (QuantConnect-style API: context.Securities, GetLastKnownPrice)."""

    def __init__(self, context):
        # Set the context
        self.context = context
        # Class-scoped logger honouring the context's log level
        self.logger = Logger(context, className = type(self).__name__, logLevel = context.logLevel)

    def getUnderlyingLastPrice(self, contract):
        """Return the latest price of the contract's underlying.

        Prefers the tracked security in context.Securities (pulling the last
        known price); otherwise falls back to the contract's own
        UnderlyingLastPrice attribute.

        Bug fix: when the underlying symbol was not present in
        context.Securities the original fell through and implicitly returned
        None; it now falls back to contract.UnderlyingLastPrice in that case
        as well.
        """
        context = self.context
        security = None
        if contract.UnderlyingSymbol in context.Securities:
            security = context.Securities[contract.UnderlyingSymbol]
        if security is not None:
            # Get the last known price of the tracked security
            return context.GetLastKnownPrice(security).Price
        # Fall back to the attribute carried by the contract itself
        return contract.UnderlyingLastPrice

    def getSecurity(self, contract):
        """Return the Securities entry for the contract when available,
        else the contract itself (both expose Bid/Ask price attributes)."""
        Securities = self.context.Securities
        if hasattr(contract, "Symbol") and contract.Symbol in Securities:
            return Securities[contract.Symbol]
        return contract

    def midPrice(self, contract):
        """Mid-point of the bid/ask quote for the contract."""
        security = self.getSecurity(contract)
        return 0.5*(security.BidPrice + security.AskPrice)

    def bidAskSpread(self, contract):
        """Absolute bid/ask spread for the contract."""
        security = self.getSecurity(contract)
        return abs(security.AskPrice - security.BidPrice)
| 2,922 | 664 |
from functools import reduce
import numpy as np
import pandas as pd
import pyprind
from .enums import *
class Backtest:
"""Backtest runner class."""
def __init__(self, allocation, initial_capital=1_000_000, shares_per_contract=100):
assets = ('stocks', 'options', 'cash')
total_allocation = sum(allocation.get(a, 0.0) for a in assets)
self.allocation = {}
for asset in assets:
self.allocation[asset] = allocation.get(asset, 0.0) / total_allocation
self.initial_capital = initial_capital
self.stop_if_broke = True
self.shares_per_contract = shares_per_contract
self._stocks = []
self._options_strategy = None
self._stocks_data = None
self._options_data = None
@property
def stocks(self):
return self._stocks
@stocks.setter
def stocks(self, stocks):
assert np.isclose(sum(stock.percentage for stock in stocks), 1.0,
atol=0.000001), 'Stock percentages must sum to 1.0'
self._stocks = list(stocks)
return self
    @property
    def options_strategy(self):
        """Options strategy evaluated by `run` (its schema must match the options data)."""
        return self._options_strategy

    @options_strategy.setter
    def options_strategy(self, strat):
        self._options_strategy = strat
    @property
    def stocks_data(self):
        """Stock price dataset used by the backtest."""
        return self._stocks_data

    @stocks_data.setter
    def stocks_data(self, data):
        # Cache the dataset's schema for column-name lookups in the helpers.
        self._stocks_schema = data.schema
        self._stocks_data = data
    @property
    def options_data(self):
        """Options price dataset used by the backtest."""
        return self._options_data

    @options_data.setter
    def options_data(self, data):
        # Cache the dataset's schema for compatibility checks in `run`.
        self._options_schema = data.schema
        self._options_data = data
    def run(self, rebalance_freq=0, monthly=False, sma_days=None):
        """Runs the backtest and returns a `pd.DataFrame` of the orders executed (`self.trade_log`)

        Args:
            rebalance_freq (int, optional): Determines the frequency of portfolio rebalances. Defaults to 0.
            monthly (bool, optional): Iterates through data monthly rather than daily. Defaults to False.
            sma_days (int, optional): If set, an SMA of that window is computed on the
                stock data and `_buy_stocks` only buys names trading above their SMA.

        Returns:
            pd.DataFrame: Log of the trades executed.

        NOTE(review): with the default rebalance_freq=0, `rebalancing_days` is
        an empty list and `rebalancing_days[-1]` below raises IndexError —
        confirm callers always pass rebalance_freq >= 1.
        """
        # Preconditions: both datasets set, every portfolio symbol present,
        # and the strategy schema matching the options data schema.
        assert self._stocks_data, 'Stock data not set'
        assert all(stock.symbol in self._stocks_data['symbol'].values
                   for stock in self._stocks), 'Ensure all stocks in portfolio are present in the data'
        assert self._options_data, 'Options data not set'
        assert self._options_strategy, 'Options Strategy not set'
        assert self._options_data.schema == self._options_strategy.schema
        option_dates = self._options_data['date'].unique()
        stock_dates = self.stocks_data['date'].unique()
        assert np.array_equal(stock_dates,
                              option_dates), 'Stock and options dates do not match (check that TZ are equal)'
        self._initialize_inventories()
        self.current_cash = self.initial_capital
        self.trade_log = pd.DataFrame()
        # Seed the balance frame one day before the data starts.
        self.balance = pd.DataFrame({
            'total capital': self.current_cash,
            'cash': self.current_cash
        },
                                    index=[self.stocks_data.start_date - pd.Timedelta(1, unit='day')])
        if sma_days:
            self.stocks_data.sma(sma_days)
        # First trading day of every rebalance_freq-month business period.
        dates = pd.DataFrame(self.options_data._data[['quotedate',
                                                      'volume']]).drop_duplicates('quotedate').set_index('quotedate')
        rebalancing_days = pd.to_datetime(
            dates.groupby(pd.Grouper(freq=str(rebalance_freq) +
                                     'BMS')).apply(lambda x: x.index.min()).values) if rebalance_freq else []
        data_iterator = self._data_iterator(monthly)
        bar = pyprind.ProgBar(len(stock_dates), bar_char='█')
        for date, stocks, options in data_iterator:
            if (date in rebalancing_days):
                # Settle balances since the previous rebalance, then rebalance.
                previous_rb_date = rebalancing_days[rebalancing_days.get_loc(date) -
                                                    1] if rebalancing_days.get_loc(date) != 0 else date
                self._update_balance(previous_rb_date, date)
                self._rebalance_portfolio(date, stocks, options, sma_days)
            bar.update()
        # Update balance for the period between the last rebalancing day and the last day
        self._update_balance(rebalancing_days[-1], self.stocks_data.end_date)
        # Derived columns: per-asset capital, total capital and return series.
        self.balance['options capital'] = self.balance['calls capital'] + self.balance['puts capital']
        self.balance['stocks capital'] = sum(self.balance[stock.symbol] for stock in self._stocks)
        self.balance['stocks capital'].iloc[0] = 0
        self.balance['options capital'].iloc[0] = 0
        self.balance[
            'total capital'] = self.balance['cash'] + self.balance['stocks capital'] + self.balance['options capital']
        self.balance['% change'] = self.balance['total capital'].pct_change()
        self.balance['accumulated return'] = (1.0 + self.balance['% change']).cumprod()
        return self.trade_log
def _initialize_inventories(self):
"""Initialize empty stocks and options inventories."""
columns = pd.MultiIndex.from_product(
[[l.name for l in self._options_strategy.legs],
['contract', 'underlying', 'expiration', 'type', 'strike', 'cost', 'order']])
totals = pd.MultiIndex.from_product([['totals'], ['cost', 'qty', 'date']])
self._options_inventory = pd.DataFrame(columns=columns.append(totals))
self._stocks_inventory = pd.DataFrame(columns=['symbol', 'price', 'qty'])
def _data_iterator(self, monthly):
"""Returns combined iterator for stock and options data.
Each step, it produces a tuple like the following:
(date, stocks, options)
Returns:
generator: Daily/monthly iterator over `self._stocks_data` and `self.options_data`.
"""
if monthly:
it = zip(self._stocks_data.iter_months(), self._options_data.iter_months())
else:
it = zip(self._stocks_data.iter_dates(), self._options_data.iter_dates())
return ((date, stocks, options) for (date, stocks), (_, options) in it)
    def _rebalance_portfolio(self, date, stocks, options, sma_days):
        """Reabalances the portfolio according to `self.allocation` weights.

        Args:
            date (pd.Timestamp): Current date.
            stocks (pd.DataFrame): Stocks data for the current date.
            options (pd.DataFrame): Options data for the current date.
            sma_days (int): SMA window size
        """
        # First close any option positions whose exit conditions are met.
        self._execute_option_exits(date, options)
        stock_capital = self._current_stock_capital(stocks)
        options_capital = self._current_options_capital(options)
        total_capital = self.current_cash + stock_capital + options_capital
        # buy stocks
        stocks_allocation = self.allocation['stocks'] * total_capital
        self._stocks_inventory = pd.DataFrame(columns=['symbol', 'price', 'qty'])
        # We simulate a sell of the stock positions and then a rebuy.
        # This would **not** work if we added transaction fees.
        self.current_cash = stocks_allocation + total_capital * self.allocation['cash']
        self._buy_stocks(stocks, stocks_allocation, sma_days)
        # exit/enter contracts
        options_allocation = self.allocation['options'] * total_capital
        if options_allocation >= options_capital:
            # Room to grow the options book: enter new positions with the surplus.
            self._execute_option_entries(date, options, options_allocation - options_capital)
        else:
            # Options book is over-weight: unwind enough contracts to return
            # to the target allocation.
            to_sell = options_capital - options_allocation
            current_options = self._get_current_option_quotes(options)
            self._sell_some_options(date, to_sell, current_options)
    def _sell_some_options(self, date, to_sell, current_options):
        """Unwind option positions until roughly `to_sell` capital is freed.

        Walks the options inventory row by row, closing whole contracts at
        the current exit cost, appending each (partial) close to
        `self.trade_log` and crediting the proceeds to `self.current_cash`.

        Args:
            date (pd.Timestamp): Current date.
            to_sell (float): Capital amount to free from the options book.
            current_options: per-leg current quotes as returned by
                `self._get_current_option_quotes` (one entry per strategy leg).

        NOTE(review): `total_costs` is the sum of the per-leg 'cost' Series,
        itself a Series that is then zipped against `iterrows()` — this
        relies on its rows aligning positionally with the inventory; confirm.
        """
        sold = 0
        total_costs = sum([current_options[i]['cost'] for i in range(len(current_options))])
        for (exit_cost, (row_index, inventory_row)) in zip(total_costs, self._options_inventory.iterrows()):
            if (to_sell - sold > -exit_cost) and (to_sell - sold) > 0:
                # Contracts needed to cover what is still left to sell.
                qty_to_sell = (to_sell - sold) // exit_cost
                if -qty_to_sell <= inventory_row['totals']['qty']:
                    qty_to_sell = (to_sell - sold) // exit_cost
                else:
                    # Cannot close more contracts than the row holds.
                    if qty_to_sell != 0:
                        qty_to_sell = -inventory_row['totals']['qty']
                if qty_to_sell != 0:
                    # Log the (partial) close as its own trade with orders flipped.
                    trade_log_append = self._options_inventory.loc[row_index].copy()
                    trade_log_append['totals', 'qty'] = -qty_to_sell
                    trade_log_append['totals', 'date'] = date
                    trade_log_append['totals', 'cost'] = exit_cost
                    for i, leg in enumerate(self._options_strategy.legs):
                        trade_log_append[leg.name, 'order'] = ~trade_log_append[leg.name, 'order']
                        trade_log_append[leg.name, 'cost'] = current_options[i].loc[row_index]['cost']
                    self.trade_log = self.trade_log.append(trade_log_append, ignore_index=True)
                    # Shrink the inventory position accordingly.
                    self._options_inventory.at[row_index, ('totals', 'date')] = date
                    self._options_inventory.at[row_index, ('totals', 'qty')] += qty_to_sell
                    sold += (qty_to_sell * exit_cost)
        self.current_cash += sold - to_sell
def _current_stock_capital(self, stocks):
    """Return the current value of the stocks inventory.

    Args:
        stocks (pd.DataFrame): Stocks data for the current time step.

    Returns:
        float: Total capital in stocks.
    """
    # Left join keeps every inventory row even when a symbol is absent from today's data.
    merged = self._stocks_inventory.merge(
        stocks, how='left', left_on='symbol', right_on=self._stocks_schema['symbol'])
    close_prices = merged[self._stocks_schema['adjClose']]
    return (close_prices * merged['qty']).sum()
def _current_options_capital(self, options):
    """Return the current mark-to-market capital held in option positions.

    Args:
        options (pd.DataFrame): Options data for the current time step.

    Returns:
        float: Total capital in options; 0 if the inventory is empty.
    """
    options_value = self._get_current_option_quotes(options)
    # One DataFrame per leg, each with the same length as the inventory; an
    # empty first element means there are no open positions.
    if len(options_value[0]) == 0:
        return 0
    # BUG FIX: the accumulator must be an ndarray. With a Python list,
    # `values_by_row += <ndarray>` calls list.__iadd__, which *extends* the list
    # instead of adding element-wise, corrupting the per-row totals.
    values_by_row = np.zeros(len(options_value[0]))
    for leg_quotes in options_value:
        values_by_row += leg_quotes['cost'].values
    # Exit costs are quoted from the closing side, hence the sign flip.
    return -np.sum(values_by_row * self._options_inventory['totals']['qty'].values)
def _buy_stocks(self, stocks, allocation, sma_days):
    """Buys stocks according to their given weight, optionally using an SMA entry filter.

    Updates `self._stocks_inventory` and `self.current_cash`.

    Args:
        stocks (pd.DataFrame): Stocks data for the current time step.
        allocation (float): Total capital allocation for stocks.
        sma_days (int): SMA window; falsy disables the SMA filter.
    """
    stock_symbols = [stock.symbol for stock in self.stocks]
    query = '{} in {}'.format(self._stocks_schema['symbol'], stock_symbols)
    inventory_stocks = stocks.query(query)
    stock_percentages = np.array([stock.percentage for stock in self.stocks])
    stock_prices = inventory_stocks[self._stocks_schema['adjClose']]
    if sma_days:
        # Only enter a position when the price is above its SMA; otherwise qty is 0.
        qty = np.where(inventory_stocks['sma'] < stock_prices, (allocation * stock_percentages) // stock_prices, 0)
    else:
        # Whole shares only: floor-divide each symbol's budget by its price.
        qty = (allocation * stock_percentages) // stock_prices
    self.current_cash -= np.sum(stock_prices * qty)
    # NOTE(review): `stock_prices` carries the index of `inventory_stocks` while
    # `stock_symbols`/`qty` align positionally — assumes the query result preserves
    # the per-symbol ordering of `self.stocks`; confirm the data guarantees this.
    self._stocks_inventory = pd.DataFrame({'symbol': stock_symbols, 'price': stock_prices, 'qty': qty})
def _update_balance(self, start_date, end_date):
    """Updates self.balance in batch for the period between two rebalancing days.

    Appends rows indexed by date for [start_date, end_date) holding cash,
    per-stock capital, calls/puts capital and position quantities.
    """
    stocks_date_col = self._stocks_schema['date']
    stocks_data = self._stocks_data.query('({date_col} >= "{start_date}") & ({date_col} < "{end_date}")'.format(
        date_col=stocks_date_col, start_date=start_date, end_date=end_date))
    options_date_col = self._options_schema['date']
    options_data = self._options_data.query('({date_col} >= "{start_date}") & ({date_col} < "{end_date}")'.format(
        date_col=options_date_col, start_date=start_date, end_date=end_date))
    # Daily option position value, accumulated separately for calls and puts.
    calls_value = pd.Series(0, index=options_data[options_date_col].unique())
    puts_value = pd.Series(0, index=options_data[options_date_col].unique())
    for leg in self._options_strategy.legs:
        leg_inventory = self._options_inventory[leg.name]
        # Valuing an open position uses the cost field of the *opposite* direction.
        cost_field = (~leg.direction).value
        for contract in leg_inventory['contract']:
            leg_inventory_contract = leg_inventory.query('contract == "{}"'.format(contract))
            qty = self._options_inventory.loc[leg_inventory_contract.index]['totals']['qty'].values[0]
            options_contract_col = self._options_schema['contract']
            # Left join so a missing daily quote yields NaN instead of dropping the contract.
            current = leg_inventory_contract[['contract']].merge(options_data,
                                                                 how='left',
                                                                 left_on='contract',
                                                                 right_on=options_contract_col)
            current.set_index(options_date_col, inplace=True)
            if cost_field == Direction.BUY.value:
                current[cost_field] = -current[cost_field]
            if (leg_inventory_contract['type'] == Type.CALL.value).any():
                calls_value = calls_value.add(current[cost_field] * qty * self.shares_per_contract, fill_value=0)
            else:
                puts_value = puts_value.add(current[cost_field] * qty * self.shares_per_contract, fill_value=0)
    # Per-day market value of each stock holding.
    stocks_current = self._stocks_inventory[['symbol', 'qty']].merge(stocks_data[['date', 'symbol', 'adjClose']],
                                                                    on='symbol')
    stocks_current['cost'] = stocks_current['qty'] * stocks_current['adjClose']
    # One date-indexed column per stock, named after its symbol.
    columns = [
        stocks_current[stocks_current['symbol'] == stock.symbol].set_index(stocks_date_col)[[
            'cost'
        ]].rename(columns={'cost': stock.symbol}) for stock in self._stocks
    ]
    add = pd.concat(columns, axis=1)
    add['cash'] = self.current_cash
    add['options qty'] = self._options_inventory['totals']['qty'].sum()
    add['calls capital'] = calls_value
    add['puts capital'] = puts_value
    add['stocks qty'] = self._stocks_inventory['qty'].sum()
    for _index, row in self._stocks_inventory.iterrows():
        symbol = row['symbol']
        add[symbol + ' qty'] = row['qty']
    # sort=False means we're assuming the updates are done in chronological order, i.e,
    # the dates in add are the immediate successors to the ones at the end of self.balance.
    # Pass sort=True to ensure self.balance is always sorted chronologically if needed.
    self.balance = self.balance.append(add, sort=False)
def _execute_option_entries(self, date, options, options_allocation):
    """Enters option positions according to `self._options_strategy`.

    Calls `self._pick_entry_signals` to select from the entry signals given by the strategy.
    Updates `self._options_inventory` and `self.current_cash`.

    Args:
        date (pd.Timestamp): Current date.
        options (pd.DataFrame): Options data for the current time step.
        options_allocation (float): Capital amount allocated to options.
    """
    # Make the allocation available as cash; the actual entry cost is deducted at
    # the end, so any unspent allocation simply remains as cash.
    self.current_cash += options_allocation
    # Remove contracts already in inventory
    inventory_contracts = pd.concat(
        [self._options_inventory[leg.name]['contract'] for leg in self._options_strategy.legs])
    subset_options = options[~options[self._options_schema['contract']].isin(inventory_contracts)]
    entry_signals = []
    for leg in self._options_strategy.legs:
        flt = leg.entry_filter
        cost_field = leg.direction.value
        leg_entries = subset_options[flt(subset_options)]
        # Exit if no entry signals for the current leg
        if leg_entries.empty:
            return
        # Normalize the raw schema columns to the canonical signal columns.
        fields = self._signal_fields(cost_field)
        leg_entries = leg_entries.reindex(columns=fields.keys())
        leg_entries.rename(columns=fields, inplace=True)
        order = get_order(leg.direction, Signal.ENTRY)
        leg_entries['order'] = order
        # Change sign of cost for SELL orders
        if leg.direction == Direction.SELL:
            leg_entries['cost'] = -leg_entries['cost']
        leg_entries['cost'] *= self.shares_per_contract
        # Namespace each leg's columns under its leg name (column MultiIndex).
        leg_entries.columns = pd.MultiIndex.from_product([[leg.name], leg_entries.columns])
        entry_signals.append(leg_entries.reset_index(drop=True))
    # Append the 'totals' column to entry_signals
    total_costs = sum([leg_entry.droplevel(0, axis=1)['cost'] for leg_entry in entry_signals])
    # Whole contracts only: floor-divide the allocation by the absolute combined cost.
    qty = options_allocation // abs(total_costs)
    totals = pd.DataFrame.from_dict({'cost': total_costs, 'qty': qty, 'date': date})
    totals.columns = pd.MultiIndex.from_product([['totals'], totals.columns])
    entry_signals.append(totals)
    entry_signals = pd.concat(entry_signals, axis=1)
    # Remove signals where qty == 0
    entry_signals = entry_signals[entry_signals['totals']['qty'] > 0]
    entries = self._pick_entry_signals(entry_signals)
    # Update options inventory, trade log and current cash
    self._options_inventory = self._options_inventory.append(entries, ignore_index=True)
    self.trade_log = self.trade_log.append(entries, ignore_index=True)
    self.current_cash -= np.sum(entries['totals']['cost'] * entries['totals']['qty'])
def _execute_option_exits(self, date, options):
    """Exits option positions according to `self._options_strategy`.

    Option positions are closed whenever the strategy signals an exit, when the profit/loss
    thresholds are exceeded or whenever the contracts in `self._options_inventory` are not
    found in `options`.
    Updates `self._options_inventory` and `self.current_cash`.

    Args:
        date (pd.Timestamp): Current date.
        options (pd.DataFrame): Options data for the current time step.
    """
    strategy = self._options_strategy
    current_options_quotes = self._get_current_option_quotes(options)
    filter_masks = []
    for i, leg in enumerate(strategy.legs):
        flt = leg.exit_filter
        # This mask is to ensure that legs with missing contracts exit.
        missing_contracts_mask = current_options_quotes[i]['cost'].isna()
        filter_masks.append(flt(current_options_quotes[i]) | missing_contracts_mask)
        # Closing a position uses the opposite direction's cost field.
        fields = self._signal_fields((~leg.direction).value)
        current_options_quotes[i] = current_options_quotes[i].reindex(columns=fields.values())
        current_options_quotes[i].rename(columns=fields, inplace=True)
        current_options_quotes[i].columns = pd.MultiIndex.from_product([[leg.name],
                                                                       current_options_quotes[i].columns])
    exit_candidates = pd.concat(current_options_quotes, axis=1)
    # If a contract is missing we replace the NaN values with those of the inventory
    # except for cost, which we impute as zero.
    exit_candidates = self._impute_missing_option_values(exit_candidates)
    # Append the 'totals' column to exit_candidates
    qtys = self._options_inventory['totals']['qty']
    total_costs = sum([exit_candidates[l.name]['cost'] for l in self._options_strategy.legs])
    totals = pd.DataFrame.from_dict({'cost': total_costs, 'qty': qtys, 'date': date})
    totals.columns = pd.MultiIndex.from_product([['totals'], totals.columns])
    exit_candidates = pd.concat([exit_candidates, totals], axis=1)
    # Compute which contracts need to exit, either because of price thresholds or user exit filters
    threshold_exits = strategy.filter_thresholds(self._options_inventory['totals']['cost'], total_costs)
    filter_mask = reduce(lambda x, y: x | y, filter_masks)
    exits_mask = threshold_exits | filter_mask
    exits = exit_candidates[exits_mask]
    total_costs = total_costs[exits_mask] * exits['totals']['qty']
    # Update options inventory, trade log and current cash
    self._options_inventory.drop(self._options_inventory[exits_mask].index, inplace=True)
    self.trade_log = self.trade_log.append(exits, ignore_index=True)
    # NOTE(review): exit costs follow the closing-side sign convention, so subtracting
    # a negative total credits the account — confirm against `_get_current_option_quotes`.
    self.current_cash -= sum(total_costs)
def _pick_entry_signals(self, entry_signals):
    """Returns the entry signals to execute.

    Args:
        entry_signals (pd.DataFrame): DataFrame of option entry signals chosen by the strategy.

    Returns:
        pd.DataFrame: DataFrame of entries to execute.
    """
    # Guard clause: nothing to pick from, hand the empty frame back unchanged.
    if entry_signals.empty:
        return entry_signals
    # FIXME: This is a naive signal selection criterion, it simply picks the first one in `entry_singals`
    return entry_signals.iloc[0]
def _signal_fields(self, cost_field):
    """Map raw option-schema column names to the canonical signal column names.

    Args:
        cost_field (str): Schema key of the column to expose as 'cost'.

    Returns:
        dict: {raw column name: canonical name}, in the canonical column order.
    """
    schema = self._options_schema
    fields = {schema[name]: name
              for name in ('contract', 'underlying', 'expiration', 'type', 'strike')}
    fields[schema[cost_field]] = 'cost'
    fields['order'] = 'order'
    return fields
def _get_current_option_quotes(self, options):
    """Returns the current quotes for all the options in `self._options_inventory` as a list of DataFrames.

    It also adds a `cost` column with the cost of closing the position in each contract and an `order`
    column with the corresponding exit order type.

    Args:
        options (pd.DataFrame): Options data in the current time step.

    Returns:
        [pd.DataFrame]: List of DataFrames, one for each leg in `self._options_inventory`,
            with the exit cost for the contracts.
    """
    current_options_quotes = []
    for leg in self._options_strategy.legs:
        inventory_leg = self._options_inventory[leg.name]
        # This is a left join to ensure that the result has the same length as the inventory. If the contract
        # isn't in the daily data the values will all be NaN and the filters should all yield False.
        leg_options = inventory_leg[['contract']].merge(options,
                                                        how='left',
                                                        left_on='contract',
                                                        right_on=leg.schema['contract'])
        # leg_options.index needs to be the same as the inventory's so that the exit masks that are constructed
        # from it can be correctly applied to the inventory.
        leg_options.index = self._options_inventory.index
        leg_options['order'] = get_order(leg.direction, Signal.EXIT)
        # Closing uses the price field of the opposite direction (~leg.direction).
        leg_options['cost'] = leg_options[self._options_schema[(~leg.direction).value]]
        # Change sign of cost for SELL orders
        if ~leg.direction == Direction.SELL:
            leg_options['cost'] = -leg_options['cost']
        leg_options['cost'] *= self.shares_per_contract
        current_options_quotes.append(leg_options)
    return current_options_quotes
def _impute_missing_option_values(self, exit_candidates):
    """Fill missing values in `exit_candidates` from the inventory, imputing costs as zero.

    Builds a copy of `self._options_inventory` with every per-leg 'cost' column zeroed
    and uses it as the fill source, so rows with missing daily quotes keep their
    inventory fields but contribute a zero closing cost.

    Args:
        exit_candidates (pd.DataFrame): DataFrame of exit candidates with possible missing values.

    Returns:
        pd.DataFrame: Exit candidates with imputed values.
    """
    df = self._options_inventory.copy()
    for leg in self._options_strategy.legs:
        df.at[:, (leg.name, 'cost')] = 0
    return exit_candidates.fillna(df)
def __repr__(self):
    """Debug representation showing cash, allocation, stocks and strategy."""
    return (f"Backtest(capital={self.current_cash}, allocation={self.allocation}, "
            f"stocks={self._stocks}, strategy={self._options_strategy})")
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
class MatchPredictor:
    """Calculates the probabilities for different scores (outcomes) of a match between two teams.

    Goal counts are modeled as independent Poisson distributions with means `l1` and `l2`.

    Attributes
    ----------
    l1 : float
        Projected score for team 1 (expectation value for Poisson distribution)
    l2 : float
        Projected score for team 2 (expectation value for Poisson distribution)
    """

    def __init__(self, l1=0.0, l2=0.0):
        # Number of bins used when discretising the Poisson distributions; bounds the
        # maximum number of goals considered per team (0 .. _poisson_n_bins - 1).
        self._poisson_n_bins = 8
        self.l1 = l1
        self.l2 = l2

    def poisson_pmf(self, l, n_bins=None):
        """Return the probability mass function of the Poisson distribution with mean l.

        See https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.poisson.html

        Parameters
        ----------
        l : float
            Average number of events per interval ("shape parameter")
        n_bins : int, optional
            Number of bins. If None (default), the value from the class attribute
            _poisson_n_bins is used.

        Returns
        -------
        np.ndarray
            Probability mass function evaluated at 0, 1, ..., n_bins - 1.
        """
        if n_bins is None:
            n_bins = self._poisson_n_bins
        n = np.arange(0, n_bins)
        return stats.poisson.pmf(n, l)

    def calculate_score_probs(self, mode='all'):
        """Calculate the probabilities for the different scores (outcomes) of the match.

        The required information is the expectation value of each team's goal
        distribution, l1 and l2 (class attributes).

        Parameters
        ----------
        mode : str, {'all' (default), 'draws', 'team1_wins', 'team2_wins'}
            If 'all', the complete probability matrix is returned. If 'draws', only the
            diagonal elements (corresponding to all possible draws) are non-zero. If
            'team1_wins', only the elements corresponding to outcomes where team 1 wins
            are non-zero. 'team2_wins' is analogous to 'team1_wins'.

        Returns
        -------
        np.ndarray
            Square matrix of shape (_poisson_n_bins, _poisson_n_bins). The first
            dimension corresponds to team 1, the second to team 2; e.g.
            score_probs[2, 1] gives the probability for the score being 2:1.
        """
        y1 = self.poisson_pmf(self.l1)
        y2 = self.poisson_pmf(self.l2)
        score_probs = np.tensordot(y1, y2, axes=0)  # outer product: vector x vector => matrix
        if mode == 'all':
            pass
        elif mode == 'draws':
            # Diagonal elements correspond to the probabilities of draws (0:0, 1:1, 2:2, ...).
            score_probs = np.diag(np.diag(score_probs))
        elif mode == 'team1_wins':
            # Lower-left triangle (excluding the diagonal => k=-1) holds the outcomes at
            # which team 1 wins (1:0, 2:0, 2:1, ...).
            score_probs = np.tril(score_probs, k=-1)
        elif mode == 'team2_wins':
            # Upper-right triangle (excluding the diagonal => k=1) holds the outcomes at
            # which team 2 wins (0:1, 0:2, 1:2, ...).
            score_probs = np.triu(score_probs, k=1)
        else:
            raise ValueError('Invalid value for "mode".')
        return score_probs

    @staticmethod
    def plot_score_probs(score_probs):
        """Visualise a score-probability matrix as a heat map with per-cell percentages."""
        fig, ax = plt.subplots()
        fig.set_size_inches(5, 5)
        ax.imshow(score_probs, cmap='jet')
        ax.set_ylabel('Goals Team 1')
        ax.set_xlabel('Goals Team 2')
        ax.set_title('Score probabilites (%)')
        # write probability (in %) in each element of the matrix
        for (j, i), label in np.ndenumerate(score_probs):
            ax.text(i, j, round(label * 100, 1), ha='center', va='center')
        plt.show()

    def plot_poisson_pmf(self):
        """Plot both teams' Poisson goal distributions."""
        fig, ax = plt.subplots()
        fig.set_size_inches(5, 5)
        n_bins = np.arange(0, self._poisson_n_bins)
        y1 = self.poisson_pmf(self.l1)
        y2 = self.poisson_pmf(self.l2)
        ax.plot(n_bins, y1, 'o-', color='red', label='Team 1')
        ax.plot(n_bins, y2, 'o-', color='blue', label='Team 2')
        ax.set_xlabel('Scored goals')
        ax.set_ylabel('Probability')
        ax.set_title('Poisson distribution')
        ax.grid()
        ax.legend()
        plt.show()

    @property
    def probs_tendency(self):
        """Probability for the "tendency" of the outcome for a match played by two teams.

        Returns
        -------
        list with 3 elements
            [probability team 1 wins, probability team 2 wins, probability for a draw]
        """
        p_team1 = np.sum(self.calculate_score_probs(mode='team1_wins'))
        p_team2 = np.sum(self.calculate_score_probs(mode='team2_wins'))
        p_draw = np.sum(self.calculate_score_probs(mode='draws'))
        return [p_team1, p_team2, p_draw]

    def prob_goal_difference(self, d, mode='all'):
        """Calculate the probability for the goal difference of the match to be d.

        Parameters
        ----------
        d : int
            Goal difference. Positive: team 1 wins, negative: team 2 wins, 0: draw
        mode : str
            Passed to call of calculate_score_probs. See definition there.

        Returns
        -------
        float
            Probability
        """
        score_probs = self.calculate_score_probs(mode=mode)
        # Parameter k selects the diagonal offset from the main diagonal; the diagonal
        # offset by -d corresponds to the outcomes with a goal difference of d.
        k = -1 * d
        return np.sum(np.diag(score_probs, k=k))

    def most_likely_goal_difference(self, mode='all'):
        """Return (goal difference with the highest probability, that probability)."""
        # Probabilities for all possible goal differences (limited by the width of the
        # discretised Poisson distribution).
        d_ar = np.arange(-(self._poisson_n_bins - 1), self._poisson_n_bins)
        prob = np.zeros(len(d_ar))
        for idx, d in enumerate(d_ar):
            prob[idx] = self.prob_goal_difference(d, mode)
        return d_ar[np.argmax(prob)], np.max(prob)

    def most_likely_score(self, d=None, mode='all'):
        """Returns the most likely score.

        Parameters "mode" and "d" set further constraints on the subset of score
        probabilities to be considered.

        Parameters
        ----------
        d : int, optional
            Goal difference. Positive: team 1 wins, negative: team 2 wins, 0: draw
        mode : str
            Passed to call of calculate_score_probs. See definition there.

        Returns
        -------
        tuple
            ([result], probability) e.g. ([2, 1], 0.06)
        """
        score_probs = self.calculate_score_probs(mode=mode)
        if d is not None:
            # Zero out everything except the diagonal offset by -d; the remaining
            # non-zero elements correspond to results with a goal difference of d.
            score_probs = np.diag(np.diag(score_probs, k=-d), k=-d)
        # Indices of the maximum element = the most likely (goals team 1, goals team 2).
        # See: https://stackoverflow.com/questions/9482550/argmax-of-numpy-array-returning-non-flat-indices
        result = list(np.unravel_index(np.argmax(score_probs), score_probs.shape))
        prob = np.max(score_probs)
        return result, prob

    @property
    def predicted_score(self):
        """Predict the score: most likely tendency, then the most likely goal
        difference within it, then the most likely score with that difference."""
        # 1) Calculate most likely tendency (0: team 1 wins, 1: team 2 wins, 2: draw)
        tendency = np.argmax(self.probs_tendency)
        # 2) What is the most likely goal difference within the tendency
        if tendency == 0:
            mode = 'team1_wins'
        elif tendency == 1:
            mode = 'team2_wins'
        elif tendency == 2:
            mode = 'draws'
        else:
            raise ValueError('Invalid value for tendendy')
        d, _ = self.most_likely_goal_difference(mode=mode)
        # 3) What is the most likely result with the predicted goal difference?
        return self.most_likely_score(d=d, mode=mode)
from flask import Flask, render_template, request, send_file
from flask_pymongo import PyMongo
import json
import sg_core_api as sgapi
import os
import pathlib
import numpy as np
from bson.json_util import dumps
from bson.objectid import ObjectId
from datetime import datetime
from scipy.interpolate import CubicSpline
app = Flask(__name__)
# Load the gesture-generation model once at import time; all requests reuse it.
gesture_generator = sgapi.get_gesture_generator()
root_path = pathlib.Path(__file__).parent
app.config["MONGO_URI"] = "mongodb://localhost" # setup your own db to enable motion library and rule functions
mongo = PyMongo(app)
@app.route('/')
def index():
    """Serve the single-page UI."""
    return render_template('index.html')
@app.route('/api/motion', methods=['GET', 'POST'])
def motion_library():
    """Save a motion to the library (POST) or list all saved motions (GET)."""
    if request.method == 'POST':
        # Renamed from `json` so the local variable does not shadow the
        # module-level `json` import.
        payload = request.get_json()
        payload["motion"] = sgapi.convert_pose_coordinate_for_ui(np.array(payload["motion"])).tolist()
        result = {}
        try:
            mongo.db.motion.insert_one(payload)
            result['msg'] = "success"
        except Exception:
            # Best-effort API: report failure to the client instead of raising.
            result['msg'] = "fail"
        return result
    elif request.method == 'GET':
        try:
            cursor = mongo.db.motion.find().sort("name", 1)
        except AttributeError:
            return {}  # empty library
        motions = sgapi.convert_pose_coordinate_for_ui_for_motion_library(list(cursor))
        return dumps(motions)
    else:
        assert False  # unreachable: only GET/POST are routed here
@app.route('/api/delete_motion/<id>', methods=['GET'])
def delete_motion_library(id):
    """Delete one motion document by its ObjectId and report the outcome."""
    outcome = mongo.db.motion.delete_one({'_id': ObjectId(id)})
    deleted = outcome.deleted_count > 0
    return {'msg': "success" if deleted else "fail"}
@app.route('/api/rule', methods=['GET', 'POST'])
def rule():
    """Create a rule (POST) or list all rules joined with their motions (GET)."""
    if request.method == 'POST':
        # Renamed from `json` so the local variable does not shadow the
        # module-level `json` import.
        payload = request.get_json()
        result = {}
        try:
            payload['motion'] = ObjectId(payload['motion'])
            mongo.db.rule.insert_one(payload)
            result['msg'] = "success"
        except Exception as e:
            # Best-effort API: log the payload and error, report failure to the client.
            print(payload)
            print(e)
            result['msg'] = "fail"
        return result
    elif request.method == 'GET':
        # Join each rule with its referenced motion document.
        pipeline = [{'$lookup':
                     {'from': 'motion',
                      'localField': 'motion',
                      'foreignField': '_id',
                      'as': 'motion_info'}},
                    ]
        try:
            cursor = mongo.db.rule.aggregate(pipeline)
        except AttributeError:
            return {}  # empty rules
        rules = sgapi.convert_pose_coordinate_for_ui_for_rule_library(cursor)
        rules = dumps(rules)
        return rules
    else:
        assert False  # unreachable: only GET/POST are routed here
@app.route('/api/delete_rule/<id>', methods=['GET'])
def delete_rule(id):
    """Delete one rule document by its ObjectId and report the outcome."""
    outcome = mongo.db.rule.delete_one({'_id': ObjectId(id)})
    deleted = outcome.deleted_count > 0
    return {'msg': "success" if deleted else "fail"}
@app.route('/api/input', methods=['POST'])
def input_text_post():
    """Generate a gesture for the posted text.

    Two modes: a 'manual scenario' interpolates the posted key poses with cubic
    splines; otherwise the gesture-generation model is run on the text with
    optional pose/style constraints and voice selection.
    """
    content = request.get_json()
    input_text = content.get('text-input')
    if input_text is None or len(input_text) == 0:
        return {'msg': 'empty'}
    print('--------------------------------------------')
    print('request time:', datetime.now())
    print('request IP:', request.remote_addr)
    print(input_text)
    kp_constraints = content.get('keypoint-constraints')
    if kp_constraints:
        pose_constraints_input = np.array(kp_constraints)
        # The model uses a different pose coordinate convention than the UI.
        pose_constraints = sgapi.convert_pose_coordinate_for_model(np.copy(pose_constraints_input))
    else:
        pose_constraints = None
        pose_constraints_input = None
    style_constraints = content.get('style-constraints')
    if style_constraints:
        style_constraints = np.array(style_constraints)
    else:
        style_constraints = None
    result = {}
    result['msg'] = "success"
    result['input-pose-constraints'] = pose_constraints_input.tolist() if pose_constraints_input is not None else None
    result['input-style-constraints'] = style_constraints.tolist() if style_constraints is not None else None
    result['input-voice'] = content.get('voice')
    result['is-manual-scenario'] = content.get('is-manual-scenario')
    if content.get('is-manual-scenario'):
        # interpolate key poses
        # NOTE(review): assumes 'keypoint-constraints' is always posted in manual mode;
        # a manual request without it raises AttributeError here — confirm the UI guarantees it.
        n_frames = pose_constraints_input.shape[0]
        # Each row appears to be n_joints * 3 coordinates plus a trailing key-pose flag.
        n_joints = int((pose_constraints_input.shape[1] - 1) / 3)
        key_idxs = [i for i, e in enumerate(pose_constraints_input) if e[-1] == 1]
        if len(key_idxs) >= 2:
            out_gesture = np.zeros((n_frames, n_joints * 3))
            xs = np.arange(0, n_frames, 1)
            # Fit one clamped cubic spline per joint through its key poses.
            for i in range(n_joints):
                pts = pose_constraints_input[key_idxs, i * 3:(i + 1) * 3]
                cs = CubicSpline(key_idxs, pts, bc_type='clamped')
                out_gesture[:, i * 3:(i + 1) * 3] = cs(xs)
            result['output-data'] = out_gesture.tolist()
            result['audio-filename'] = os.path.split(result['input-voice'])[
                1]  # WARNING: assumed manual mode uses external audio file
        else:
            result['msg'] = "fail"
    else:
        # run gesture generation model
        output = gesture_generator.generate(input_text, pose_constraints=pose_constraints,
                                            style_values=style_constraints, voice=content.get('voice'))
        if output is None:
            # something wrong
            result['msg'] = "fail"
        else:
            gesture, audio, tts_filename, words_with_timestamps = output
            gesture = sgapi.convert_pose_coordinate_for_ui(gesture)
            result['audio-filename'] = os.path.split(tts_filename)[1]  # filename without path
            result['words-with-timestamps'] = words_with_timestamps
            result['output-data'] = gesture.tolist()
    return result
@app.route('/media/<path:filename>/<path:new_filename>')
def download_audio_file(filename, new_filename):
    """Serve a cached WAV file, exposing it to the client under `new_filename`."""
    # NOTE(review): `attachment_filename` and `cache_timeout` were renamed in Flask 2.0
    # (`download_name` / `max_age`) — confirm the pinned Flask version supports these.
    return send_file(os.path.join('./cached_wav', filename), as_attachment=True, attachment_filename=new_filename,
                     cache_timeout=0)
@app.route('/mesh/<path:filename>')
def download_mesh_file(filename):
    """Serve a mesh asset from the static mesh directory as a download."""
    mesh_path = root_path.joinpath("static", "mesh", filename)
    return send_file(str(mesh_path), as_attachment=True, cache_timeout=0)
@app.route('/upload_audio', methods=['POST'])
def upload():
    """Save uploaded audio files into the cache directory.

    Returns:
        JSON string {'filename': [saved paths]}; files that fail to save are
        logged and skipped (best-effort, as before).
    """
    upload_dir = './cached_wav'
    file_names = []
    for key in request.files:
        file = request.files[key]
        _, ext = os.path.splitext(file.filename)
        print('uploaded: ', file.filename)
        upload_path = os.path.join(upload_dir, "uploaded_audio" + ext)
        try:
            file.save(upload_path)
            file_names.append(upload_path)
        except Exception as e:
            # Was a bare `except:` that hid the failure reason (and would even
            # swallow KeyboardInterrupt); keep best-effort behavior but log why.
            print('save fail: ' + os.path.join(upload_dir, file.filename), e)
    return json.dumps({'filename': file_names})
if __name__ == '__main__':
    # Development entry point; use a proper WSGI server in production.
    app.run()
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import sys
from knack.util import CLIError, ensure_dir
from knack.log import get_logger
from six.moves import configparser
from .config import AZ_DEVOPS_GLOBAL_CONFIG_DIR
from .pip_helper import install_keyring
logger = get_logger(__name__)
class CredentialStore:
    """Persists Personal Access Tokens (PATs).

    The primary backend is the OS keyring; on Linux, keyring failures fall back
    to a plain config file (`_PAT_FILE`) under the azure-devops global config
    directory.
    """

    def __init__(self):
        self._initialize_keyring()

    def set_password(self, key, token):
        """Store `token` under `key`, replacing any existing credential.

        On Linux, keyring failures fall back to file storage; on other
        platforms they are re-raised as CLIError.
        """
        try:
            import keyring
        except ImportError:
            # keyring is an optional dependency: install it on demand, then retry.
            install_keyring()
            self._initialize_keyring()
            import keyring
        try:
            # check for and delete existing credential
            old_token = keyring.get_password(key, self._USERNAME)
            if old_token is not None:
                keyring.delete_password(key, self._USERNAME)
            logger.debug('Setting credential: %s', key)
            keyring.set_password(key, self._USERNAME, token)
        except Exception as ex:  # pylint: disable=broad-except
            # store credentials in azuredevops config directory if keyring is missing or malfunctioning
            if sys.platform.startswith(self._LINUX_PLATFORM):
                logger.warning('Failed to store PAT using keyring; falling back to file storage.')
                logger.warning('You can clear the stored credential by running az devops logout.')
                logger.warning('Refer https://aka.ms/azure-devops-cli-auth to know more on sign in with PAT.')
                logger.debug('Keyring failed. ERROR :%s', ex)
                logger.debug('Storing credentials in the file: %s', self._PAT_FILE)
                creds_list = self._get_credentials_list()
                if key not in creds_list.sections():
                    creds_list.add_section(key)
                    logger.debug('Added new entry to PAT file : %s ', key)
                creds_list.set(key, self._USERNAME, token)
                self._commit_change(creds_list)
            else:
                raise CLIError(ex)

    def get_password(self, key):
        """Return the stored PAT for `key`, or None if not found.

        On Linux a keyring error is treated as a miss (the file store is
        consulted instead); elsewhere it is re-raised as CLIError.
        """
        try:
            import keyring
        except ImportError:
            return None
        token = None
        try:
            token = keyring.get_password(key, self._USERNAME)
        except Exception as ex:  # pylint: disable=broad-except
            # fetch credentials from file if keyring is missing or malfunctioning
            if sys.platform.startswith(self._LINUX_PLATFORM):
                token = None
            else:
                raise CLIError(ex)
        # look for credential in file too for linux if token is None
        if token is None and sys.platform.startswith(self._LINUX_PLATFORM):
            token = self.get_PAT_from_file(key)
        return token

    def clear_password(self, key):
        """Delete the stored PAT for `key`.

        On Linux the credential may live in either backend, so both are tried;
        a CLIError is raised only if it was found in neither.

        Raises:
            CLIError: If no credential exists under `key`.
        """
        try:
            import keyring
        except ImportError:
            install_keyring()
            self._initialize_keyring()
            import keyring
        if sys.platform.startswith(self._LINUX_PLATFORM):
            keyring_token = None
            file_token = None
            try:
                keyring_token = keyring.get_password(key, self._USERNAME)
                if keyring_token:
                    keyring.delete_password(key, self._USERNAME)
            except Exception as ex:  # pylint: disable=broad-except
                logger.debug("%s", ex)
            finally:
                # Always check the file fallback as well.
                file_token = self.get_PAT_from_file(key)
                if file_token:
                    self.delete_PAT_from_file(key)
            if(keyring_token is None and file_token is None):
                raise CLIError(self._CRDENTIAL_NOT_FOUND_MSG)
        else:
            try:
                keyring.delete_password(key, self._USERNAME)
            except keyring.errors.PasswordDeleteError:
                raise CLIError(self._CRDENTIAL_NOT_FOUND_MSG)
            except RuntimeError as ex:  # pylint: disable=broad-except
                raise CLIError(ex)

    def get_PAT_from_file(self, key):
        """Read the PAT for `key` from the fallback credentials file (None if absent)."""
        ensure_dir(AZ_DEVOPS_GLOBAL_CONFIG_DIR)
        logger.debug('Keyring not configured properly or package not found.'
                     'Looking for credentials with key:%s in the file: %s', key, self._PAT_FILE)
        creds_list = self._get_credentials_list()
        try:
            return creds_list.get(key, self._USERNAME)
        except (configparser.NoOptionError, configparser.NoSectionError):
            return None

    def delete_PAT_from_file(self, key):
        """Remove the PAT section for `key` from the fallback credentials file.

        Raises:
            CLIError: If the file has no entry for `key`.
        """
        logger.debug('Keyring not configured properly or package not found.'
                     'Looking for credentials with key:%s in the file: %s', key, self._PAT_FILE)
        creds_list = self._get_credentials_list()
        if key not in creds_list.sections():
            raise CLIError(self._CRDENTIAL_NOT_FOUND_MSG)
        creds_list.remove_section(key)
        self._commit_change(creds_list)

    @staticmethod
    def _get_config_parser():
        # Interpolation is disabled on Python 3 so raw '%' characters in tokens survive.
        if sys.version_info.major == 3:
            return configparser.ConfigParser(interpolation=None)
        return configparser.ConfigParser()

    @staticmethod
    def _get_credentials_list():
        """Parse and return the credentials file; an unreadable file yields an empty parser."""
        try:
            credential_list = CredentialStore._get_config_parser()
            credential_list.read(CredentialStore._PAT_FILE)
            return credential_list
        except BaseException:  # pylint: disable=broad-except
            return CredentialStore._get_config_parser()

    @staticmethod
    def _commit_change(credential_list):
        # Persist the parser state back to the credentials file.
        with open(CredentialStore._PAT_FILE, 'w+') as creds_file:
            credential_list.write(creds_file)

    @staticmethod
    def _initialize_keyring():
        """Restrict keyring to its builtin (non-chained) backends, if installed."""
        try:
            import keyring
        except ImportError:
            return

        def _only_builtin(backend):
            return (
                backend.__module__.startswith('keyring.backends.') and
                'chain' not in backend.__module__
            )

        keyring.core.init_backend(_only_builtin)
        logger.debug('Keyring backend : %s', keyring.get_keyring())

    # a value is required for the python config file that gets generated on some operating systems.
    _USERNAME = 'Personal Access Token'
    _LINUX_PLATFORM = 'linux'
    # Fallback plain-text token store used on Linux when keyring is unavailable.
    _PAT_FILE = os.path.join(AZ_DEVOPS_GLOBAL_CONFIG_DIR, 'personalAccessTokens')
    _CRDENTIAL_NOT_FOUND_MSG = 'The credential was not found'
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/AdverseEvent
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
from pydantic.validators import bytes_validator # noqa: F401
from .. import fhirtypes # noqa: F401
from .. import adverseevent
def impl_adverseevent_1(inst):
    """Assert the field values expected from adverseevent-example.json."""
    assert inst.category == "AE"
    assert inst.date == fhirtypes.DateTime.validate("2017-01-29T12:34:56+00:00")
    assert inst.description == "This was a mild rash on the left forearm"
    assert inst.id == "example"
    assert inst.identifier.system == "http://acme.com/ids/patients/risks"
    assert inst.identifier.value == "49476534"
    assert inst.recorder.reference == "Practitioner/example"
    assert inst.seriousness.coding[0].code == "Mild"
    assert inst.seriousness.coding[0].display == "Mild"
    assert (
        inst.seriousness.coding[0].system
        == "http://hl7.org/fhir/adverse-event-seriousness"
    )
    assert inst.subject.reference == "Patient/example"
    assert inst.suspectEntity[0].instance.reference == "Medication/example"
    assert inst.text.status == "generated"
    assert inst.type.coding[0].code == "304386008"
    assert inst.type.coding[0].display == "O/E - itchy rash"
    assert inst.type.coding[0].system == "http://snomed.info/sct"
def test_adverseevent_1(base_settings):
    """No. 1 tests collection for AdverseEvent.
    Test File: adverseevent-example.json
    """
    filename = base_settings["unittest_data_dir"] / "adverseevent-example.json"
    inst = adverseevent.AdverseEvent.parse_file(
        filename, content_type="application/json", encoding="utf-8"
    )
    assert "AdverseEvent" == inst.resource_type
    impl_adverseevent_1(inst)
    # testing reverse by generating data from itself and create again.
    data = inst.dict()
    assert "AdverseEvent" == data["resourceType"]
    inst2 = adverseevent.AdverseEvent(**data)
    impl_adverseevent_1(inst2)
from math import sqrt
def num_pins_full_row(n: int, k: int) -> int:
    """Pins used when laying out n items in rows of width k (row arrangement).

    Returns 0 for non-positive n. A partial last row of r items needs r + 1 pins.
    """
    if n <= 0:
        return 0
    full_rows, remainder = divmod(n, k)
    pins = (full_rows + 1) * k + remainder
    if remainder:
        pins += 1  # one extra pin closes the partial row
    return pins
def num_pins_square(n: int, k: int) -> int:
    """Pins used when laying out n items as a square block plus a leftover strip.

    `k` is accepted for signature parity with num_pins_full_row but unused.
    Returns 0 when n < 1 (no complete 1x1 square).
    """
    side = int(sqrt(n))
    if side == 0:
        return 0
    pins = (side + 1) ** 2
    leftover = n - side * side
    if leftover > side:
        pins += leftover + 2  # strip wraps onto a second edge
    elif leftover > 0:
        pins += leftover + 1  # strip fits along one edge
    return pins
# Read "n k" from stdin and print the cheaper of the two pin arrangements.
# (Stray table-metadata residue removed from the print line.)
data = tuple(map(int, input().split()))
print(min(num_pins_full_row(*data), num_pins_square(*data)))
from datetime import datetime

# Control character reserved as the formatting marker.
FORMAT_CHAR = '\x19'
# These are non-printable chars, so they should never appear in the input,
# I guess. But maybe we can find better chars that are even less risky.
FORMAT_CHARS = '\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x1A'
# NOTE(review): `datetime` is unused in this visible chunk — confirm the rest
# of the module needs it before removing.
| 265 | 112 |
"""
Horizontal:
Adds movement functions along the horizontal (X) axis to a game object
"""
class Horizontal:
    """Adds movement along the horizontal (X) axis to a game object.

    ``speed`` is any object exposing a numeric ``x`` attribute; the live
    horizontal velocity is tracked in ``current_speed``.
    """

    def __init__(self, speed):
        self.speed = speed
        # Start out moving rightward at the configured speed.
        self.current_speed = speed.x

    def _change_speed(self, value):
        # Single funnel for every velocity mutation.
        self.current_speed = value

    def left(self):
        """Move left at the configured speed."""
        self._change_speed(-self.speed.x)

    def right(self):
        """Move right at the configured speed."""
        self._change_speed(self.speed.x)

    def stop(self):
        """Halt all horizontal movement."""
        self._change_speed(0)
| 462 | 148 |
import json
class Factory:
    """Factory stub: subclasses build objects described by a JSON config.

    Fix: ``create`` and ``get_template`` were declared ``@staticmethod`` yet
    took a stray ``self`` parameter, so ``Factory.create()`` raised
    TypeError.  The parameter is removed to make the staticmethods callable.
    """

    def __init__(self):
        pass

    @staticmethod
    def create():
        """Build and return a product instance (stub, to be overridden)."""
        pass

    @staticmethod
    def get_template():
        """Return the template used by :meth:`create` (stub)."""
        pass

    def get_config(self):
        """Load ``json/config.json`` and return it as a Python dict."""
        with open('json/config.json', 'r') as file:
            # Convert the contents of the file to a Python dictionary
            config = json.loads(file.read())
        return config
#coding: utf-8
# Configuration file for NREL data call
# This file sets up the calls for data from NREL.
# 26/05/2016 Greg Jackson AESE Labs Imperial College
from datetime import date, datetime, timedelta

debug = True

# Location variables, determines locations from which calls will be made
# locations = [("New York",40.7127837, -74.0059413),("Haiti", 18.5790242, -72.3544683 ),("Seattle", 47.6147628,-122.4759903 ),("Toronto", 43.7181557,-79.5181432 ),("Brazilia",-15.7217175,-48.0783247)]
locations = [("New York",40.7127837, -74.0059413)]

# Set collection variables.
# Fix: api_key / your_name / your_email had no right-hand side at all, which
# made this module a SyntaxError.  Empty-string placeholders keep it
# importable; fill them in before running a data call.
api_key = ''  # API Key here
attributes = 'ghi,dhi,dni,wind_speed_10m_nwp,surface_air_temperature_nwp,solar_zenith_angle,clearsky_dhi,clearsky_dni,clearsky_ghi,cloud_type,dew_point,fill_flag,surface_pressure_background,surface_relative_humidity_nwp,wind_direction_10m_nwp,total_precipitable_water_nwp'
years = [1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014]
interval = '30'   # sample interval passed to the API (minutes, as a string)
utc = 'false'
your_name = ''  # Name here
reason_for_use = 'beta+testing'
your_affiliation = 'Fantastic'
your_email = ''  # Email here
mailing_list = 'false'

# Function calls for determining length of data collection
start = datetime(1998,1,1)
end = datetime(2015,1,1)
| 1,248 | 595 |
import pandas as pd
import numpy as np
def topK_neighbors_to_candidate_set(topK_neighbors):
    """Un-pivot a (num_left_tuples x K) neighbor matrix into candidate pairs.

    Row ``i`` of ``topK_neighbors`` holds the K right-table ids matched to
    left-table id ``i``; the result is a two-column DataFrame with one
    (ltable_id, rtable_id) row per neighbor.
    """
    frame = pd.DataFrame(topK_neighbors)
    frame["ltable_id"] = frame.index
    # Melt K neighbor columns into one long (ltable_id, value) table.
    long_form = pd.melt(frame, id_vars=["ltable_id"])
    long_form["rtable_id"] = long_form["value"]
    return long_form[["ltable_id", "rtable_id"]]
def thresholded_pairs_to_candidate_set(thresholded_pairs):
    """Zip two parallel id arrays into a candidate-pair DataFrame."""
    left_ids, right_ids = thresholded_pairs[0], thresholded_pairs[1]
    stacked = np.column_stack((left_ids, right_ids))
    return pd.DataFrame(stacked, columns=["ltable_id", "rtable_id"])
#This accepts four inputs:
# data frames for candidate set and ground truth matches
# left and right data frames
def compute_blocking_statistics(candidate_set_df, golden_df, left_df, right_df):
    """Score a blocking candidate set against the gold matches.

    Both ``candidate_set_df`` and ``golden_df`` are two-column frames
    (ltable_id, rtable_id).  Returns a dict of counts plus precision,
    recall and the candidate-set size ratio (cssr); ratios that would
    divide by zero are reported as "N/A".
    """
    # Equi-join of candidate pairs with the gold matches -> true positives.
    merged_df = pd.merge(candidate_set_df, golden_df, on=['ltable_id', 'rtable_id'])
    # Added to calculate total false positives.
    # NOTE(review): this filters by per-column id membership, not exact
    # pair membership — kept as in the original implementation.
    false_pos = candidate_set_df[
        ~candidate_set_df['ltable_id'].isin(merged_df['ltable_id'])
        | (~candidate_set_df['rtable_id'].isin(merged_df['rtable_id']))
    ]
    left_num_tuples = len(left_df)
    right_num_tuples = len(right_df)
    # Fix: the original guarded only len(golden_df) and raised
    # ZeroDivisionError when merged + false positives was empty; mirror the
    # guard used in compute_column_statistics.
    precision_denom = len(merged_df) + len(false_pos)
    if len(golden_df) > 0 and precision_denom > 0:
        precision = len(merged_df) / precision_denom
    else:
        precision = "N/A"
    statistics_dict = {
        "left_num_tuples": left_num_tuples,
        "right_num_tuples": right_num_tuples,
        "candidate_set_length": len(candidate_set_df),
        "golden_set_length": len(golden_df),
        "merged_set_length": len(merged_df),
        "false_positives_length": len(false_pos),
        "precision": precision,
        "recall": len(merged_df) / len(golden_df) if len(golden_df) > 0 else "N/A",
        "cssr": len(candidate_set_df) / (left_num_tuples * right_num_tuples)
    }
    return statistics_dict
def compute_join_percentage(candidate_set_df, left_df, right_df):
    """Estimate whether the candidate pairs look like a join between the
    two tables.

    Reports the percentage of distinct tuples on each side that appear in
    the candidate set, a JOIN/NO JOIN prediction, and the candidate-set
    size ratio (cssr).
    """
    # Above this percentage of joined tuples on either side we predict JOIN.
    THRESHOLD = 20
    left_num_tuples = len(left_df)
    right_num_tuples = len(right_df)
    left_percent_join = 100 * round(candidate_set_df['ltable_id'].unique().shape[0] / left_num_tuples, 3)
    right_percent_join = 100 * round(candidate_set_df['rtable_id'].unique().shape[0] / right_num_tuples, 3)
    total_percent_join = 100 * round((candidate_set_df['ltable_id'].unique().shape[0] + candidate_set_df['rtable_id'].unique().shape[0]) / (left_num_tuples + right_num_tuples), 3)
    statistics_dict = {
        "left_num_tuples": left_num_tuples,
        "right_num_tuples": right_num_tuples,
        "candidate_set_length": len(candidate_set_df),
        "left_percent_join": f"{left_percent_join}%",
        # Fix: the original listed "right_percent_join" twice; the duplicate
        # key silently overwrote the first entry and is removed.
        "right_percent_join": f"{right_percent_join}%",
        "total_percent_join": f"{total_percent_join}%",
        "prediction": "JOIN" if max(left_percent_join, right_percent_join) > THRESHOLD else "NO JOIN",
        "cssr": len(candidate_set_df) / (left_num_tuples * right_num_tuples)
    }
    return statistics_dict
def compute_column_statistics(table_names,candidate_set_df, golden_df,left_df, right_df):
    """Score a *column*-matching candidate set against gold column matches.

    Unlike the tuple-level variant, the ids here index columns: each id is
    first mapped to its "<table>.<column>" label, then compared with the
    labels in ``golden_df``.  Returns counts, the participating frames,
    precision/recall (or "N/A"), and the candidate-set size ratio.
    """
    candidate_set_df = candidate_set_df.astype('str')
    # Map numeric column indexes to "<table name>.<column name>" labels.
    candidate_set_df['ltable_id_table'] = candidate_set_df['ltable_id'].apply(lambda x: left_df.columns[int(x)])
    candidate_set_df['ltable_id_table'] = table_names[0] + '.' + candidate_set_df['ltable_id_table']
    candidate_set_df['rtable_id_table'] = candidate_set_df['rtable_id'].apply(lambda x: right_df.columns[int(x)])
    candidate_set_df['rtable_id_table'] = table_names[1] + '.' + candidate_set_df['rtable_id_table']
    candidate_set_df = candidate_set_df[['ltable_id_table','rtable_id_table']].rename(columns={'ltable_id_table':'ltable_id','rtable_id_table':'rtable_id'})
    # Equi-join with the gold matches -> true positives.
    merged_df = pd.merge(candidate_set_df, golden_df, on=['ltable_id', 'rtable_id'])
    # Added to calculate total false positives
    false_pos = candidate_set_df[~candidate_set_df['ltable_id'].isin(merged_df['ltable_id'])|(~candidate_set_df['rtable_id'].isin(merged_df['rtable_id']))]
    # NOTE(review): `fp` actually holds the *precision* value (misleading
    # name, kept for byte-compatibility).
    if len(golden_df) > 0 and (len(merged_df) + len(false_pos)) > 0:
        fp = float(len(merged_df)) / (len(merged_df) + len(false_pos))
    else:
        fp = "N/A"
    left_num_columns = len(left_df.columns)
    right_num_columns = len(right_df.columns)
    statistics_dict = {
        "left_table": table_names[0],
        "right_table": table_names[1],
        "left_num_columns": left_num_columns,
        "right_num_columns": right_num_columns,
        "candidate_set_length": len(candidate_set_df),
        "candidate_set": candidate_set_df,
        "golden_set_length": len(golden_df),
        "golden_set": golden_df,
        "merged_set_length": len(merged_df),
        "merged_set": merged_df,
        "false_positives_length": len(false_pos),
        "false_positives": false_pos,
        "precision": fp,
        "recall": float(len(merged_df)) / len(golden_df) if len(golden_df) > 0 else "N/A",
        "cssr": len(candidate_set_df) / (left_num_columns * right_num_columns)
    }
    return statistics_dict
#This function is useful when you download the preprocessed data from DeepMatcher dataset
# and want to convert to matches format.
#It loads the train/valid/test files, filters the duplicates,
# and saves them to a new file called matches.csv
def process_files(folder_root):
    """Collect the label==1 pairs from train/valid/test into matches.csv."""
    positives = []
    for split_name in ("train.csv", "valid.csv", "test.csv"):
        split = pd.read_csv(folder_root + "/" + split_name)
        positives.append(split[split["label"] == 1])
    matches = pd.concat(positives, ignore_index=True)
    matches[["ltable_id","rtable_id"]].to_csv(folder_root + "/matches.csv", header=True, index=False)
| 6,239 | 2,386 |
#!/usr/bin/python3
"""Place unittests"""
import unittest
from models.place import Place
import datetime
import time
class TestPlace(unittest.TestCase):
    """class TestPlace"""

    def test_place_class_membership_and_attributes(self):
        """Place is right class with correct attrs"""
        place = Place()
        self.assertIsNotNone(place.id)
        self.assertIsNotNone(place.created_at)
        self.assertIsNotNone(place.updated_at)
        self.assertIsInstance(place, Place)
        self.assertIsNotNone(place.city_id)
        self.assertIsNotNone(place.user_id)
        self.assertIsNotNone(place.name)
        self.assertIsNotNone(place.description)
        self.assertIsNotNone(place.number_rooms)
        self.assertIsNotNone(place.number_bathrooms)
        self.assertIsNotNone(place.max_guest)
        self.assertIsNotNone(place.price_by_night)
        self.assertIsNotNone(place.latitude)
        self.assertIsNotNone(place.longitude)
        self.assertIsNotNone(place.amenity_ids)

    def test_place_attr_type(self):
        """Place attributes are correct type"""
        place = Place()
        self.assertIsInstance(place.id, str)
        # id is expected to be a canonical 36-character UUID string.
        self.assertEqual(len(place.id), 36)
        self.assertIsInstance(place.created_at, datetime.datetime)
        self.assertIsInstance(place.updated_at, datetime.datetime)
        self.assertIsInstance(place.city_id, str)
        self.assertIsInstance(place.user_id, str)
        self.assertIsInstance(place.name, str)
        self.assertIsInstance(place.description, str)
        self.assertIsInstance(place.number_rooms, int)
        self.assertIsInstance(place.number_bathrooms, int)
        self.assertIsInstance(place.max_guest, int)
        self.assertIsInstance(place.price_by_night, int)
        self.assertIsInstance(place.latitude, float)
        self.assertIsInstance(place.longitude, float)
        self.assertIsInstance(place.amenity_ids, list)

    def test_place_updated_at_matches_created_at_initialization(self):
        """Place updated_at is same as create_at"""
        place = Place()
        self.assertEqual(place.updated_at, place.created_at)

    def test_place_str_method(self):
        """Place str method creates accurate representation"""
        # NOTE(review): the fixed slice offsets assume a 36-char uuid and the
        # exact "[Place] (<id>) <dict>" layout; eval() trusts __str__ output.
        place = Place()
        place_str = place.__str__()
        self.assertIsInstance(place_str, str)
        self.assertEqual(place_str[:7], '[Place]')
        self.assertEqual(place_str[8:46], '({})'.format(place.id))
        self.assertDictEqual(eval(place_str[47:]), place.__dict__)

    def test_place_save_method(self):
        """Place save method alters update_at date"""
        place = Place()
        # Small sleep so save() can produce a later updated_at timestamp.
        time.sleep(0.0001)
        place.save()
        self.assertNotEqual(place.updated_at, place.created_at)

    def test_place_to_dict_method(self):
        """Place to_dict method creates accurate dictionary"""
        place = Place()
        place_dict = place.to_dict()
        self.assertIsInstance(place_dict, dict)
        self.assertEqual(place_dict['id'], place.id)
        self.assertEqual(place_dict['__class__'], type(place).__name__)
        self.assertEqual(
            place_dict['created_at'], place.created_at.isoformat())
        self.assertEqual(
            place_dict['updated_at'], place.updated_at.isoformat())
        self.assertIsInstance(place.created_at, datetime.datetime)
        self.assertIsInstance(place.updated_at, datetime.datetime)

    def test_place_dict_to_instance_with_kwargs(self):
        """Place can instantiate new object with dictionary"""
        place = Place()
        place.name = "Betty"
        place.number = 972
        place_dict = place.to_dict()
        new_place = Place(**place_dict)
        new_place_dict = new_place.to_dict()
        # Round trip must give an equal but distinct object.
        self.assertFalse(new_place is place)
        self.assertDictEqual(new_place_dict, place_dict)

    def test_place_dict_to_instance_with_empty_kwargs(self):
        """Place can instantiate new object with empty dict"""
        place_dict = {}
        new_place = Place(**place_dict)
        new_place_dict = new_place.to_dict()
        self.assertIsInstance(new_place, Place)
        self.assertIsNotNone(new_place.id)
        self.assertIsNotNone(new_place.created_at)
        self.assertIsNotNone(new_place.updated_at)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 4,316 | 1,320 |
# coding: utf-8
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
both_coorporate_utility = 3  # payoff to each player when both cooperate
both_defect_utility = 1      # payoff to each player when both defect
looser_utility = 0           # payoff for the exploited cooperator
winner_utility = 3           # payoff for the successful defector
a_resources = 2  # NOTE(review): unused in the visible script — confirm
b_resources = 2  # NOTE(review): unused in the visible script — confirm
a_actions = []   # per-round actions chosen by player A
b_actions = []   # per-round actions chosen by player B
a_utility = []   # per-round payoffs earned by player A
b_utility = []   # per-round payoffs earned by player B
rounds = 20      # number of iterations to play
# Defect: action 0
# Cooperate: action 1
def evaluate_strategy(a, b):
    """Payoff table for one round (0 = defect, 1 = cooperate).

    Returns an ``(a_utility, b_utility)`` tuple, or ``None`` for actions
    outside {0, 1} (matching the original chain of conditions).
    """
    payoffs = {
        (1, 1): (both_coorporate_utility, both_coorporate_utility),  # both cooperate
        (1, 0): (looser_utility, winner_utility),                    # a cooperates, b defects
        (0, 1): (winner_utility, looser_utility),                    # a defects, b cooperates
        (0, 0): (both_defect_utility, both_defect_utility),          # both defect
    }
    return payoffs.get((a, b))
def tit_for_tat(me, opponent, t):
    """Cooperate on the first round, then mirror the opponent's last move."""
    return 1 if t == 0 else opponent[t - 1]
# play the game the defined amount of rounds
for t in range(rounds):
    a_strategy = tit_for_tat(a_actions, b_actions, t)
    b_strategy = round(np.random.rand()) # random strategy
    a_actions.append(a_strategy)
    b_actions.append(b_strategy)
    [a_result, b_result] = evaluate_strategy(a_strategy, b_strategy)
    a_utility.append(a_result)
    b_utility.append(b_result)

# Plot the per-round utilities of both strategies.
ax = plt.subplot(1,1,1)
ax.plot(np.linspace(1,len(a_utility), len(a_utility)), a_utility, label='Tit for tat')
ax.plot(np.linspace(1,len(b_utility), len(b_utility)), b_utility, label='Random')
ax.set_title('Iteraded prisoners')
ax.set_xlabel('Iterations')
ax.set_ylabel('Utility')
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
plt.show()

# Plot the per-round actions (0 = defect, 1 = cooperate).
ax = plt.subplot(1,1,1)
ax.plot(np.linspace(1,len(a_actions), len(a_actions)), a_actions, label='Tit for tat')
ax.plot(np.linspace(1,len(b_actions), len(b_actions)), b_actions, label='Random')
ax.set_title('Iteraded prisoners')
ax.set_xlabel('Iterations')
ax.set_ylabel('Action')
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
plt.show()
| 1,989 | 801 |
from enum import Enum
class ActualIs(Enum):
    """Callback verdict: where the actual value lies relative to a guess."""
    HIGHER = 1
    MATCH = 0
    LOWER = -1


def higher_lower(min_value, max_value, callback):
    """Binary-search the integer range [min_value, max_value) for the value
    the callback has in mind.

    ``callback(candidate)`` must return an :class:`ActualIs` member.

    Raises:
        ValueError: if the callback never reports MATCH and the interval is
            exhausted.  (The original looped forever in that case: on
            HIGHER it kept ``min_value = candidate``, so a one-element
            interval never shrank.)
    """
    assert isinstance(max_value, int)
    assert isinstance(min_value, int)
    assert max_value > min_value
    while min_value < max_value:
        candidate = midpoint(min_value, max_value)
        result = callback(candidate)
        if result is ActualIs.MATCH:
            return candidate
        elif result is ActualIs.LOWER:
            # actual value is below the candidate
            max_value = candidate
        elif result is ActualIs.HIGHER:
            # actual value is above the candidate; excluding it guarantees
            # the interval strictly shrinks every iteration
            min_value = candidate + 1
        else:
            assert False, "Should be a ActualIs enum constant"
    raise ValueError("search space exhausted without a MATCH")


def midpoint(x, y):
    """Midpoint of x and y, rounded toward negative infinity."""
    return x + ((y - x) // 2)
| 844 | 257 |
# -*- coding: utf-8 -*-
#
# satcfe/resposta/enviardadosvenda.py
#
# Copyright 2015 Base4 Sistemas Ltda ME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import xml.etree.ElementTree as ET
from decimal import Decimal
from io import StringIO
from builtins import str as text
from satcomum.ersat import dados_qrcode
from ..excecoes import ExcecaoRespostaSAT
from ..util import as_datetime
from ..util import base64_to_str
from .padrao import RespostaSAT
from .padrao import analisar_retorno
EMITIDO_COM_SUCESSO = '06000'  # "EEEEE" return code: CF-e issued successfully


class RespostaEnviarDadosVenda(RespostaSAT):
    """Handles the responses of the ``EnviarDadosVenda`` function (see the
    method :meth:`~satcfe.base.FuncoesSAT.enviar_dados_venda`).  On
    success, the expected attributes are:

    .. sourcecode:: text

        numeroSessao (int)
        EEEEE (text)
        CCCC (text)
        mensagem (text)
        cod (text)
        mensagemSEFAZ (text)
        arquivoCFeSAT (text)
        timeStamp (datetime.datetime)
        chaveConsulta (text)
        valorTotalCFe (decimal.Decimal)
        CPFCNPJValue (text)
        assinaturaQRCODE (text)

    On failure, only the following attributes are expected:

    .. sourcecode:: text

        numeroSessao (int)
        EEEEE (text)
        CCCC (text)
        mensagem (text)
        cod (text)
        mensagemSEFAZ (text)

    Finally, as a last resort, the response may include only the default
    attributes, as described in the constant
    :attr:`~satcfe.resposta.padrao.RespostaSAT.CAMPOS`.

    .. note::

        Here, ``text`` refers to a ``unicode`` object (Python 2) or a
        ``str`` object (Python 3).  See ``builtins.str`` from the
        ``future`` library.
    """

    def xml(self):
        """Return the CF-e-SAT XML decoded from Base64.

        :rtype: str
        """
        if self._sucesso():
            return base64_to_str(self.arquivoCFeSAT)
        else:
            raise ExcecaoRespostaSAT(self)

    def qrcode(self):
        """Return the data that make up the QRCode.

        :rtype: str
        """
        if self._sucesso():
            tree = ET.parse(StringIO(self.xml()))
            return dados_qrcode(tree)
        else:
            raise ExcecaoRespostaSAT(self)

    def _sucesso(self):
        # Success means the device reported the "issued successfully" code.
        return self.EEEEE == EMITIDO_COM_SUCESSO

    @staticmethod
    def analisar(retorno):
        """Build a :class:`RespostaEnviarDadosVenda` from the given raw
        return value.

        :param str retorno: Raw return of the ``EnviarDadosVenda`` function.
        """
        resposta = analisar_retorno(
            retorno,
            funcao='EnviarDadosVenda',
            classe_resposta=RespostaEnviarDadosVenda,
            campos=(
                ('numeroSessao', int),
                ('EEEEE', text),
                ('CCCC', text),
                ('mensagem', text),
                ('cod', text),
                ('mensagemSEFAZ', text),
                ('arquivoCFeSAT', text),
                ('timeStamp', as_datetime),
                ('chaveConsulta', text),
                ('valorTotalCFe', Decimal),
                ('CPFCNPJValue', text),
                ('assinaturaQRCODE', text),
            ),
            campos_alternativos=[
                # if the sale fails, only the first six fields specified
                # in the ER should be returned...
                (
                    ('numeroSessao', int),
                    ('EEEEE', text),
                    ('CCCC', text),
                    ('mensagem', text),
                    ('cod', text),
                    ('mensagemSEFAZ', text),
                ),
                # just in case, fall back to the default set of fields
                # when nothing else matches...
                RespostaSAT.CAMPOS,
            ]
        )
        if resposta.EEEEE not in (EMITIDO_COM_SUCESSO,):
            raise ExcecaoRespostaSAT(resposta)
        return resposta
| 4,819 | 1,486 |
from django.apps import AppConfig
class SongsArtistsConfig(AppConfig):
    """Django application configuration for the songs_artists app."""
    name = 'songs_artists'
| 100 | 32 |
from setuptools import setup, find_packages
# Markdown files concatenated into the package's long description.
description_files = ['README.md', 'AUTHORS.md', 'CHANGELOG.md']


def _read(path):
    """Return the text content of *path*, closing the handle promptly.

    Fix: the original called `open()` inline and never closed the files.
    """
    with open(path, 'r') as handle:
        return handle.read()


setup(
    name="arnold",
    description="RPi 4 Based Robotic Platform",
    long_description="".join(_read(f) for f in description_files),
    version="0.0.1",
    author='Hacklab',
    author_email="dev@hacklab.co.za",
    license="BSD",
    url="http://github.com/hacklabza/arnold",
    packages=find_packages(),
    dependency_links=[],
    # splitlines() already returns a list; the redundant list() was dropped.
    install_requires=_read('requirements.txt').splitlines(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent"
    ],
    zip_safe=False,
    include_package_data=True,
    entry_points={'console_scripts': ['arnold = arnold.cli:cli']}
)
| 977 | 319 |
# Generated by Django 3.1.3 on 2020-11-20 21:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Drop Product.number_users and re-create Product.category as a
    nullable ForeignKey to spaweb.ProductCategory (SET_NULL on delete)."""

    dependencies = [
        ('spaweb', '0009_product_number_users'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='product',
            name='number_users',
        ),
        # The old category field is removed and re-added below as a FK.
        migrations.RemoveField(
            model_name='product',
            name='category',
        ),
        migrations.AddField(
            model_name='product',
            name='category',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='category_products', to='spaweb.productcategory', verbose_name='категория'),
        ),
    ]
| 769 | 237 |
from django.apps import AppConfig
class MemberappConfig(AppConfig):
    """Django application configuration for the MemberApp app."""
    name = 'MemberApp'
| 93 | 28 |
import pathlib
import pkgutil
import typing
from contextlib import contextmanager
import click
from jinja2 import Template
from . import formatutils as fmt
class Templates:
    """Loads packaged Jinja templates and renders them with a fixed context."""

    def __init__(self, context: dict):
        self.context = context

    @staticmethod
    def _get(name: str) -> Template:
        # Templates ship inside the bocadillo_cli package data.
        location = str(pathlib.Path("templates", name))
        raw = pkgutil.get_data("bocadillo_cli", location)
        if raw is None:
            raise ValueError(f"Template not found: {name}")
        return Template(raw.decode("utf-8"))

    def render(self, name: str) -> str:
        """Render the template ``<name>.jinja`` against the stored context."""
        return self._get(f"{name}.jinja").render(self.context)
class Writer:
    """Writes generated directories and files to disk, honoring dry-run
    mode and prompting before overwriting existing files."""

    # Echo prefixes for created vs. skipped paths.
    CREATE = fmt.success("CREATE")
    SKIP = fmt.muted("SKIP")

    def __init__(self, dry: bool, no_input: bool, templates: Templates):
        self.dry = dry            # if True, report actions without touching disk
        self.no_input = no_input  # if True, never prompt (existing files are kept)
        self.templates = templates
        self.root = None          # current target directory, managed by cd()

    def mkdir(self, path: pathlib.Path, **kwargs):
        """Create *path* (unless it exists or in dry-run) and echo the action."""
        if path.exists():
            action = self.SKIP
        else:
            action = self.CREATE
            if not self.dry:
                path.mkdir(**kwargs)
        click.echo(f"{action} {path} {fmt.muted('directory')}")

    def writefile(self, path: pathlib.Path, content: str):
        """Write *content* to *path*, asking before overwriting existing files."""
        # Skip when the file exists and we either must not ask or the user declined.
        if path.exists() and (
            self.no_input
            or not click.confirm(
                fmt.pre_warn(
                    f"File {fmt.code(path)} already exists. Overwrite?"
                )
            )
        ):
            nbytes = None
            action = self.SKIP
        else:
            if not self.dry:
                with open(str(path), "w", encoding="utf-8") as f:
                    f.write(content)
                    f.write("\n")
            nbytes = len(content.encode())
            action = self.CREATE
        nbytes_formatted = fmt.muted(f" ({nbytes} bytes)") if nbytes else ""
        click.echo(f"{action} {path}{nbytes_formatted}")

    def writetemplate(self, *names: str, root: pathlib.Path = None) -> None:
        """Render each named template and write it under *root* (or the
        directory previously set by :meth:`cd`)."""
        if root is None:
            assert self.root is not None
            root = self.root
        for name in names:
            content = self.templates.render(name)
            path = pathlib.Path(root, name)
            self.writefile(path, content)

    @contextmanager
    def cd(self, directory: pathlib.Path):
        """Temporarily make *directory* the target root (creating it first)."""
        self.mkdir(directory, exist_ok=True)
        self.root = directory
        try:
            yield self
        finally:
            self.root = None

    def generate(self, config: typing.Dict[str, typing.List[str]]):
        """Write every (directory -> [template names]) entry of *config*."""
        for directory, filenames in config.items():
            with self.cd(directory):
                for filename in filenames:
                    self.writetemplate(filename)
| 2,769 | 801 |
import bpy, os

# Select Expressions
# Wildcard name patterns for the expression meshes to sort into collections.
mesh_expr = ["*Blink*", "*Attack*", "*Ouch*", "*Talk*", "*Capture*", "*Ottotto*", "*Escape*", "*Half*", "*Pattern*", "*Result*", "*Harf*","*Hot*", "*Heavy*", "*Voice*", "*Fura*", "*Throw*", "*Catch*", "*Cliff*", "*FLIP*", "*Bound*", "*Down*", "*Bodybig*", "*Final*", "*Result*", "*StepPose*", "*Sorori*", "*Fall*", "*Appeal*", "*DamageFlyFront*", "*CameraHit*"]

# Make collections for each expressions
bpy.ops.object.select_all(action='DESELECT')
for exp in mesh_expr:
    bpy.ops.object.select_pattern(pattern=exp)
    selectNum = 0
    for obj in bpy.data.objects:
        if obj.select_get():
            selectNum += 1
            print(exp + " -> " + obj.name)
    co = bpy.data.collections
    if selectNum > 0:
        # Reuse an existing collection with this name; otherwise create one
        # and link it under the active layer collection.
        if exp in co:
            collect = co[exp]
        else:
            collect = co.new(name=exp)
            bpy.context.view_layer.active_layer_collection.collection.children.link(collect)
        for obj in bpy.data.objects:
            if obj.select_get():
                # NOTE(review): objects_remove_active() operates on the
                # active collection context, not necessarily on this obj's
                # own collection — confirm this moves (not duplicates) it.
                bpy.ops.collection.objects_remove_active()
                collect.objects.link(obj)
        # Hide the sorted expression collection in viewport and renders.
        collect.hide_viewport = True
        collect.hide_render = True
    bpy.ops.object.select_all(action='DESELECT')
#bpy.ops.object.select_all(action='TOGGLE')
#bpy.ops.object.select_pattern(pattern="*Openblink*")
#bpy.ops.object.select_pattern(pattern="*FaceN*")

# Change image filepaths to be relative to the Blender file
for image in bpy.data.images:
    filename = os.path.basename(image.filepath)
    image.filepath = os.path.join("//", filename)
| 1,637 | 588 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import datetime
import os
import ee
""" Generate a table with all the tasks that are running (or has finished) in GEE.
>>> genreport()
>>> quota(ID)
This module, which is used for checking earth engine quota,
were obtained from geeup: a Python CLI for Earth Engine Uploads
with Selenium Support <https://github.com/samapriya/geeup>, the
acknowledgement for this module should be always given to Samapriya
Roy.
This module is used in R/ee_manage.R
"""
def genreport():
    """Generated report includes taskId, date/time, task status and type.

    Returns a list of dicts (one per Earth Engine task) with the task id,
    state, description, type, creation time string, and the queue/run
    durations in seconds.

    Examples:
        >>> genreport()
    """
    task_list = []
    for item in ee.data.getTaskList():
        # Timestamps arrive in milliseconds since the epoch.
        created = item["creation_timestamp_ms"] / 1000
        started = item["start_timestamp_ms"] / 1000
        updated = item["update_timestamp_ms"] / 1000
        task_list.append({
            "tid": item["id"],
            "tstate": item["state"],
            "tdesc": item["description"],
            "ttype": item["task_type"],
            "tcreate": datetime.datetime.fromtimestamp(created).strftime(
                "%Y-%m-%d %H:%M:%S"
            ),
            "tdiffstart": started - created,   # seconds spent queued
            "tdiffend": updated - started,     # seconds spent running
            # Fix: the original used a bare `except:` (swallowing even
            # KeyboardInterrupt) just to default a missing key; dict.get
            # handles the only expected failure.  The "NULL" sentinel is
            # kept for the R consumer (R/ee_manage.R).
            "error_message": item.get("error_message", "NULL"),
        })
    return task_list
"""Function to return quota usage details for the asset root with the given ID.
>>> humansize(nbytes)
>>> quota(ID)
This function, which is used for checking earth engine quota,
were obtained from geeup: a Python CLI for Earth Engine Uploads with Selenium
Support <https://github.com/samapriya/geeup>, the acknowledgement for these
functions should be always given to Samapriya Roy.
This function is used in R/ee_quota.R
"""
def quota(ID):
    """Print your earth engine quota quickly.
    Args:
        ID (str): The ID of the asset to check
    Examples:
        >>> quota('/users/csaybar')
    """
    # NOTE: the local `quota` shadows the function name inside this body.
    quota = ee.data.getAssetRootQuota(ID)
    total_msg = str(quota["asset_size"]["limit"])
    used_msg = str(quota["asset_size"]["usage"])
    # return 'Total Quota: %s \n Used Quota: %s' % (total_msg, used_msg)
    return [total_msg, used_msg]  # [limit, usage] as strings
| 2,836 | 921 |
"""
Strip silence from recorded audio, move to file server(s).
(c) Ameryn Media LLC, 2015. All rights reserved.
"""
import os
import datetime
from ConfigParser import ConfigParser
import shutil
import pydub
CONFIG_LOCATION = 'ameryn.ini'
config = ConfigParser()
config.read(CONFIG_LOCATION)
# Input file parameters (name & location)
overwrite_source = config.getboolean('strip', 'overwrite_source')  # replace sources in place
output_suffix = config.getboolean('strip', 'output_suffix')  # append "_stripped" to outputs
input_path = config.get('strip', 'input_path')
output_path = config.get('strip', 'output_path')
silence_thresh = config.getint('strip', 'silence_thresh') # dBFS
silence_chunk = config.getint('strip', 'silence_chunk') # Seconds
# NOTE(review): silence_chunk is documented as seconds but is used below as a
# pydub slice length, which is in milliseconds — confirm the intended unit.
recorded_archive = config.get('general', 'recorded_archive')
def strip_file(input_filename):
    """Strip silence from one recorded .wav and archive the original.

    Reads ``input_filename`` from ``input_path``, drops every chunk whose
    loudness is at or below ``silence_thresh`` dBFS, writes the result to
    ``output_path``, then moves the source .wav (and its .pkf peak file)
    into a dated archive folder.  Files that are entirely silent are
    collected in the module-level ``silent_files`` list instead.
    """
    input_filename_fullpath = os.path.join(input_path, input_filename)
    # Pick the output name according to the configured mode.
    if overwrite_source:
        output_filename = input_filename
    elif output_suffix:
        output_filename = input_filename.rsplit('.wav')[0]+'_stripped.wav'
    else:
        output_filename = input_filename
    output_filename_fullpath = os.path.join(output_path, output_filename)
    print input_filename_fullpath,'->', output_filename_fullpath
    audio = pydub.AudioSegment.from_wav(input_filename_fullpath)
    i = 0
    new_audio = None
    done = False
    # Silence removal: walk the audio in fixed-size chunks, keeping only
    # the chunks whose loudness is above the threshold.
    while not done:
        if i+silence_chunk > len(audio):
            # Last chunk
            current_chunk = audio[i:]
            done = True
        else:
            current_chunk = audio[i:i+silence_chunk]
        if current_chunk.dBFS > silence_thresh:
            if not new_audio:
                # NOTE(review): leftover debug print — consider removing.
                print 'NEWWWWWWWWW'
                new_audio = current_chunk
            else:
                new_audio += current_chunk
            print str(datetime.timedelta(milliseconds=i))+' - '+str(datetime.timedelta(milliseconds=i+silence_chunk))+': '+ \
                str(round(current_chunk.dBFS, 2))+' dBFS'
        else:
            print str(datetime.timedelta(milliseconds=i))+' - '+str(datetime.timedelta(milliseconds=i+silence_chunk))+': '+ \
                str(round(current_chunk.dBFS, 2))+' dBFS (Silence, below '+str(silence_thresh)+' dBFS)'
        i += silence_chunk
    # Export audio
    if new_audio:
        # NOTE(review): out_f is never closed after export — confirm pydub
        # closes it, or wrap in a with-block.
        out_f = open(output_filename_fullpath, 'wb')
        new_audio.export(out_f, format='wav')
    else:
        silent_files.append(input_filename)
    # Move original .wav & .pkf files to Julius archive
    archive_path = os.path.join(recorded_archive, str(datetime.datetime.now().year) + '-' + str(datetime.datetime.now().month).zfill(2))
    if not os.path.isdir(archive_path):
        os.makedirs(archive_path)
    shutil.move(input_filename_fullpath, os.path.join(archive_path, input_filename))
    shutil.move(os.path.splitext(input_filename_fullpath)[0] + '.pkf', os.path.join(archive_path, os.path.splitext(input_filename)[0] + '.pkf'))
# Files that contained nothing above the silence threshold (re-record list).
silent_files = []
# Make input filename list
if overwrite_source:
    output_path = input_path
input_filename_list = [f for f in os.listdir(input_path) if f.endswith('.wav') and '_stripped' not in f]
print 'Input filenames (folder):', input_filename_list
# RUN THE TRAP
for each in input_filename_list:
    print each
    strip_file(each)
# Report any fully-silent recordings that were skipped.
if silent_files:
    print
    print 'Silent files (re-record?):'
    for each in silent_files:
        print ' - ' + each
# Copyright (c) Jeremías Casteglione <jrmsdev@gmail.com>
# See LICENSE file.
import json
from bottle import request
from _sadm import log
from _sadm.listen.errors import error
from _sadm.listen.webhook.repo.vcs.git import GitRepo
# Public API of this module.  Fix: the original exported 'exech', a name
# that is never defined anywhere in this module, so `from ... import *`
# raised AttributeError; the actual entry point is `handle`.
__all__ = ['handle']

# Maps task identifiers to their webhook manager instances.
_taskman = {
    'webhook.repo.git': GitRepo(),
}
def handle(task, action):
    """Dispatch a webhook exec request for *task* to its registered manager.

    The JSON request body is passed to the manager's hook; any failure is
    converted into an HTTP 500 error.
    """
    log.debug("exec handle: %s %s" % (task, action))
    manager = _taskman.get(task)
    if manager is None:
        raise error(500, "listen.exec task %s: no manager" % task)
    try:
        manager.hook(action, json.load(request.body))
    except Exception as err:
        raise error(500, "%s" % err)
    return 'OK\n'
| 636 | 256 |
import time

# NOTE(review): the following look like plugin metadata read by the bot's
# dispatcher — confirm against the loader.
minlevel = 1                     # minimum privilege level to trigger this handler
arguments = ["self", "info"]     # arguments the dispatcher passes to main()
keyword = "352"                  # IRC numeric 352 (WHO reply, RPL_WHOREPLY)
def main(self, info):
    """Cache the host and full "nick!user@host" mask from a WHO reply line."""
    words = info["words"]
    nick = words[7]
    user = words[4]
    host = words[5]
    self.hostnames[nick] = host
    self.whoislist[nick] = nick + "!" + user + "@" + host
| 255 | 103 |
import os
from flask_migrate import Migrate
from flask_restful import Api
from flask import Flask
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
from config_manager.config_manager import FileConfigManager
from utils.request_controller import RequestsController
app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the front-end
api = Api(app)

# SECURITY(review): database credentials are hard-coded here — move them into
# configuration or environment variables before deploying.
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:p@192.168.43.141:5432/collaboration_db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True

# Load the L2 configuration when a path is supplied via the environment.
# NOTE(review): `configs` stays undefined when the variable is missing, so any
# later use would raise NameError — confirm this is intended.
if 'L2_CONFIG_PATH' in os.environ and os.environ['L2_CONFIG_PATH'] != 'None':
    configs = FileConfigManager(os.environ.get('L2_CONFIG_PATH'))
else:
    print('No Configuration Manager Found')

db = SQLAlchemy(app)
db.init_app(app)
migrate = Migrate(app, db)

# Imported here (after app/db exist) to avoid circular imports.
from routes import collaboration
from models.model_collaboration import Collaboration_Model

# Collaboration API Signatures
api.add_resource(collaboration.CollaborationAPI, '/this_is_test_api_for_collaborate')
# API Signature for Comment
api.add_resource(collaboration.CommentReadUpdateDelete, '/comments')
api.add_resource(collaboration.CommentWrite, '/comments')
# API Signature for Feedback
api.add_resource(collaboration.FeedbackWrite, '/feedback')
api.add_resource(collaboration.FeedbackRead, '/feedback')
| 1,266 | 427 |
# Variant of the previous program that only accepts numbers from 0 to 1000.
# Collect numbers from the user until they type 0, then report sum/max/min.
conjunto = []
while True:  # the original `condition` flag was never changed; loop exits via break
    numero = int(input("Digite os números do conjunto (Digite 0 para parar): "))
    if numero == 0:
        break
    elif numero > 1000 or numero < 0:
        print("Digite somente números entre 0 e 1000.")
    else:
        conjunto.append(numero)
if conjunto:
    print("Soma dos valores do conjunto: {}!".format(sum(conjunto)))
    print("O maior valor do conjunto: {}!".format(max(conjunto)))
    print("O menor valor do conjunto: {}!".format(min(conjunto)))
else:
    # Fix: the original crashed with ValueError on max()/min() when the
    # user stopped before entering any number.
    print("Nenhum número foi informado.")
import cv2

# Show the default webcam feed until the user presses 'f'.
cam = cv2.VideoCapture(0)
while True:
    # Fix: the success flag was misnamed `camera` and ignored; a failed
    # grab (camera unplugged/busy) left `frame` as None and crashed imshow.
    ok, frame = cam.read()
    if not ok:
        break
    cv2.imshow('imagem camera', frame)
    if cv2.waitKey(1) == ord('f'):
        break
cam.release()
cv2.destroyAllWindows()
import discord
from discord.ext import commands
class Help(commands.Cog):
    """Cog providing the bot's custom .help command."""

    def __init__(self, client):
        self.client = client

    # sets up the custom help functions
    @commands.command()
    async def help(self, ctx):
        """Send an embed listing the bot's available commands."""
        embed = discord.Embed(
            title='Fathom Chan', description="A bot for your Fathom anime film related needs. Below are a list of commands:", color=0xE69138)
        embed.add_field(
            name='.help', value='Calls up list of commands that user can perform', inline=False)
        embed.add_field(
            name='.movies', value='Fetchs current Fathom event anime movies playing', inline=False)
        await ctx.send(embed=embed)
def setup(client):
    # discord.py extension entry point: register the cog when loaded.
    client.add_cog(Help(client))
| 750 | 223 |
import torch
from allennlp.common.registrable import Registrable
from typing import Tuple
class CoverageMatrixAttention(torch.nn.Module, Registrable):
    """
    Abstract base for coverage-aware matrix attention.

    The ``CoverageMatrixAttention`` computes a matrix of attention probabilities
    between the encoder and decoder outputs. The attention function has access
    to the cumulative probabilities that the attention has assigned to each
    input token previously. In addition to the attention probabilities, the
    function should return the coverage vectors which were used to compute the
    distribution at each time step as well as the new coverage vector which
    takes into account the function's computation.

    The module must compute the probabilities instead of the raw scores (like
    the ``MatrixAttention`` module does) because the coverage vector contains
    the accumulated probabilities.
    """
    def forward(self,
                decoder_outputs: torch.Tensor,
                encoder_outputs: torch.Tensor,
                encoder_mask: torch.Tensor,
                coverage_vector: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Computes a matrix of attention probabilities and updates the coverage
        vector.

        Parameters
        ----------
        decoder_outputs: (batch_size, num_decoder_tokens, hidden_dim)
            The decoder's outputs.
        encoder_outputs: (batch_size, num_encoder_tokens, hidden_dim)
            The encoder's outputs.
        encoder_mask: (batch_size, num_encoder_tokens)
            The encoder token mask.
        coverage_vector: (batch_size, num_encoder_tokens)
            The cumulative attention probability assigned to each input token
            thus far.

        Returns
        -------
        torch.Tensor: (batch_size, num_decoder_tokens, num_encoder_tokens)
            The attention probabilities between each decoder and encoder hidden
            representations.
        torch.Tensor: (batch_size, num_decoder_tokens, num_encoder_tokens)
            The coverage vectors used to compute the corresponding attention
            probabilities.
        torch.Tensor: (batch_size, num_encoder_tokens)
            The latest coverage vector after computing the attention
            distribution, to be carried into the next decoding step.
        """
        # Abstract hook: concrete implementations registered via ``Registrable``
        # must override this method.
        raise NotImplementedError
| 2,255 | 577 |
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
import swapper
from factory import (
Sequence,
SubFactory,
)
from factory.django import DjangoModelFactory
from accelerator.tests.factories.partner_factory import PartnerFactory
from accelerator.tests.factories.program_factory import ProgramFactory
from accelerator.tests.factories.program_partner_type_factory import (
ProgramPartnerTypeFactory
)
ProgramPartner = swapper.load_model('accelerator', 'ProgramPartner')
class ProgramPartnerFactory(DjangoModelFactory):
    """Test factory producing ``ProgramPartner`` model instances.

    The concrete model class is resolved through ``swapper`` above, so the
    factory works whether the default or a project-swapped model is active.
    """
    class Meta:
        model = ProgramPartner
    # Related rows are created on demand via their own factories.
    program = SubFactory(ProgramFactory)
    partner = SubFactory(PartnerFactory)
    partner_type = SubFactory(ProgramPartnerTypeFactory)
    # Sequence counter guarantees a unique description per generated instance.
    description = Sequence(
        lambda n: "Description of Program Partner #{0}".format(n))
| 861 | 243 |