import uuid
from django.db.models import Model, UUIDField, DateTimeField
class TimeStampedModel(Model):
# id = UUIDField(
# primary_key=True,
# default=uuid.uuid4,
# editable=False
# )
created_at = DateTimeField(
auto_now_add=True,
verbose_name='Created datetime stamp'
)
updated_at = DateTimeField(
auto_now=True,
verbose_name='Last updated datetime stamp'
)
class Meta:
abstract = True
@property
def is_new(self):
return (self.updated_at - self.created_at).total_seconds() < 0.001
@property
def is_modified(self):
return (self.updated_at - self.created_at).total_seconds() >= 0.001
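# Example (a sketch, not part of the original snippet): a concrete model
# inheriting the abstract base; "Article" and its field are hypothetical.
from django.db.models import CharField

class Article(TimeStampedModel):
    title = CharField(max_length=200)

# created_at/updated_at are then filled in automatically on save, and
# article.is_new / article.is_modified compare the two timestamps.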
|
nilq/baby-python
|
python
|
import pickle
import unittest
from typing import Optional
import boost_histogram as bh
import numpy as np
from bootstraphistogram import BootstrapHistogram
def _standard_error_mean(size, sigma=1.0):
return sigma / np.sqrt(size)
def _standard_error_std(size, sigma=1.0):
return np.sqrt(sigma ** 2 / (2.0 * size))
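# (Added note) These helpers encode the standard-error formulas the tests below
# rely on for their 5-sigma tolerances: SE(mean) = sigma / sqrt(n) and, for
# normally distributed data, SE(sample std) ~ sqrt(sigma^2 / (2 n)).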
class TestBootstrapHistogram1D(unittest.TestCase):
def assertArrayEqual(
self, actual: np.ndarray, expected: np.ndarray, msg: Optional[str] = None
) -> None:
return self.assertTrue(np.array_equal(actual, expected), msg=msg)
def assertArrayAlmostEqual(
self,
actual: np.ndarray,
expected: np.ndarray,
delta: float,
msg: Optional[str] = None,
) -> None:
return self.assertTrue(np.all(np.abs(actual - expected) < delta), msg=msg)
    def test_constructor(self):
# check constructor works without raising error
BootstrapHistogram(bh.axis.Regular(100, -1.0, 1.0), rng=1234)
return
def test_fill(self):
hist = BootstrapHistogram(
bh.axis.Regular(100, -5.0, 5.0), numsamples=10, rng=1234
)
size = 100000
data = np.random.normal(loc=0.0, scale=1.0, size=size)
hist.fill(data)
x = hist.axes[0].centers
y = hist.view()[:, np.random.randint(0, hist.numsamples)]
mean = np.average(x, weights=y)
std = np.average((x - mean) ** 2, weights=y)
binwidth = hist.axes[0].edges[1] - hist.axes[0].edges[0]
self.assertAlmostEqual(
mean, 0.0, delta=5.0 * _standard_error_mean(size=size) + binwidth
)
self.assertAlmostEqual(
std, 1.0, delta=5.0 * _standard_error_std(size=size) + binwidth
)
return
def test_samples(self):
numsamples = 100
hist = BootstrapHistogram(
bh.axis.Regular(100, 0.0, 1.0), numsamples=numsamples, rng=1234
)
size = 100000
data = np.random.uniform(size=size)
hist.fill(data)
y = hist.view()
mean = np.average(y, axis=1)
std = np.std(y, axis=1)
nbins = len(hist.axes[0])
self.assertArrayAlmostEqual(
mean, size / nbins, delta=5.0 * np.sqrt(size / nbins)
)
self.assertArrayAlmostEqual(
std,
np.sqrt(size / nbins),
delta=5.0
* _standard_error_std(size=numsamples, sigma=np.sqrt(size / nbins)),
)
return
def test_numsamples_property(self):
numsamples = 100
hist = BootstrapHistogram(
bh.axis.Regular(100, -5.0, 5.0), numsamples=numsamples, rng=1234
)
self.assertEqual(hist.numsamples, numsamples)
def test_axes_property(self):
axes = (bh.axis.Regular(100, -5.0, 5.0),)
hist = BootstrapHistogram(*axes, rng=1234)
self.assertEqual(hist.axes[:-1], axes)
def test_view_property(self):
numsamples = 10
nbins = 5
hist = BootstrapHistogram(
bh.axis.Regular(nbins, -5.0, 5.0), numsamples=numsamples, rng=1234
)
view = hist.view()
self.assertArrayEqual(view, np.zeros(shape=(nbins, numsamples)))
def test_equality(self):
hist1 = BootstrapHistogram(bh.axis.Regular(100, -5.0, 5.0), rng=123)
hist2 = BootstrapHistogram(bh.axis.Regular(100, -5.0, 5.0), rng=123)
data = np.random.normal(size=1000)
hist1.fill(data)
hist2.fill(data)
self.assertEqual(hist1, hist2)
def test_inequality(self):
hist1 = BootstrapHistogram(bh.axis.Regular(100, -5.0, 5.0))
hist2 = BootstrapHistogram(bh.axis.Regular(100, -5.0, 5.0))
data = np.random.normal(size=1000)
hist1.fill(data)
hist2.fill(data)
self.assertNotEqual(hist1, hist2)
def test_add(self):
hist1 = BootstrapHistogram(bh.axis.Regular(100, -5.0, 5.0), rng=1234)
hist2 = BootstrapHistogram(bh.axis.Regular(100, -5.0, 5.0), rng=1234)
hist1.fill(np.random.normal(size=1000))
hist2.fill(np.random.normal(size=1000))
a1 = hist1.view()
a2 = hist2.view()
hist3 = hist1 + hist2
self.assertArrayEqual(hist3.view(), a1 + a2)
def test_multiply_by_scalar(self):
hist1 = BootstrapHistogram(bh.axis.Regular(100, -5.0, 5.0), rng=1234)
hist1.fill(np.random.normal(size=1000))
scale = 2.0
a1 = hist1.view() * scale
hist3 = hist1 * scale
self.assertArrayEqual(hist3.view(), a1)
def test_divide_by_scalar(self):
hist1 = BootstrapHistogram(bh.axis.Regular(100, -5.0, 5.0), rng=1234)
hist1.fill(np.random.normal(size=1000))
scale = 2.0
a1 = hist1.view() / scale
hist3 = hist1 / scale
self.assertArrayEqual(hist3.view(), a1)
def test_pickle(self):
hist1 = BootstrapHistogram(bh.axis.Regular(100, -5.0, 5.0), rng=1234)
hist1.fill(np.random.normal(size=1000))
hist2 = pickle.loads(pickle.dumps(hist1))
self.assertEqual(hist1, hist2)
def test_nominal(self):
hist = BootstrapHistogram(bh.axis.Regular(100, -5.0, 5.0), rng=1234)
data = np.random.normal(size=1000)
hist.fill(data)
arr, _ = np.histogram(data, bins=hist.axes[0].edges)
self.assertArrayEqual(hist.nominal.view(), arr)
def test_mean(self):
size = 100000
hist = BootstrapHistogram(
bh.axis.Regular(100, 0.0, 1.0), numsamples=100, rng=1234
)
data = np.random.uniform(size=size)
hist.fill(data)
nbins = len(hist.axes[0])
self.assertArrayAlmostEqual(
hist.mean(), size / nbins, delta=5.0 * np.sqrt(size / nbins)
)
return
def test_std(self):
numsamples = 100
hist = BootstrapHistogram(
bh.axis.Regular(100, 0.0, 1.0), numsamples=numsamples, rng=1234
)
size = 100000
data = np.random.uniform(size=size)
hist.fill(data)
nbins = len(hist.axes[0])
self.assertArrayAlmostEqual(
hist.std(),
np.sqrt(size / nbins),
delta=5.0
* _standard_error_std(size=numsamples, sigma=np.sqrt(size / nbins)),
)
return
|
nilq/baby-python
|
python
|
''' Simple build error dialog, access to logs etc. '''
import os.path
import os
os.environ['NO_AT_BRIDGE'] = '0'
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk # pylint:disable=no-name-in-module
class Handler(object):
''' Implicit signal handlers declared in glade. '''
def on_build_error_dialog_destroy(self, *args):
''' Window closed using window manager. '''
Gtk.main_quit(*args)
def on_build_error_dialog_close(self, *args):
''' User pushes Close button '''
Gtk.main_quit(*args)
def on_view_buildlog_button_clicked(self, button):
''' User pushes 'View buildlog' button. '''
print('view_buildlog')
Gtk.main_quit(self, button)
def on_ok_button_clicked(self, button):
''' User pushes 'OK' button. '''
Gtk.main_quit(self, button)
def main():
''' Indeed: main function... '''
builder = Gtk.Builder()
ui = os.path.dirname(os.path.abspath(__file__)) + "/build-error.ui"
builder.add_from_file(ui)
builder.connect_signals(Handler())
window = builder.get_object('build_error_dialog')
window.show_all()
Gtk.main()
if __name__ == '__main__':
main()
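# (Added note) builder.connect_signals(Handler()) matches the handler names
# declared in the Glade file (build-error.ui) against method names on the
# Handler instance, so a signal wired to e.g. "on_ok_button_clicked" in Glade
# dispatches to Handler.on_ok_button_clicked above.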
# vim: set expandtab ts=4 sw=4:
|
nilq/baby-python
|
python
|
tot18 = 0
totM = 0
tot20 = 0
while True:
    idade = int(input('Age: '))
    sexo = ' '
    while sexo not in 'MF':
        sexo = str(input('Choose sex [M/F]: '))
    if idade >= 18:
        tot18 += 1
    if sexo == 'M':
        totM += 1
    if sexo == 'F' and idade < 20:  # only women under 20 count toward tot20
        tot20 += 1
    r = ' '
    while r not in 'YN':
        r = str(input('Continue? [Y/N] '))
    if r == 'N':
        break
print(f'Total number of people aged 18 or over: {tot18}')
print(f'Total number of men registered: {totM}')
print(f'Total number of women under 20: {tot20}')
|
nilq/baby-python
|
python
|
# coding: utf-8
import uuid

def createUUID():
    return uuid.uuid4().fields[0]

def createUUIDList(listSize):
    ret = [0] * listSize
    for i in range(listSize):
        ret[i] = uuid.uuid4().fields[0]
    return ret

def float2int(v, defaultValue=0):
    if isinstance(v, float):
        return int(v)
    else:
        return defaultValue

def str2int(v, defaultValue=0):
    if isinstance(v, str):
        return int(v)
    else:
        return defaultValue
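# Example usage (a sketch, not in the original file):
if __name__ == '__main__':
    print(createUUID())        # first 32-bit field of a random UUID4
    print(createUUIDList(3))   # three such values
    print(float2int(3.7))      # 3 (truncated)
    print(float2int('3.7'))    # 0 (not a float, falls back to default)
    print(str2int('42'))       # 42
    print(str2int(42))         # 0 (not a str, falls back to default)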
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Time Series Analysis -- Python/NumPy implementation
#
# Author: Jakob Rørsted Mosumgaard
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
###############################################################################
# Modules
###############################################################################
from __future__ import print_function, with_statement, division
import numpy as np
###############################################################################
# Auxiliary functions
###############################################################################
def powerspec(time, flux, low, high, rate):
"""
Calculate the fourier power spectrum using a least mean square method.
Arguments:
- `time`: Array with the values of the time
- `flux`: Array with the measured flux
- `low` : The lowest test frequency
- `high`: The highest test frequency
- `rate`: The sampling rate (spacing between frequencies)
"""
# Generate test cyclic frequencies and convert to angular
freq = np.arange(low, high, rate)
ny = 2 * np.pi * freq
# Empty array to store calculated power
powers = np.zeros(shape=freq.shape)
# The loop over frequencies (least mean square)
for i in range(len(ny)):
pcos = np.cos(ny[i] * time)
psin = np.sin(ny[i] * time)
s = np.sum(flux * psin)
c = np.sum(flux * pcos)
ss = np.sum(np.square(psin))
cc = np.sum(np.square(pcos))
sc = np.sum(psin * pcos)
alpha = (s*cc - c*sc) / (ss*cc - sc**2)
beta = (c*ss - s*sc) / (ss*cc - sc**2)
freq_power = alpha**2 + beta**2
powers[i] = freq_power
# Return an array of test (cyclic) frequencies and the calculated power
return freq, powers
###############################################################################
# Script
###############################################################################
# Initial setup
datdir = '../testdata/'
outdir = '../output/'
compare = False
# Load data
infile = 'ts_14days.txt'
time, flux = np.loadtxt(datdir + infile, unpack=True)
# Convert time to megaseconds
time *= 1e-6
# Run power spectrum
freq, powers = powerspec(time, flux, 1900.0, 4100.0, 0.1)
# Compare to the true oscillations?
if compare:
# Load module
import matplotlib.pyplot as plt
# Load correct oscillations
oscfile = 'oscillations.dat'
l, n, nu, A, delta = np.loadtxt(datdir + oscfile, unpack=True)
# Plot
plt.figure()
plt.plot(freq, powers, 'r-')
plt.plot(nu, A**2, 'g*')
plt.title('Power spectrum')
plt.xlabel('nu [muHz]')
plt.ylabel('|V(t)|^2')
plt.savefig(outdir + 'test1.pdf')
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.13 on 2021-11-17 18:05
from django.db import migrations, models
import ic_marathon_app.validators
class Migration(migrations.Migration):
dependencies = [
('ic_marathon_app', '0010_auto_20201101_0745'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='category',
field=models.CharField(choices=[('beginnerrunner', 'Beginner Runner'), ('runner', 'Runner'), ('biker', 'Biker'), ('duathloner', 'Duathloner'), ('freestyler', 'Freestyler')], default='beginnerrunner', max_length=20),
),
migrations.AlterField(
model_name='workout',
name='distance',
field=models.DecimalField(decimal_places=2, default=0.0, max_digits=5, validators=[ic_marathon_app.validators.validate_distance], verbose_name='KM'),
),
]
|
nilq/baby-python
|
python
|
import sys
sys.path.insert(0,'../..')
import json
import requests
from tqdm import tqdm
from lxml import etree
from app import base
heroes_url = 'http://heroesjson.com/heroes.json'
image_prefix = 'http://us.battle.net/heroes/static'
class ChampionImporter(base.ChampionImporter):
def get_objects(self):
list_data = requests.get(heroes_url).json()
hero_ids = [h['name'] for h in list_data]
hero_map = {
'butcher': 'the-butcher',
# Cho'Gall character
'cho': 'chogall',
'gall': 'chogall',
'li-li': 'lili',
'liming': 'li-ming',
}
objects = []
for hero_id in tqdm(hero_ids, desc='Parsing champions'):
hero_id = ''.join([h for h in hero_id if h.isalpha() or h == ' '])
hero_id = hero_id.replace(' ', '-')
hero_id = hero_id.lower()
if hero_id in hero_map:
hero_id = hero_map[hero_id]
if hero_id in ['chogall', 'greymane']:
                print('Skipping {}: no idea how to handle it for now.'.format(hero_id))
continue
detail_url = 'http://eu.battle.net/heroes/en/heroes/{}/'.format(hero_id)
hero_response = requests.get(detail_url)
if hero_response.status_code != 200:
raise Exception('Invalid URL. Update hero_map maybe?')
tree = etree.HTML(hero_response.content)
hero_script = tree.xpath('/html/body/div[2]/div/script')[0].text
start_pos, end_pos = hero_script.find('{'), hero_script.rfind('}')
hero_json = json.loads(hero_script[start_pos:end_pos + 1])
o_name = tree.xpath('/html/body/div[2]/div/div[2]/div/div[3]/div[1]/div[2]/h1')[0].text.strip()
o_title = None
o_nation = tree.xpath('//*[@id="hero-summary"]/div[2]/div/div[2]')[0].text.strip()
o_ranged = hero_json['type']['slug'] != 'melee'
o_image_url = '{}{}'.format(
'http://us.battle.net',
tree.xpath('/html/body/div[2]/div/div[2]/div/div[3]/div[2]/div[2]/ul/li[1]/img')[0].attrib['src']
)
o_image = self.download_image(o_image_url, '{}.jpg'.format(hero_id))
champion = base.Champion(
hero_id, o_name, o_image, o_title, is_range=o_ranged, nation=o_nation
)
for ability in hero_json['abilities'] + hero_json['heroicAbilities'] + [hero_json['trait']]:
s_id = '{}_{}'.format(hero_id, ability['slug']).lower()
s_name = ability['name']
s_image_url = '{}{}'.format(image_prefix, ability['icon'])
s_image = self.download_image(s_image_url, '{}_{}.png'.format(
hero_id, s_id
))
skill = base.Skill(s_id, s_name, s_image)
champion.add_skill(skill)
objects.append(champion)
return objects
class ItemImporter(base.ItemImporter):
def get_objects(self):
return []
class SettingsImporter(base.SettingsImporter):
def get_objects(self):
return {
'ios': {
'ad_small': 'ca-app-pub-4764697513834958/6893120062',
'ad_big': 'ca-app-pub-4764697513834958/2183718861',
'ad_video_id': '1197471',
'ad_video_key': '4c0a685045ec2ea625ac4e00bfd52e894e11b90e',
'tracking': 'UA-77793311-2',
'store': 'itms-apps://itunes.apple.com/app/id1175817991',
'store_premium': 'com.puppybox.quizhots.premium_version',
},
'android': {
'ad_small': 'ca-app-pub-4764697513834958/4637657667',
'ad_big': 'ca-app-pub-4764697513834958/5695588466',
'ad_video_id': '1197472',
'ad_video_key': 'a04ae4e3efe676b70a3f19695b0f95b448e7bb8c',
'tracking': 'UA-77793311-3',
'store': 'market://details?id=com.puppybox.quizhots',
'store_premium': 'com.puppybox.quizhots.premium_version',
},
'windows': {
'ad_small': 'ca-app-pub-4764697513834958/7883646863',
'ad_big': 'ca-app-pub-4764697513834958/7744046068',
'ad_video_id': '',
'ad_video_key': '',
'tracking': '',
'store': '',
},
'legal_disclaimer': 'This application is not created, sponsored or endorsed by Blizzard Entertainment® and doesn’t reflect the views or opinions of Blizzard Entertainment® or anyone officially involved in producing or managing Heroes of the Storm. Heroes of the Storm is a registered trademark of Blizzard Entertainment®. All in-game descriptions, characters, locations, imagery and videos of game content are copyright and are trademarked to their respective owners. Usage for this game falls within fair use guidelines.',
'highscore_url': 'http://mobascore-puppybox.rhcloud.com/api/v1/leaderboards/hots/scores/',
'source_name': 'Heroes of the Storm',
'source_url': 'http://eu.battle.net/heroes/',
}
class AchievementImporter(base.AchievementImporter):
pass
items = ItemImporter().run()
champions = ChampionImporter().run()
achievements = AchievementImporter(items, champions).run()
settings = SettingsImporter().run()
|
nilq/baby-python
|
python
|
import mysql.connector
from flask import Flask, request, jsonify, redirect
import json
from datetime import datetime
from furl import furl
app = Flask(__name__)
mydb = mysql.connector.connect(
host="127.0.0.1",
#port=3308,
#user="python_boi",
user="admin",
#passwd="qrXEoFtaVXGkuJHT",
passwd="",
database="test"
)
mycursor = mydb.cursor(dictionary=True)
users_table = "users"
groups_table = "groups"
#mycursor.execute("DROP TABLE users")
# mycursor.execute("CREATE TABLE IF NOT EXISTS users (id INT PRIMARY KEY NOT NULL AUTO_INCREMENT, created VARCHAR(255) NOT NULL, lastModified VARCHAR(255) NOT NULL, userName VARCHAR(255) NOT NULL, name VARCHAR(255), displayName VARCHAR(255), nickName VARCHAR(255), profileUrl VARCHAR(255), title VARCHAR(255), userType VARCHAR(255), preferredLanguage VARCHAR(255), locale VARCHAR(255), timezone VARCHAR(255), active BOOLEAN, password VARCHAR(255), emails VARCHAR(255), phoneNumbers VARCHAR(255), ims VARCHAR(255), photos VARCHAR(255), addresses VARCHAR(255), groups VARCHAR(255), entitlements VARCHAR(255), roles VARCHAR(255), x509Certificates VARCHAR(255))")
# mycursor.execute("CREATE TABLE IF NOT EXISTS groups (id INT PRIMARY KEY NOT NULL AUTO_INCREMENT, created VARCHAR(255) NOT NULL, lastModified VARCHAR(255) NOT NULL, displayName VARCHAR(255) NOT NULL, members VARCHAR(255))")
# mycursor.execute("CREATE TABLE IF NOT EXISTS memberships (userID VARCHAR(255), groupID VARCHAR(255))")
mycursor.execute('SELECT * FROM users')
mycursor.fetchall()
field_names = [i[0] for i in mycursor.description]
group_field_names = ["id", "created", "lastModified", "displayName"]
print(field_names)
def user_count():
mycursor.execute("SELECT * FROM {}".format(users_table))
mycursor.fetchall()
count = mycursor.rowcount
print("\nrow count:\n"+str(count))
return count
def create_user():
req_data = json.loads(request.data, strict=False)
mycursor.execute("SELECT COUNT(*) FROM "+users_table+" WHERE userName='"+json.dumps(req_data["userName"])+"' LIMIT 0,1")
fetch = mycursor.fetchone()
if fetch["COUNT(*)"] != 0:
result = {}
result["schemas"] = ["urn:ietf:params:scim:api:messages:2.0:Error"]
result["status"] = 409
result["detail"] = "User already exists in the database."
return json.dumps(result), 409
else:
timestamp = str(datetime.utcnow())
req_data["created"] = timestamp
req_data["lastModified"] = timestamp
if users_table == "users3":
req_data["id"] = str(json.loads(req_data["externalId"])) # added for okta user id primary key
if bool(req_data["active"]) == True:
req_data["active"] = 1
else:
req_data["active"] = 0
keys = []
values = []
for x in req_data:
if x in field_names:
keys.append(x)
values.append(json.dumps(req_data[x]))
keys = (str(keys)[1:-1]).replace("'","")
values = (str(values)[1:-1])
mycursor.execute("INSERT INTO {} ({}) VALUES ({})".format(users_table, keys, values))
mydb.commit()
return read_user(mycursor.lastrowid), 201
def read_all_users():
url = request.url
params = furl(request.url)
if url.find('filter=') == -1:
mycursor.execute("SELECT COUNT(*) FROM {} LIMIT 0,1".format(users_table))
fetch = mycursor.fetchone()
else:
user_query = json.dumps(params.args["filter"].replace('userName eq ', ''))
mycursor.execute("SELECT COUNT(*) FROM {} WHERE userName = {} LIMIT 0,1".format(users_table, user_query))
fetch = mycursor.fetchone()
result = {}
result["schemas"] = ["urn:ietf:params:scim:api:messages:2.0:ListResponse"]
result["totalResults"] = fetch["COUNT(*)"]
result["startIndex"] = 1
result["itemsPerPage"] = fetch["COUNT(*)"]
result["Resources"] = []
if url.find('filter=') == -1:
mycursor.execute("SELECT id FROM {}".format(users_table))
alldata = mycursor.fetchall()
for x in alldata:
if users_table == "users3":
current_user = json.dumps(read_user(x['id']))
else:
current_user = json.loads(read_user(x['id']))
#result["Resources"].append(json.loads(current_user))
result["Resources"].append(current_user)
else:
if result["totalResults"] > 0 :
mycursor.execute("SELECT id FROM {} WHERE userName = {} ORDER BY id ASC".format(users_table, user_query))
fetch = mycursor.fetchone()
result["Resources"].append(json.loads(read_user(fetch['id'])))
return json.dumps(result).replace("\\","")
def read_user(id):
if users_table == "users3":
mycursor.execute("SELECT COUNT(*) FROM {} WHERE id = {} LIMIT 0,1".format(users_table, json.dumps(id)))
else:
mycursor.execute("SELECT COUNT(*) FROM {} WHERE id = {} LIMIT 0,1".format(users_table, id))
fetch = mycursor.fetchone()
result = {}
if fetch["COUNT(*)"] == 0:
result["schemas"] = ["urn:ietf:params:scim:api:messages:2.0:Error"]
result["status"] = 404
result["detail"] = "User not found"
return json.dumps(result), 404
else:
if users_table == "users3":
mycursor.execute("SELECT * FROM {} WHERE id = {} LIMIT 0,1".format(users_table, json.dumps(id)))
else:
mycursor.execute("SELECT * FROM {} WHERE id = {} LIMIT 0,1".format(users_table, id))
data = mycursor.fetchone()
result["schemas"] = ["urn:ietf:params:scim:schemas:core:2.0:User"]
if users_table == "users3":
result["id"] = json.loads(id)
else:
result["id"] = id
result["userName"] = json.loads(data["userName"])
result["active"] = bool(data["active"])
if data["name"] != None:
result["name"] = json.loads(data["name"])
if data["displayName"] != None:
result["displayName"] = json.loads(data["displayName"])
if data["nickName"] != None:
result["nickName"] = json.loads(data["nickName"])
if data["profileUrl"] != None:
result["profileUrl"] = json.loads(data["profileUrl"])
if data["title"] != None:
result["title"] = json.loads(data["title"])
if data["preferredLanguage"] != None:
result["preferredLanguage"] = json.loads(data["preferredLanguage"])
if data["locale"] != None:
result["locale"] = json.loads(data["locale"])
if data["timezone"] != None:
result["timezone"] = json.loads(data["timezone"])
if data["emails"] != None:
result["emails"] = json.loads(data["emails"])
if data["phoneNumbers"] != None:
result["phoneNumbers"] = json.loads(data["phoneNumbers"])
if data["ims"] != None:
result["ims"] = json.loads(data["ims"])
if data["photos"] != None:
result["photos"] = json.loads(data["photos"])
if data["addresses"] != None:
result["addresses"] = json.loads(data["addresses"])
if data["entitlements"] != None:
result["entitlements"] = json.loads(data["entitlements"])
if data["roles"] != None:
result["roles"] = json.loads(data["roles"])
if data["x509Certificates"] != None:
result["x509Certificates"] = json.loads(data["x509Certificates"])
if data["extension_enterprise"] != None:
result["urn:ietf:params:scim:schemas:extension:enterprise:2.0:User"] = json.loads(data["extension_enterprise"])
result["schemas"].append("urn:ietf:params:scim:schemas:extension:enterprise:2.0:User")
result["schemas"].append("urn:ietf:params:scim:schemas:extension:lattice:attributes:1.0:User")
result["groups"] = []
result["meta"] = { "resourceType" : "User", "created" : json.loads(data["created"]), "lastModified" : json.loads(data["lastModified"]), "location" : "http://localhost:8080/scim/v2/Users/"+str(id)}
return json.dumps(result).replace("\\","")
def update_user(id):
if request.method == 'PUT':
req_data = json.loads(request.data, strict=False)
timestamp = str(datetime.utcnow())
req_data["lastUpdated"] = timestamp
if users_table == "users3":
req_data["id"] = req_data["id"]
else:
req_data["id"] = int(id)
if req_data["active"]:
req_data["active"] = 1
else:
req_data["active"] = 0
for data in req_data:
if data in field_names:
mycursor.execute("UPDATE " + users_table + " SET " + data + " = %s WHERE id=%s",(json.dumps(req_data[data]), req_data["id"]))
if "urn:ietf:params:scim:schemas:extension:enterprise:2.0:User" in req_data["schemas"]:
mycursor.execute("UPDATE " + users_table + " SET extension_enterprise = %s WHERE id=%s",(json.dumps(req_data["urn:ietf:params:scim:schemas:extension:enterprise:2.0:User"]), req_data["id"]))
elif request.method == 'PATCH':
req_data = json.loads(request.data, strict=False)
if req_data["Operations"][0]["value"]["active"] == False:
data = "active=0"
else:
data = "active=1"
mycursor.execute("UPDATE " + users_table + " SET " + data + " WHERE id=" + id)
mydb.commit()
return read_user(id)
def create_group():
req_data = json.loads(request.data, strict=False)
timestamp = str(datetime.utcnow())
req_data["created"] = timestamp
req_data["lastModified"] = timestamp
memberships = req_data["members"]
keys = []
values = []
for x in req_data:
if x in group_field_names:
keys.append(x)
values.append(json.dumps(req_data[x]))
keys = (str(keys)[1:-1]).replace("'","")
values = (str(values)[1:-1])
mycursor.execute("INSERT INTO {} ({}) VALUES ({})".format(groups_table, keys, values))
mydb.commit()
group_id = mycursor.lastrowid
for x in memberships:
mycursor.execute("INSERT INTO {} ({}) VALUES ({},{})".format("memberships", "userID, groupID", x["value"], group_id))
mydb.commit()
return read_group(group_id), 201
def read_group(id):
mycursor.execute("SELECT COUNT(*) FROM {} WHERE id = {} LIMIT 0,1".format(groups_table, id))
fetch = mycursor.fetchone()
result = {}
if fetch["COUNT(*)"] == 0:
result["schemas"] = ["urn:ietf:params:scim:api:messages:2.0:Error"]
result["status"] = 404
result["detail"] = "Group not found"
return json.dumps(result), 404
else:
mycursor.execute("SELECT * FROM {} WHERE id = {} LIMIT 0,1".format(groups_table, id))
data = mycursor.fetchone()
result["schemas"] = ["urn:ietf:params:scim:schemas:core:2.0:Group"]
result["id"] = id
result["displayName"] = json.loads(data["displayName"])
result["members"] = []
mycursor.execute("SELECT userID FROM {} WHERE groupID = {}".format("memberships", id))
usersingroup = mycursor.fetchall()
for x in usersingroup:
result["members"].append(x["userID"])
result["meta"] = { "resourceType" : "Group", "created" : json.loads(data["created"]), "lastModified" : json.loads(data["lastModified"]), "location" : "http://localhost:8080/scim/v2/Groups/"+str(id)}
return json.dumps(result).replace("\\","")
def delete_group(id):
mycursor.execute("DELETE FROM {} WHERE id={}".format(groups_table, int(id)))
mydb.commit()
mycursor.execute("DELETE FROM {} WHERE groupID={}".format("memberships", int(id)))
mydb.commit()
return "",204
def update_group(id):
return "", 204
def read_all_groups():
result = {}
mycursor.execute("SELECT COUNT(*) FROM {} LIMIT 0,1".format(groups_table))
fetch = mycursor.fetchone()
result["schemas"] = ["urn:ietf:params:scim:api:messages:2.0:ListResponse"]
result["totalResults"] = fetch["COUNT(*)"]
result["startIndex"] = 1
result["itemsPerPage"] = fetch["COUNT(*)"]
result["Resources"] = []
mycursor.execute("SELECT id FROM {}".format(groups_table))
alldata = mycursor.fetchall()
for x in alldata:
current_user = json.loads(read_group(x['id']))
result["Resources"].append(current_user)
return json.dumps(result).replace("\\","")
@app.route("/scim/v2/Users", methods=['POST', 'GET'])
def users():
if request.method == 'GET':
return read_all_users()
elif request.method == 'POST':
return create_user()
@app.route("/scim/v2/Users/<id>", methods=['GET','PATCH','PUT'])
def users_id(id):
if request.method == 'GET':
return read_user(id)
else:
return update_user(id)
@app.route("/scim/v2/Groups", methods=['POST','GET'])
def groups():
if request.method == 'GET':
return read_all_groups()
elif request.method == 'POST':
return create_group()
@app.route("/scim/v2/Groups/<id>", methods=['GET','PATCH','DELETE', 'PUT'])
def groups_id(id):
if request.method == 'GET':
return read_group(id)
elif request.method == 'DELETE':
return delete_group(id)
elif request.method == 'PATCH':
return update_group(id)
elif request.method == 'PUT':
return read_group(id)
# return update_group(id)
@app.route("/oauth2/authorize", methods=['GET'])
def authorize():
url = request.url
params = furl(request.url)
redirect_uri = params.args["redirect_uri"]
state = params.args["state"]
code = "abcdef"
redirect_path = redirect_uri+"?state="+state+"&code="+code
return redirect(redirect_path, code=200)
@app.route("/oauth2/token", methods=['POST'])
def token():
token_response={}
token_response["scope"] = "api:admin"
token_response["token_type"] = "Bearer"
token_response["expires_in"] = 360
token_response["access_token"] = "mytoken"
token_response["refresh_token"] = "refresh"
return jsonify(token_response), 200
@app.route("/", methods=['GET'])
def home():
return "", 200
if __name__ == '__main__':
app.run(host="localhost", port=8080, debug=True)
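# Note (not in the original): the handlers above interpolate request data
# straight into SQL strings, which is vulnerable to SQL injection. A minimal
# sketch of the same lookup using mysql.connector's parameter binding:
#
#     mycursor.execute(
#         "SELECT COUNT(*) FROM users WHERE userName = %s LIMIT 0,1",
#         (json.dumps(req_data["userName"]),),
#     )
#
# The driver escapes the bound value; only identifiers (table/column names)
# still need to come from a trusted whitelist such as users_table.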
|
nilq/baby-python
|
python
|
import sys
from time import sleep
import pygame
from bullet import Bullet
from alien import Alien
def check_keydown_events(event, ai_settings, screen, ship, bullets):
    """Respond to keypresses."""
    # Move the ship to the right
    if event.key == pygame.K_RIGHT:
        ship.moving_right = True
    # Move the ship to the left
    elif event.key == pygame.K_LEFT:
        ship.moving_left = True
    elif event.key == pygame.K_SPACE:
        fire_bullet(ai_settings, screen, ship, bullets)
    elif event.key == pygame.K_q:
        sys.exit()

def fire_bullet(ai_settings, screen, ship, bullets):
    """Fire a bullet if the limit has not been reached yet."""
    # Create a new bullet and add it to the bullets group
    if len(bullets) < ai_settings.bullets_allowed:
        new_bullet = Bullet(ai_settings, screen, ship)
        bullets.add(new_bullet)

def check_keyup_events(event, ship):
    """Respond to key releases."""
    if event.key == pygame.K_RIGHT:
        ship.moving_right = False
    elif event.key == pygame.K_LEFT:
        ship.moving_left = False

def check_events(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets):
    """Respond to keypress and mouse events."""
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        elif event.type == pygame.KEYDOWN:
            check_keydown_events(event, ai_settings, screen, ship, bullets)
        elif event.type == pygame.KEYUP:
            check_keyup_events(event, ship)
        elif event.type == pygame.MOUSEBUTTONDOWN:
            mouse_x, mouse_y = pygame.mouse.get_pos()
            check_play_button(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets, mouse_x, mouse_y)

def check_play_button(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets, mouse_x, mouse_y):
    """Start a new game when the player clicks Play."""
    button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)
    if button_clicked and not stats.game_active:
        # Reset the game settings
        ai_settings.initialize_dynamic_settings()
        # Hide the mouse cursor
        pygame.mouse.set_visible(False)
        # Reset the game statistics
        stats.reset_stats()
        stats.game_active = True
        # Reset the scoreboard images
        sb.prep_score()
        sb.prep_high_score()
        sb.prep_level()
        sb.prep_ships()
        # Empty the lists of aliens and bullets
        aliens.empty()
        bullets.empty()
        # Create a new fleet of aliens
        create_fleet(ai_settings, screen, ship, aliens)
        ship.center_ship()

def update_screen(ai_settings, screen, stats, sb, ship, aliens, bullets, play_button):
    """Update the images on the screen and flip to the new screen."""
    # Redraw the screen on each pass through the loop
    screen.fill(ai_settings.bg_color)
    # Redraw all bullets behind the ship and the aliens
    for bullet in bullets.sprites():
        bullet.draw_bullet()
    ship.blitme()
    aliens.draw(screen)
    # Draw the score information
    sb.show_score()
    # Draw the Play button if the game is inactive
    if not stats.game_active:
        play_button.draw_button()
    # Make the most recently drawn screen visible
    pygame.display.flip()

def update_bullets(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Update bullet positions and get rid of old bullets."""
    # Update bullet positions
    bullets.update()
    # Get rid of bullets that have left the top of the screen
    for bullet in bullets.copy():
        if bullet.rect.bottom <= 0:
            bullets.remove(bullet)
    # Check whether any bullet has hit an alien;
    # if so, get rid of both the bullet and the alien
    check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets)

def check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Respond to bullet-alien collisions."""
    # Remove any bullets and aliens that have collided
    collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)
    if collisions:
        for aliens_hit in collisions.values():
            stats.score += ai_settings.alien_points * len(aliens_hit)
            sb.prep_score()
        check_high_score(stats, sb)
    if len(aliens) == 0:
        # Destroy existing bullets and create a new fleet
        bullets.empty()
        ai_settings.increase_speed()
        # Increase the level
        stats.level += 1
        sb.prep_level()
        create_fleet(ai_settings, screen, ship, aliens)

def get_number_aliens_x(ai_settings, alien_width):
    """Determine the number of aliens that fit in a row."""
    available_space_x = ai_settings.screen_width - 2 * alien_width
    number_aliens_x = int(available_space_x / (2 * alien_width))
    return number_aliens_x

def get_number_rows(ai_settings, ship_height, alien_height):
    """Determine the number of rows of aliens that fit on the screen."""
    available_space_y = (ai_settings.screen_height - (3 * alien_height) - ship_height)
    number_rows = int(available_space_y / (2 * alien_height))
    return number_rows

def create_alien(ai_settings, screen, aliens, alien_number, row_number):
    """Create an alien and place it in the row."""
    alien = Alien(ai_settings, screen)
    alien_width = alien.rect.width
    alien.x = alien_width + 2 * alien_width * alien_number
    alien.rect.x = alien.x
    alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number
    aliens.add(alien)

def create_fleet(ai_settings, screen, ship, aliens):
    """Create a full fleet of aliens."""
    # Create an alien and find the number of aliens in a row
    alien = Alien(ai_settings, screen)
    number_aliens_x = get_number_aliens_x(ai_settings, alien.rect.width)
    number_rows = get_number_rows(ai_settings, ship.rect.height, alien.rect.height)
    # Create the fleet of aliens, row by row
    for row_number in range(number_rows):
        for alien_number in range(number_aliens_x):
            create_alien(ai_settings, screen, aliens, alien_number, row_number)

def check_fleet_edges(ai_settings, aliens):
    """Respond appropriately if any aliens have reached an edge."""
    for alien in aliens.sprites():
        if alien.check_edges():
            change_fleet_direction(ai_settings, aliens)
            break

def change_fleet_direction(ai_settings, aliens):
    """Drop the entire fleet and change its direction."""
    for alien in aliens.sprites():
        alien.rect.y += ai_settings.fleet_drop_speed
    ai_settings.fleet_direction *= -1

def ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Respond to the ship being hit by an alien."""
    if stats.ships_left > 0:
        # Decrement ships_left
        stats.ships_left -= 1
        # Update the scoreboard
        sb.prep_ships()
        # Empty the lists of aliens and bullets
        aliens.empty()
        bullets.empty()
        # Create a new fleet and center the ship
        create_fleet(ai_settings, screen, ship, aliens)
        ship.center_ship()
        # Pause briefly
        sleep(0.5)
    else:
        stats.game_active = False
        pygame.mouse.set_visible(True)

def check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Check whether any aliens have reached the bottom of the screen."""
    screen_rect = screen.get_rect()
    for alien in aliens.sprites():
        if alien.rect.bottom >= screen_rect.bottom:
            # Treat this the same as if the ship got hit
            ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)
            break

def update_aliens(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Check whether the fleet is at an edge, then update the positions of all aliens in the fleet."""
    check_fleet_edges(ai_settings, aliens)
    aliens.update()
    # Look for alien-ship collisions
    if pygame.sprite.spritecollideany(ship, aliens):
        ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)
    # Look for aliens hitting the bottom of the screen
    check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets)

def check_high_score(stats, sb):
    """Check for a new high score."""
    if stats.score > stats.high_score:
        stats.high_score = stats.score
        sb.prep_high_score()
|
nilq/baby-python
|
python
|
'''
gsconfig is a python library for manipulating a GeoServer instance via the GeoServer RESTConfig API.
The project is distributed under an MIT License.
'''
__author__ = "David Winslow"
__copyright__ = "Copyright 2012-2018 Boundless, Copyright 2010-2012 OpenPlans"
__license__ = "MIT"
from geoserver.support import ResourceInfo, xml_property, write_bool, workspace_from_url
from geoserver.style import Style
class _attribution(object):
def __init__(self, title, width, height, href, url, type):
self.title = title
self.width = width
self.height = height
self.href = href
self.url = url
self.type = type
def _read_attribution(node):
title = node.find("title")
width = node.find("logoWidth")
height = node.find("logoHeight")
href = node.find("href")
url = node.find("logoURL")
type = node.find("logoType")
if title is not None:
title = title.text
if width is not None:
width = width.text
if height is not None:
height = height.text
if href is not None:
href = href.text
if url is not None:
url = url.text
if type is not None:
type = type.text
return _attribution(title, width, height, href, url, type)
def _write_attribution(builder, attr):
builder.start("attribution", dict())
if attr.title is not None:
builder.start("title", dict())
builder.data(attr.title)
builder.end("title")
if attr.width is not None:
builder.start("logoWidth", dict())
builder.data(attr.width)
builder.end("logoWidth")
if attr.height is not None:
builder.start("logoHeight", dict())
builder.data(attr.height)
builder.end("logoHeight")
if attr.href is not None:
builder.start("href", dict())
builder.data(attr.href)
builder.end("href")
if attr.url is not None:
builder.start("logoURL", dict())
builder.data(attr.url)
builder.end("logoURL")
if attr.type is not None:
builder.start("logoType", dict())
builder.data(attr.type)
builder.end("logoType")
builder.end("attribution")
def _write_style_element(builder, name):
ws, name = name.split(':') if ':' in name else (None, name)
builder.start("name", dict())
builder.data(name)
builder.end("name")
if ws:
builder.start("workspace", dict())
builder.data(ws)
builder.end("workspace")
def _write_default_style(builder, name):
builder.start("defaultStyle", dict())
if name is not None:
_write_style_element(builder, name)
builder.end("defaultStyle")
def _write_alternate_styles(builder, styles):
builder.start("styles", dict())
for s in styles:
builder.start("style", dict())
_write_style_element(builder, getattr(s, 'fqn', s))
builder.end("style")
builder.end("styles")
class Layer(ResourceInfo):
def __init__(self, catalog, name):
super(Layer, self).__init__()
self.catalog = catalog
self.name = name
self.gs_version = self.catalog.get_short_version()
resource_type = "layer"
save_method = "PUT"
@property
def href(self):
return "{}/layers/{}.xml".format(self.catalog.service_url, self.name)
@property
def resource(self):
if self.dom is None:
self.fetch()
name = self.dom.find("resource/name").text
atom_link = [n for n in self.dom.find("resource").getchildren() if 'href' in n.attrib]
ws_name = workspace_from_url(atom_link[0].get('href'))
if self.gs_version >= "2.13":
if ":" in name:
ws_name, name = name.split(':')
return self.catalog.get_resources(names=name, workspaces=ws_name)[0]
def _get_default_style(self):
if 'default_style' in self.dirty:
return self.dirty['default_style']
if self.dom is None:
self.fetch()
element = self.dom.find("defaultStyle")
# aborted data uploads can result in no default style
return self._resolve_style(element) if element is not None else None
def _resolve_style(self, element):
if ":" in element.find('name').text:
ws_name, style_name = element.find('name').text.split(':')
else:
style_name = element.find('name').text
ws_name = None
atom_link = [n for n in element.getchildren() if 'href' in n.attrib]
if atom_link and ws_name is None:
ws_name = workspace_from_url(atom_link[0].get("href"))
return self.catalog.get_styles(names=style_name, workspaces=ws_name)[0]
def _set_default_style(self, style):
if isinstance(style, Style):
style = style.fqn
self.dirty["default_style"] = style
def _get_alternate_styles(self):
if "alternate_styles" in self.dirty:
return self.dirty["alternate_styles"]
if self.dom is None:
self.fetch()
styles_list = self.dom.findall("styles/style")
return [self._resolve_style(s) for s in styles_list]
def _set_alternate_styles(self, styles):
self.dirty["alternate_styles"] = styles
default_style = property(_get_default_style, _set_default_style)
styles = property(_get_alternate_styles, _set_alternate_styles)
attribution_object = xml_property("attribution", _read_attribution)
enabled = xml_property("enabled", lambda x: x.text == "true")
advertised = xml_property("advertised", lambda x: x.text == "true", default=True)
type = xml_property("type")
def _get_attr_attribution(self):
obj = {
'title': self.attribution_object.title,
'width': self.attribution_object.width,
'height': self.attribution_object.height,
'href': self.attribution_object.href,
'url': self.attribution_object.url,
'type': self.attribution_object.type
}
return obj
def _set_attr_attribution(self, attribution):
self.dirty["attribution"] = _attribution(
attribution['title'],
attribution['width'],
attribution['height'],
attribution['href'],
attribution['url'],
attribution['type']
)
assert self.attribution_object.title == attribution['title']
assert self.attribution_object.width == attribution['width']
assert self.attribution_object.height == attribution['height']
assert self.attribution_object.href == attribution['href']
assert self.attribution_object.url == attribution['url']
assert self.attribution_object.type == attribution['type']
attribution = property(_get_attr_attribution, _set_attr_attribution)
writers = dict(
attribution = _write_attribution,
enabled = write_bool("enabled"),
advertised = write_bool("advertised"),
default_style = _write_default_style,
alternate_styles = _write_alternate_styles
)
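# Example usage (a sketch, not part of this module): manipulating a Layer via
# a gsconfig Catalog; the URL, credentials, and layer/style names are assumptions.
#
#     from geoserver.catalog import Catalog
#
#     cat = Catalog("http://localhost:8080/geoserver/rest", "admin", "geoserver")
#     layer = cat.get_layer("states")
#     layer.default_style = "population"  # goes through _set_default_style
#     layer.enabled = True                # serialized via write_bool("enabled")
#     cat.save(layer)                     # PUTs the XML built by the writers above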
|
nilq/baby-python
|
python
|
from pandas import read_csv, get_dummies, DataFrame
from math import ceil
from sklearn.ensemble import GradientBoostingRegressor, VotingRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
import numpy as np
# Load the data
print('Reading the data...')
data = read_csv("insurance.csv")
print('Read completed.\n')
# One-hot encoding
print('Preprocessing data...')
data = get_dummies(data, columns=['sex', 'smoker', 'region'], drop_first=True)
# Format and Split the data
x = data[['age', 'bmi', 'children', 'sex_male', 'smoker_yes', 'region_northwest', 'region_southeast', 'region_southwest']]
y = data['charges']
train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.4)
print('Preprocessing completed.\n')
# Train the model and make predictions
r1 = GradientBoostingRegressor(loss='huber', learning_rate=0.13, max_features='auto', alpha=0.7, random_state=1)
r2 = GradientBoostingRegressor(loss='huber', learning_rate=0.13, max_features='auto', alpha=0.7, random_state=1)
model = VotingRegressor([('gbr1', r1), ('gbr2', r2)])
model.fit(train_x, train_y)
print('Testing the model...')
predicted = model.predict(test_x)
mae = mean_absolute_error(test_y, predicted)
print('Mean Absolute Error : ',mae)
print('Testing completed.\n')
# Predict cost for a sample customer
print('Running for one sample...')
sample = DataFrame({
'age': 26,
'bmi': 25.44,
'children': 1,
'sex_male': 1,
'smoker_yes' : 0,
'region_northeast': 0,
'region_southeast': 0,
'region_southwest': 1,
}, [1])
print('Sample data : ',sample)
cost = model.predict(sample)[0]
print('Predicted cost : ', cost)
print('Sample run completed.\n')
print('Calculating premium...')
# Calculate premium
def compute_monthly_premium(cost):
multiplier = 1.1
return ceil(cost*multiplier)/12
print('Monthly Premium : ',compute_monthly_premium(cost))
print('Premium calculated.\n')
print('Program completed.')
|
nilq/baby-python
|
python
|
import math
# import random
# class Particle (object):
# """Paticle module"""
# def __init__ (self, mass=1.0, x=0.0, y=0.0, vx=0.0, vy=0.0):
# self.mass = mass
# self.x = x
# self.y = y
# self.vx = vx
# self.vy = vy
#
# def __str__ (self):
# return "(%s, %s, %s, %s, %s)" % (self.mass, self.x, self.y, self.vx, self.vy)
def Acceleration (particles):
G = 1
AccList_x = []
AccList_y = []
nParticle = len(particles)
for iParticle in range (0, nParticle):
ax = ay = 0.0
if(particles[iParticle].mass > 1.0e-7):
for jParticle in range (0, nParticle):
if(iParticle != jParticle):
dx = particles[iParticle].x - particles[jParticle].x
dy = particles[iParticle].y - particles[jParticle].y
r = math.sqrt(dx*dx + dy*dy)
r2 = r**2
xhat = dx/r
yhat = dy/r
ax = ax - G*particles[jParticle].mass*xhat/r2
ay = ay - G*particles[jParticle].mass*yhat/r2
AccList_x.append(ax)
AccList_y.append(ay)
else:
AccList_x.append(0.0)
AccList_y.append(0.0)
return AccList_x, AccList_y
def UpdateVelocity (particles, TimeStep):
ax, ay = Acceleration (particles)
nParticle = len(particles)
for iParticle in range(0, nParticle):
if(particles[iParticle].mass>1.0e-7):
particles[iParticle].vx = particles[iParticle].vx + ax[iParticle]*TimeStep
particles[iParticle].vy = particles[iParticle].vy + ay[iParticle]*TimeStep
else:
particles[iParticle].vx = particles[iParticle].vy = 0.0
return particles
def UpdatePosition (particles, TimeStep):
nParticle = len(particles)
for iParticle in range(0, nParticle):
particles[iParticle].x += particles[iParticle].vx*TimeStep
particles[iParticle].y += particles[iParticle].vy*TimeStep
return particles
def update_particle_list (particles, dt):
particles=UpdateVelocity (particles, dt)
particles=UpdatePosition (particles, dt)
return particles
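# Example driver (a sketch, not in the original): a two-body test using a
# minimal Particle class mirroring the commented-out one above. Note that
# Acceleration()/UpdateVelocity() treat masses below 1e-7 as massless tracers
# that stay pinned in place.
if __name__ == '__main__':
    class Particle(object):
        def __init__(self, mass=1.0, x=0.0, y=0.0, vx=0.0, vy=0.0):
            self.mass, self.x, self.y = mass, x, y
            self.vx, self.vy = vx, vy

    # A heavy body at the origin and a light one on a roughly circular
    # orbit of radius 1 (with G = 1, the circular speed is sqrt(G*M/r) = 1).
    particles = [Particle(mass=1.0), Particle(mass=1.0e-3, x=1.0, vy=1.0)]
    for step in range(1000):
        particles = update_particle_list(particles, 0.001)
    print(particles[1].x, particles[1].y)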
# def MakeLogFile (particles, step=1):
# LogFile = open('LogFile.out', 'w')
# LogFile.write ("%d\t" % len(particles))
# LogFile.write ("%d\n\n" % step)
# for iParticle in range(0, len(particles)):
# LogFile.write ("%14.10f\t"% particles[iParticle].mass)
# LogFile.write ("%14.10f\t"% particles[iParticle].x)
# LogFile.write ("%14.10f\t"% particles[iParticle].y)
# LogFile.write ("%14.10f\t"% particles[iParticle].vx)
# LogFile.write ("%14.10f\n"% particles[iParticle].vy)
#
# LogFile.close()
# return 0
#
# def TimeEvaluation (particles, dt, nt, SaveLogFile=50, SaveXYZFile=100):
# nParticle = len(particles)
# OutputFile = open('OutputFile-2.xyz', 'w')
# for step in range (1, nt):
# update_particle_list (particles, dt)
#
# if(step%SaveLogFile == 0):
# MakeLogFile (particles, step)
#
# if(step%SaveXYZFile == 0):
# OutputFile.write ("%d\n" % nParticle)
# OutputFile.write ("MD\n")
# for iParticle in range(0, nParticle):
# # OutputFile.write ("%d\t"% (iParticle + 1))
# OutputFile.write ("Ar\t")
# OutputFile.write ("%12.8f\t"% particles[iParticle].x)
# OutputFile.write ("%12.8f\t"% particles[iParticle].y)
# OutputFile.write ("%12.8f\n"% 0.0)
#
# OutputFile.close()
# return 0
#
# def InitializeParticle (nParticle=1,
# origin_x=0.0, origin_y=0.0,
# corner_x=10.0, corner_y=10.0,
# LengthScale = 1.0,
# VelocityScale=1.0,
# MassScale=1.0,
# restart=False):
#
# particles = []
# if(restart == False):
# InitializeParticle = open('InitializeParticle.out', 'w')
# InitializeParticle.write ("%d\n" % nParticle)
# for iParticle in range(0, nParticle):
# InitializeParticle.write ("%d\t" % (iParticle+1))
# # Mass
# mass = MassScale*random.uniform(0.0, 1.0)
# InitializeParticle.write ("%14.10f\t" % mass)
#
# # Position
# xpar = LengthScale*random.uniform(origin_x, corner_x)
# ypar = LengthScale*random.uniform(origin_y, corner_y)
#
# InitializeParticle.write ("%14.10f\t" % xpar)
# InitializeParticle.write ("%14.10f\t" % ypar)
#
# # Velocity
# vxpar = random.uniform(0.0, 1.0)
# vypar = random.uniform(0.0, 1.0)
# dv = math.sqrt(vxpar*vxpar + vypar*vypar)
# vxpar = VelocityScale*vxpar/dv
# vypar = VelocityScale*vypar/dv
# InitializeParticle.write ("%14.10f\t" % vxpar)
# InitializeParticle.write ("%14.10f\n" % vypar)
#
# particle = Particle (mass, xpar, ypar, vxpar, vypar)
# particles.append (particle)
#
# InitializeParticle.close ()
# else:
# LogFile = open('LogFile.out', 'r')
#
# LogFile.close ()
# return particles
#
# par = InitializeParticle (10)
# TimeEvaluation (par, 0.01, 1000)
|
nilq/baby-python
|
python
|
import pytest
from spellbot.settings import Settings
class TestMigrations:
@pytest.mark.nosession
def test_alembic(self, settings: Settings):
from spellbot.models import create_all, reverse_all
create_all(settings.DATABASE_URL)
reverse_all(settings.DATABASE_URL)
|
nilq/baby-python
|
python
|
settings = {
"aoi":"https://gdh-data.ams3.digitaloceanspaces.com/scarborough.geojson",
"systems":["GI", "TRANS", "URBAN", "AG", "HYDRO"],
"outputdirectory":"output",
"workingdirectory": "working",
"sentinelscene": "S2B_MSIL1C_20171126T112359_N0206_R037_T30UXF_20171126T132429",
"rivers":"rivers/rivers.shp",
"watersheds":"watershed/watershed.shp"
}
processchains = [{"list": [{"id": "importer_1",
"module": "importer",
"inputs": [{"import_descr": {"source": settings['sentinelscene'],
"type": "sentinel2",
"sentinel_band": "B04"},
"param": "map",
"value": "B04"},
{"import_descr": {"source": settings['sentinelscene'],
"type": "sentinel2",
"sentinel_band": "B08"},
"param": "map",
"value": "B08"},
{"import_descr": {"source": settings['aoi'],
"type": "vector"},
"param": "map",
"value": "aoi"}]},
{"id": "g_region_1",
"module": "g.region",
"inputs": [{"param": "raster",
"value": "B04"}],
"flags": "g"},
{"id": "g_region_2",
"module": "g.region",
"inputs": [{"param": "vector",
"value": "aoi"}],
"flags": "g"},
{"id": "r_mask",
"module": "r.mask",
"inputs": [{"param": "vector",
"value": "aoi"}]},
{"id": "rmapcalc_1",
"module": "r.mapcalc",
"inputs": [{"param": "expression",
"value": "NDVI = float((B08 - B04)/(B08 + B04))"}]},
{"id": "r_univar_ndvi",
"module": "r.univar",
"inputs": [{"param": "map",
"value": "NDVI"}],
"flags": "g"},
{"id": "r_slope_aspect",
"module": "r.slope.aspect",
"inputs": [{"param": "elevation",
"value": "srtmgl1_v003_30m@srtmgl1_30m"},
{"param": "slope",
"value": "slope"}]},\
{"id": "exporter_1",
"module": "exporter",
"outputs": [{"export": {"type": "raster", "format": "GTiff"},
"param": "map",
"value": "NDVI"},
{"export": {"type": "raster", "format": "GTiff"},
"param": "map",
"value": "slope"},
# {"export": {"type": "raster", "format": "GTiff"},
# "param": "map",
# "value": "B04"},
# {"export": {"type": "raster", "format": "GTiff"},
# "param": "map",
# "value": "B08"}
]}
],
"version": "1"}]
|
nilq/baby-python
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch as th
import numpy as np
from braingp.model import Model, Param
from braingp.mean_function import Identity, Linear, Zero
from gpytorch import settings
from braingp import kernel
from doubly_stochastic_dgp.layers import SVGP_Layer
def init_layers_linear(X, Y, Z, kernels, #XYZ are pytorch tensors
num_outputs=None,
mean_function=Zero(),
Layer=SVGP_Layer,
white=False):
num_outputs = num_outputs or Y.size(1)
layers = []
    X_running, Z_running = th.tensor(X.numpy()), th.tensor(Z.numpy())  # working copies as torch tensors; kernels are gpytorch-style objects with ARD
for kern_in, kern_out in zip(kernels[:-1], kernels[1:]): #kernels is a list
dim_in = kern_in.input_dim #using gptorch kernel object type
dim_out = kern_out.input_dim
print(dim_in, dim_out)
if dim_in == dim_out:
mf = Identity()
else:
if dim_in > dim_out: # stepping down, use the pca projection
_, _, V = np.linalg.svd(X_running, full_matrices=False)
W = V[:dim_out, :].T
else: # stepping up, use identity + padding
W = np.concatenate([np.eye(dim_in), np.zeros((dim_in, dim_out - dim_in))], 1)
mf = Linear(W)
            mf.set_trainable(False)  # freeze the fixed projection (gpflow-style API; check parameterized.py -- no direct torch.nn equivalent)
        layers.append(Layer(kern_in, Z_running, dim_out, mf, white=white))
        if dim_in != dim_out:
            W_t = th.from_numpy(W)
            Z_running = th.matmul(Z_running, W_t)
            X_running = th.matmul(X_running, W_t)
# final layer
layers.append(Layer(kernels[-1], Z_running, num_outputs, mean_function, white=white))
return layers
def init_layers_input_prop(X, Y, Z, kernels,
num_outputs=None,
mean_function=Zero(),
Layer=SVGP_Layer,
white=False):
num_outputs = num_outputs or Y.shape[1]
D = X.size(1)
M = Z.size(0)
layers = []
for kern_in, kern_out in zip(kernels[:-1], kernels[1:]):
dim_in = kern_in.input_dim
dim_out = kern_out.input_dim - D
std_in = kern_in.variance.read_value()**0.5
pad = np.random.randn(M, dim_in - D) * 2. * std_in
Z_padded = np.concatenate([Z, pad], 1)
layers.append(Layer(kern_in, Z_padded, dim_out, Zero(), white=white, input_prop_dim=D))
dim_in = kernels[-1].input_dim
std_in = kernels[-2].variance.read_value()**0.5 if dim_in > D else 1.
pad = np.random.randn(M, dim_in - D) * 2. * std_in
Z_padded = np.concatenate([Z, pad], 1)
Z_padded = th.from_numpy(Z_padded)
layers.append(Layer(kernels[-1], Z_padded, num_outputs, mean_function, white=white))
return layers
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import sys
import ast
import operator
def main():
with open(sys.argv[1]) as infile:
for line in infile:
topic = ast.literal_eval(line)
sorted_x = sorted(topic.items(), key=operator.itemgetter(1), reverse=True)
print " ".join([i[0] for i in sorted_x])
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('group', models.CharField(max_length=5)),
('course', models.IntegerField()),
],
),
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('number', models.IntegerField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Log',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('mark', models.IntegerField()),
('date', models.DateField()),
('job', models.ForeignKey(to='home.Job')),
],
),
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('patronymic', models.CharField(max_length=100)),
('surname', models.CharField(max_length=100)),
('group', models.ForeignKey(null=True, to='home.Group')),
('jobs', models.ManyToManyField(to='home.Job', through='home.Log')),
],
),
migrations.CreateModel(
name='Student_Subject',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('student', models.ForeignKey(to='home.Student')),
],
),
migrations.CreateModel(
name='Subject',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Teacher',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('patronymic', models.CharField(max_length=100)),
('surname', models.CharField(max_length=100)),
('subjects', models.ManyToManyField(to='home.Subject')),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True)),
],
),
migrations.AddField(
model_name='student_subject',
name='subject',
field=models.ForeignKey(to='home.Subject'),
),
migrations.AddField(
model_name='student_subject',
name='teacher',
field=models.ForeignKey(to='home.Teacher'),
),
migrations.AddField(
model_name='student',
name='subjects',
field=models.ManyToManyField(to='home.Subject', through='home.Student_Subject'),
),
migrations.AddField(
model_name='student',
name='user',
field=models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True),
),
migrations.AddField(
model_name='log',
name='student',
field=models.ForeignKey(to='home.Student'),
),
migrations.AddField(
model_name='job',
name='subject',
field=models.ForeignKey(to='home.Subject'),
),
]
|
nilq/baby-python
|
python
|
import os
from random import shuffle
import csv
def getGenderDict():
return {
'aia' : ['female',0],
'bonnie' : ['female',0],
'jules' : ['male',1],
'malcolm' : ['male',1],
'mery' : ['female',0],
'ray' : ['male',1]
}
def getEmotionDict():
return {
'anger' : 0,
'disgust' : 1,
'fear' : 2,
'joy' : 3,
'neutral' : 4,
'sadness' : 5,
'surprise' : 6
}
def getRows(image_names, dir, genderDict, emotionsDict):
rows = []
for n in image_names:
char,emotion,_ = n.split('_')
gender = genderDict[char][1]
emotion = emotionsDict[emotion]
rows.append([dir + '/' + n, gender, emotion])
return rows
def createSplit():
genderDict = getGenderDict()
emotionsDict = getEmotionDict()
parentDir = './data/FERG_DB_256'
characters = os.listdir(parentDir)
all_train_rows = []
all_val_rows = []
all_test_rows = []
for c1 in characters:
        if c1 not in genderDict:
continue
character_emotions = os.listdir(parentDir + '/' + c1)
for c2 in character_emotions:
            if c1 not in c2:
continue
all_images = os.listdir(parentDir + '/' + c1 + '/' + c2)
shuffle(all_images)
train = all_images[:int(0.8*len(all_images))]
valAndTest = all_images[int(0.8*len(all_images)):]
val = valAndTest[:int(len(valAndTest)/2)]
test = valAndTest[int(len(valAndTest)/2):]
cur_train = getRows(train, parentDir + '/' + c1 + '/' + c2, genderDict, emotionsDict)
cur_val = getRows(val, parentDir + '/' + c1 + '/' + c2, genderDict, emotionsDict)
cur_test = getRows(test, parentDir + '/' + c1 + '/' + c2, genderDict, emotionsDict)
all_train_rows += cur_train
all_val_rows += cur_val
all_test_rows += cur_test
    with open('./data/train.csv', 'w', newline='') as f:
        csv.writer(f).writerows(all_train_rows)
    with open('./data/val.csv', 'w', newline='') as f:
        csv.writer(f).writerows(all_val_rows)
    with open('./data/test.csv', 'w', newline='') as f:
        csv.writer(f).writerows(all_test_rows)
createSplit()
|
nilq/baby-python
|
python
|
# coding: utf-8
#model url: http://nixeneko.2-d.jp/hatenablog/20170724_facedetection_model/snapshot_model.npz
import urllib.request
import os
def download_model(url, dest):
destdir = os.path.dirname(dest)
if not os.path.exists(destdir):
os.makedirs(destdir)
print("Downloading {}... \nThis may take several minutes.".format(dest))
urllib.request.urlretrieve(url, dest)
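# Minimal usage sketch: the URL comes from the comment at the top of this
# file, while the destination path here is a hypothetical example.
if __name__ == '__main__':
    MODEL_URL = 'http://nixeneko.2-d.jp/hatenablog/20170724_facedetection_model/snapshot_model.npz'
    download_model(MODEL_URL, './models/snapshot_model.npz')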
|
nilq/baby-python
|
python
|
from cartography.driftdetect.model import load_detector_from_json_file
from cartography.driftdetect.detect_drift import perform_drift_detection
from unittest.mock import MagicMock
def test_detector_no_drift():
"""
    Test that a detector that detects no drift returns nothing.
:return:
"""
mock_session = MagicMock()
mock_boltstatementresult = MagicMock()
key = "key"
results = [
{key: "1"},
{key: "2"},
{key: "3"},
{key: "4"},
{key: "5"},
{key: "6"},
]
mock_boltstatementresult.__getitem__.side_effect = results.__getitem__
mock_boltstatementresult.__iter__.side_effect = results.__iter__
mock_session.run.return_value = mock_boltstatementresult
detector = load_detector_from_json_file("tests/data/detectors/test_expectations.json")
drifts = []
for it in detector.run(mock_session, False):
drifts.append(it)
mock_session.run.assert_called_with(detector.validation_query)
assert not drifts
def test_detector_picks_up_drift():
"""
    Test that a detector picks up drift.
:return:
"""
key = "baseline_tag"
mock_session = MagicMock()
mock_boltstatementresult = MagicMock()
results = [
{key: "1"},
{key: "2"},
{key: "3"},
{key: "4"},
{key: "5"},
{key: "6"},
{key: "7"}
]
mock_boltstatementresult.__getitem__.side_effect = results.__getitem__
mock_boltstatementresult.__iter__.side_effect = results.__iter__
mock_session.run.return_value = mock_boltstatementresult
detector = load_detector_from_json_file("tests/data/detectors/test_expectations.json")
drifts = []
for it in detector.run(mock_session, False):
drifts.append(it)
mock_session.run.assert_called_with(detector.validation_query)
assert drifts
assert drifts[0] == {key: "7"}
def test_detector_multiple_expectations():
"""
    Test that multiple fields run properly.
:return:
"""
key_1 = "baseline_tag"
key_2 = "other_tag"
mock_session = MagicMock()
mock_boltstatementresult = MagicMock()
results = [
{key_1: "1", key_2: "8"},
{key_1: "2", key_2: "9"},
{key_1: "3", key_2: "10"},
{key_1: "4", key_2: "11"},
{key_1: "5", key_2: "12"},
{key_1: "6", key_2: "13"},
{key_1: "7", key_2: "14"}
]
mock_boltstatementresult.__getitem__.side_effect = results.__getitem__
mock_boltstatementresult.__iter__.side_effect = results.__iter__
mock_session.run.return_value = mock_boltstatementresult
detector = load_detector_from_json_file("tests/data/detectors/test_multiple_expectations.json")
drifts = []
for it in detector.run(mock_session, False):
drifts.append(it)
mock_session.run.assert_called_with(detector.validation_query)
assert {key_1: "7", key_2: "14"} in drifts
def test_drift_from_multiple_properties():
"""
    Tests that fields with multiple properties are handled correctly.
:return:
"""
mock_session = MagicMock()
mock_boltstatementresult = MagicMock()
key_1 = "key_1"
key_2 = "key_2"
key_3 = "key_3"
results = [
{key_1: "1", key_2: "8", key_3: ["15", "22", "29"]},
{key_1: "2", key_2: "9", key_3: ["16", "23", "30"]},
{key_1: "3", key_2: "10", key_3: ["17", "24", "31"]},
{key_1: "4", key_2: "11", key_3: ["18", "25", "32"]},
{key_1: "5", key_2: "12", key_3: ["19", "26", "33"]},
{key_1: "6", key_2: "13", key_3: ["20", "27", "34"]},
{key_1: "7", key_2: "14", key_3: ["21", "28", "35"]}
]
mock_boltstatementresult.__getitem__.side_effect = results.__getitem__
mock_boltstatementresult.__iter__.side_effect = results.__iter__
mock_session.run.return_value = mock_boltstatementresult
detector = load_detector_from_json_file("tests/data/detectors/test_multiple_properties.json")
drifts = []
for it in detector.run(mock_session, False):
drifts.append(it)
mock_session.run.assert_called_with(detector.validation_query)
print(drifts)
assert {key_1: "7", key_2: "14", key_3: ["21", "28", "35"]} in drifts
assert {key_1: "3", key_2: "10", key_3: ["17", "24", "31"]} not in drifts
def test_get_drift_from_detectors():
"""
    Tests a full run-through of drift detection.
:return:
"""
key = "baseline_tag"
key_1 = "baseline_tag"
key_2 = "other_tag"
results_1 = [
{key: "1"},
{key: "2"},
{key: "3"},
{key: "4"},
{key: "5"},
{key: "6"},
{key: "7"}
]
results_2 = [
{key_1: "1", key_2: "8"},
{key_1: "2", key_2: "9"},
{key_1: "3", key_2: "10"},
{key_1: "4", key_2: "11"},
{key_1: "5", key_2: "12"},
{key_1: "6", key_2: "13"},
{key_1: "7", key_2: "14"}
]
results_3 = [
{key: "1", key_1: "8", key_2: "15|22|29"},
{key: "2", key_1: "9", key_2: "16|23|30"},
{key: "3", key_1: "10", key_2: "17|24|31"},
{key: "4", key_1: "11", key_2: "18|25|32"},
{key: "5", key_1: "12", key_2: "19|26|33"},
{key: "6", key_1: "13", key_2: "20|27|34"}
]
mock_session = MagicMock()
mock_boltstatementresult_1 = MagicMock()
mock_boltstatementresult_2 = MagicMock()
mock_boltstatementresult_3 = MagicMock()
mock_boltstatementresult_1.__getitem__.side_effect = results_1.__getitem__
mock_boltstatementresult_1.__iter__.side_effect = results_1.__iter__
mock_boltstatementresult_2.__getitem__.side_effect = results_2.__getitem__
mock_boltstatementresult_2.__iter__.side_effect = results_2.__iter__
mock_boltstatementresult_3.__getitem__.side_effect = results_3.__getitem__
mock_boltstatementresult_3.__iter__.side_effect = results_3.__iter__
def mock_session_side_effect(*args, **kwargs):
if args[0] == "MATCH (d) RETURN d.test":
return mock_boltstatementresult_1
elif args[0] == "MATCH (d) RETURN d.test,d.test2":
return mock_boltstatementresult_2
else:
return mock_boltstatementresult_3
mock_session.run.side_effect = mock_session_side_effect
drifts = []
for drift_info, detector in perform_drift_detection(mock_session, "tests/data/detectors", False):
drifts.append(drift_info)
assert {key_1: "7", key_2: "14"} in drifts
assert {key: "7"} in drifts
assert {key_1: "3", key_2: "10"} not in drifts
def test_json_loader():
"""
    Tests that loading a detector schema succeeds.
:return:
"""
filepath = "tests/data/detectors/test_expectations.json"
detector = load_detector_from_json_file(filepath)
assert detector.name == "Test-Expectations"
assert detector.validation_query == "MATCH (d) RETURN d.test"
assert str(detector.detector_type) == "DriftDetectorType.EXPOSURE"
assert detector.expectations == [['1'], ['2'], ['3'], ['4'], ['5'], ['6']]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
deserializer interface module.
"""
from abc import abstractmethod
from threading import Lock
from pyrin.core.structs import CoreObject, MultiSingletonMeta
from pyrin.core.exceptions import CoreNotImplementedError
class DeserializerSingletonMeta(MultiSingletonMeta):
"""
deserializer singleton meta class.
this is a thread-safe implementation of singleton.
"""
_instances = dict()
_lock = Lock()
class AbstractDeserializerBase(CoreObject, metaclass=DeserializerSingletonMeta):
"""
abstract deserializer base class.
"""
@abstractmethod
def deserialize(self, value, **options):
"""
deserializes the given value.
returns `NULL` object if deserialization fails.
:param object value: value to be deserialized.
:keyword bool include_internal: specifies that any chained internal deserializer
must also be used for deserialization. if set to
False, only non-internal deserializers will be used.
defaults to True if not provided.
:raises CoreNotImplementedError: core not implemented error.
:returns: deserialized value.
"""
raise CoreNotImplementedError()
@abstractmethod
def set_next(self, deserializer):
"""
sets the next deserializer handler and returns it.
:param AbstractDeserializerBase deserializer: deserializer instance to
be set as next handler.
:raises CoreNotImplementedError: core not implemented error.
:rtype: AbstractDeserializerBase
"""
raise CoreNotImplementedError()
@property
@abstractmethod
def accepted_type(self):
"""
        gets the accepted type for this deserializer.
        values of this type could be deserialized by this deserializer.
:raises CoreNotImplementedError: core not implemented error.
:rtype: type
"""
raise CoreNotImplementedError()
@property
@abstractmethod
def internal(self):
"""
gets a value indicating that this deserializer is internal.
internal deserializers will not be used for deserializing client inputs.
:raises CoreNotImplementedError: core not implemented error.
:rtype: bool
"""
raise CoreNotImplementedError()
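# A minimal sketch of a concrete deserializer (hypothetical, for illustration
# only; real pyrin deserializers live in their own handler modules):
#
#     class SketchStringDeserializer(AbstractDeserializerBase):
#         def __init__(self):
#             super().__init__()
#             self._next = None
#
#         def deserialize(self, value, **options):
#             return str(value)
#
#         def set_next(self, deserializer):
#             self._next = deserializer
#             return deserializer
#
#         @property
#         def accepted_type(self):
#             return str
#
#         @property
#         def internal(self):
#             return False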
|
nilq/baby-python
|
python
|
import csv
import sys
from datetime import datetime
import pandas as pd
DEBUG = False
def log(message):
    # lightweight debug logger, toggled via the DEBUG flag
    if DEBUG:
        print(message)
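# Simulation flow: at each timestamp, rows with positive `diff` become
# lenders (surplus power) and rows with negative `diff` become borrowers.
# Each borrower first draws from its own battery, then from other lenders
# (both capped per house by the discharge rate), and finally falls back to
# the grid; every transfer is appended to the SSTF log CSV.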
def main(argv):
workspace = ''
# datasets = {
# 'pecanstreet': ['California', 'Austin', 'New York'],
# 'eGauge':['Colorado']
# }
datasets = {
'pecanstreet': ['California'],
'eGauge':['Colorado']
}
data_years = {
'California': '2015', # 2015 full year for PecanStreet California dataset UTC
'Austin': '2018', # 2018 full year for PecanStreet Austin dataset UTC
'New York': '2019', # 2019/5/1-10/31 half year for PecanStreet New York dataset UTC
'Colorado': '2015' # 2015 full year for eGauge dataset
}
discharge_speed = '100'
for dataset in datasets:
for location in datasets[dataset]:
            print(f'Start simulating {location} with SSTF...')
input_path = workspace + 'data/' + dataset + '/' + location + '/' + data_years[location] + '.csv'
output_path = workspace + 'data/' + dataset + '/' + location + '/logs/SSTF.csv'
# init csv file header
output_csv_header = ['timestamp', 'datetime', 'from', 'to', 'amount', 'type']
# type: share, grid, own
with open(output_path, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=output_csv_header)
writer.writeheader()
lenders = []
borrowers = []
discharge_rate = int(discharge_speed)
df = pd.read_csv(input_path)
total = len(df)
counter = 0
current_ts = 0
# house_info_path = workspace + 'data/metadata.csv'
house_info_path =workspace + 'data/' + dataset + '/' + location + '/metadata.csv'
discharge_rates = {}
# Init charge rate list
with open(house_info_path) as house_info_csv_file:
reader = csv.DictReader(house_info_csv_file)
for row in reader:
discharge_rates[row['house_id']] = discharge_rate
with open(output_path, 'a', newline='') as output_csv_file:
writer = csv.writer(output_csv_file)
with open(input_path) as input_csv_file:
reader = csv.DictReader(input_csv_file)
for row in reader:
# Skip empty row
diff = float(row['diff'])
if diff == 0:
continue
# Get timestamp
ts = int(float(row['timestamp']))
# Init current timestamp at beginning
if counter == 0:
current_ts = ts
# Increase counter
counter += 1
# process = str(counter) + '/' + str(total) + '(' + str(round(counter/total*100, 2)) + '%)'
# print(process, end='\r')
if ts != current_ts:
for bidx, borrower in enumerate(borrowers):
if len(lenders) <= 0:
log('No lender is available.')
break
# 1st. Check if can use own battery power first
for idx, lender in enumerate(lenders):
if lender['house_id'] == borrower['house_id']:
if discharge_rates[lender['house_id']] <= 0:
continue
# Borrow amount greater than own discharge rate
if borrower['diff'] >= discharge_rates[lender['house_id']]:
# Power provided by own battery is greater than discharge rate, then use discharge rate amount, keep rest for sharing
if lender['diff'] > discharge_rates[lender['house_id']]:
log('Use own: b>=d, l>d')
borrower['diff'] -= discharge_rates[lender['house_id']]
borrowers[bidx] = borrower
lender['diff'] -= discharge_rates[lender['house_id']]
lenders[idx] = lender
writer.writerow([current_ts, borrower['datetime'], lender['house_id'], borrower['house_id'], discharge_rates[lender['house_id']], 'own'])
discharge_rates[lender['house_id']] = 0
break
# Own battery cannot provide power greater than discharge rate, use up all and withdraw sharing
else:
                                                log('Use own: b>=d, l<=d')
borrower['diff'] -= lender['diff']
borrowers[bidx] = borrower
lenders.remove(lender)
discharge_rates[lender['house_id']] -= lender['diff']
writer.writerow([current_ts, borrower['datetime'], lender['house_id'], borrower['house_id'], lender['diff'], 'own'])
# Borrow amount less than own discharge rate
else:
if borrower['diff'] >= lender['diff']:
log('own: b<d, b>=l')
borrower['diff'] -= lender['diff']
borrowers[bidx] = borrower
lenders.remove(lender)
discharge_rates[lender['house_id']] -= lender['diff']
writer.writerow([current_ts, borrower['datetime'], lender['house_id'], borrower['house_id'], lender['diff'], 'own'])
else:
log('own: b<d, b<l')
lender['diff'] -= borrower['diff']
lenders[idx] = lender
discharge_rates[lender['house_id']] -= borrower['diff']
writer.writerow([current_ts, borrower['datetime'], lender['house_id'], borrower['house_id'], borrower['diff'], 'own'])
borrower['diff'] = 0
borrowers[bidx] = borrower
if borrower['diff'] == 0:
break
elif borrower['diff'] < 0:
log('Error: borrowing amount is negative!')
# 2. Borrow from other lenders
# if len(lenders) < 1:
# log('No lender is available.')
# break
lender_index = 0
while borrower['diff'] > 0:
# log(borrower['diff'])
# log(str(lender_index) + '/' + str(len(lenders)))
if lender_index < len(lenders):
lender = lenders[lender_index]
if discharge_rates[lender['house_id']] <= 0:
lender_index += 1
continue
if lender['house_id'] == borrower['house_id']:
lender_index += 1
continue
lend_amount = abs(float(lenders[lender_index]['diff']))
# log(lender['diff'])
# Borrow amount greater than own discharge rate
if borrower['diff'] >= discharge_rates[lender['house_id']]:
# Power provided by lender's battery is greater than discharge rate, then use discharge rate amount, keep rest for sharing
if lender['diff'] > discharge_rates[lender['house_id']]:
log('Share: b>=d, l>d')
borrower['diff'] -= discharge_rates[lender['house_id']]
borrowers[bidx] = borrower
lender['diff'] -= discharge_rates[lender['house_id']]
lenders[lender_index] = lender
writer.writerow([current_ts, borrower['datetime'], lender['house_id'], borrower['house_id'], discharge_rates[lender['house_id']], 'share'])
discharge_rates[lender['house_id']] = 0
# Own battery cannot provide power greater than discharge rate, use up all and withdraw sharing
else:
                                                log('Share: b>=d, l<=d')
borrower['diff'] -= lender['diff']
borrowers[bidx] = borrower
lenders.remove(lender)
discharge_rates[lender['house_id']] -= lender['diff']
writer.writerow([current_ts, borrower['datetime'], lender['house_id'], borrower['house_id'], lender['diff'], 'share'])
# Borrow amount less than lender's discharge rate
else:
if borrower['diff'] >= lender['diff']:
log('Share: b<d, b>=l')
borrower['diff'] -= lender['diff']
borrowers[bidx] = borrower
lenders.remove(lender)
discharge_rates[lender['house_id']] -= lender['diff']
writer.writerow([current_ts, borrower['datetime'], lender['house_id'], borrower['house_id'], lender['diff'], 'share'])
else:
log('Share: b<d, b<l')
lender['diff'] -= borrower['diff']
lenders[lender_index] = lender
discharge_rates[lender['house_id']] -= borrower['diff']
writer.writerow([current_ts, borrower['datetime'], lender['house_id'], borrower['house_id'], borrower['diff'], 'share'])
borrower['diff'] = 0
borrowers[bidx] = borrower
lender_index += 1
# No lenders available, get from grid
else:
log('grid')
writer.writerow([current_ts, borrower['datetime'], 0, borrower['house_id'], borrower['diff'], 'grid'])
borrower['diff'] = 0
break
if borrower['diff'] == 0:
break
elif borrower['diff'] < 0:
log('Error: borrowing amount is negative!')
                            # Reset discharge rate list
for dr in discharge_rates:
discharge_rates[dr] = discharge_rate
# Reset borrowers list
borrowers = []
# Sum up power left in batteries
battery_remain = 0
for l in lenders:
battery_remain += l['diff']
if battery_remain > 0:
dt = datetime.fromtimestamp(current_ts)
writer.writerow([current_ts, dt, '', '', battery_remain, 'battery_remain'])
current_ts = ts
row['diff'] = abs(diff)
if diff < 0:
borrowers.append(row)
else:
lenders.insert(0, row)
# log(str(counter) + ':' + str(ts))
if __name__ == "__main__":
start = datetime.now()
main(sys.argv[1:])
print("Finished in ", datetime.now()-start, '.')
|
nilq/baby-python
|
python
|
from rqalpha.api import *
def init(context):
context.S1 = "510500.XSHG"
context.UNIT = 10000
context.INIT_S = 2
context.MARGIN = 0.08
context.FIRST_P = 0
context.holdid = 0
context.sellcount = 0
context.inited = False
logger.info("RunInfo: {}".format(context.run_info))
def before_trading(context):
pass
def current_p(context):
return context.FIRST_P - ((context.holdid * context.MARGIN) * context.FIRST_P)
def next_buy_p(context):
if context.portfolio.cash < context.UNIT:
return -1
return context.FIRST_P - (((context.holdid + 1) * context.MARGIN) * context.FIRST_P)
def next_sell_p(context):
if context.portfolio.market_value < context.UNIT:
return -1
return context.FIRST_P - (((context.holdid - 1) * context.MARGIN) * context.FIRST_P)
def handle_bar(context, bar_dict):
bar = bar_dict[context.S1]
    if context.inited:
nextB = next_buy_p(context)
nextS = next_sell_p(context)
    if not context.inited:
context.inited = True
order_value(context.S1, context.UNIT * context.INIT_S, price=bar.close)
context.current_cash = 0
context.holdid = 0
context.FIRST_P = bar.open
logger.info("Make first fire portfolio: {}".format(context.portfolio))
elif bar.low <= nextB <= bar.high:
res = order_value(context.S1, context.UNIT, nextB)
if res.status == ORDER_STATUS.FILLED:
context.holdid += 1
else:
logger.info("Buy failed: {}".format(res))
elif bar.high < nextB:
res = order_value(context.S1, context.UNIT, price=bar.high)
if res.status == ORDER_STATUS.FILLED:
context.holdid += 1
else:
logger.info("Buy failed: {}".format(res))
elif bar.low <= nextS <= bar.high:
res = order_value(context.S1, -1 * context.UNIT, price=nextS)
if res.status == ORDER_STATUS.FILLED:
context.holdid -= 1
context.sellcount += 1
logger.info("----- Sell count: {}".format(context.sellcount))
else:
logger.info("Sell failed: {}".format(res))
elif nextS != -1 and bar.low > nextS:
res = order_value(context.S1, -1 * context.UNIT, price=bar.low)
if res.status == ORDER_STATUS.FILLED:
context.holdid -= 1
context.sellcount += 1
logger.info("----- Sell count: {}".format(context.sellcount))
else:
logger.info("Sell failed: {}".format(res))
def after_trading(context):
logger.info("Hold count: {}".format(context.holdid + 1))
profit = (context.portfolio.cash + context.portfolio.market_value - context.portfolio.starting_cash)
profit_pct = profit / (context.portfolio.market_value - profit)
logger.info("after_trading: market_value {}, profit {}, percent {}".
format(context.portfolio.market_value, profit, profit_pct))
|
nilq/baby-python
|
python
|
#//
#//------------------------------------------------------------------------------
#// Copyright 2007-2011 Mentor Graphics Corporation
#// Copyright 2007-2011 Cadence Design Systems, Inc.
#// Copyright 2010 Synopsys, Inc.
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#//------------------------------------------------------------------------------
#
#//------------------------------------------------------------------------------
#//
#// CLASS: uvm_push_driver #(REQ,RSP)
#//
#// Base class for a driver that passively receives transactions, i.e. does not
#// initiate request transactions. Also known as ~push~ mode. Its ports are
#// typically connected to the corresponding ports in a push sequencer as follows:
#//
#//| push_sequencer.req_port.connect(push_driver.req_export);
#//| push_driver.rsp_port.connect(push_sequencer.rsp_export);
#//
#// The ~rsp_port~ needs connecting only if the driver will use it to write
#// responses to the analysis export in the sequencer.
#//
#//------------------------------------------------------------------------------
#
#class uvm_push_driver #(type REQ=uvm_sequence_item,
# type RSP=REQ) extends uvm_component;
#
# // Port: req_export
# //
# // This export provides the blocking put interface whose default
# // implementation produces an error. Derived drivers must override ~put~
# // with an appropriate implementation (and not call super.put). Ports
# // connected to this export will supply the driver with transactions.
#
# uvm_blocking_put_imp #(REQ, uvm_push_driver #(REQ,RSP)) req_export;
#
# // Port: rsp_port
# //
# // This analysis port is used to send response transactions back to the
# // originating sequencer.
#
# uvm_analysis_port #(RSP) rsp_port;
#
# REQ req;
# RSP rsp;
#
# // Function: new
# //
# // Creates and initializes an instance of this class using the normal
# // constructor arguments for <uvm_component>: ~name~ is the name of the
# // instance, and ~parent~ is the handle to the hierarchical parent, if any.
#
# function new (string name, uvm_component parent);
# super.new(name, parent);
# req_export = new("req_export", this);
# rsp_port = new("rsp_port", this);
# endfunction
#
# function void check_port_connections();
# if (req_export.size() != 1)
# uvm_report_fatal("Connection Error",
# $sformatf("Must connect to seq_item_port(%0d)",
# req_export.size()), UVM_NONE);
# endfunction
#
# virtual function void end_of_elaboration_phase(uvm_phase phase);
# super.end_of_elaboration_phase(phase);
# check_port_connections();
# endfunction
#
# virtual task put(REQ item);
# uvm_report_fatal("UVM_PUSH_DRIVER", "Put task for push driver is not implemented", UVM_NONE);
# endtask
#
# const static string type_name = "uvm_push_driver #(REQ,RSP)";
#
# virtual function string get_type_name ();
# return type_name;
# endfunction
#
#endclass
#
|
nilq/baby-python
|
python
|
def rt(ip):
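    """Stub helper: returns a fixed list of values (apparently placeholder data for the given ip)."""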
    return [10, 15, 20]
|
nilq/baby-python
|
python
|
from unittest import TestCase
from ua_model.utils import validate_branch_point_positions
class TestFunctionUtils(TestCase):
def test_validate_branch_point_positions(self):
with self.subTest(msg='valid parameters'):
self.assertIsNone(validate_branch_point_positions(t_0=0.1, t_in=1.0))
with self.subTest(msg='valid parameters, t_0 = 0'):
self.assertIsNone(validate_branch_point_positions(t_0=0.0, t_in=0.1))
with self.subTest(msg='negative t_0'):
self.assertRaises(ValueError, validate_branch_point_positions, t_0=-0.1, t_in=1.0)
with self.subTest(msg='t_in < t_0'):
self.assertRaises(ValueError, validate_branch_point_positions, t_0=0.1, t_in=0.0)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2018 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""TTS Interface realted modules."""
from espnet.asr.asr_utils import torch_load
try:
import chainer
except ImportError:
Reporter = None
else:
class Reporter(chainer.Chain):
"""Reporter module."""
def report(self, dicts):
"""Report values from a given dict."""
for d in dicts:
chainer.reporter.report(d, self)
class TTSInterface(object):
"""TTS Interface for ESPnet model implementation."""
@staticmethod
def add_arguments(parser):
"""Add model specific argments to parser."""
return parser
def __init__(self):
"""Initilize TTS module."""
self.reporter = Reporter()
def forward(self, *args, **kwargs):
"""Calculate TTS forward propagation.
Returns:
Tensor: Loss value.
"""
raise NotImplementedError("forward method is not implemented")
def inference(self, *args, **kwargs):
"""Generate the sequence of features given the sequences of characters.
Returns:
Tensor: The sequence of generated features (L, odim).
Tensor: The sequence of stop probabilities (L,).
Tensor: The sequence of attention weights (L, T).
"""
raise NotImplementedError("inference method is not implemented")
def calculate_all_attentions(self, *args, **kwargs):
"""Calculate TTS attention weights.
        Returns:
            Tensor: Batch of attention weights (B, Lmax, Tmax).
"""
raise NotImplementedError("calculate_all_attentions method is not implemented")
def load_pretrained_model(self, model_path):
"""Load pretrained model parameters."""
torch_load(model_path, self)
@property
def attention_plot_class(self):
"""Plot attention weights."""
from espnet.asr.asr_utils import PlotAttentionReport
return PlotAttentionReport
@property
def base_plot_keys(self):
"""Return base key names to plot during training.
The keys should match what `chainer.reporter` reports.
        If you add the key `loss`,
        the reporter will report `main/loss` and `validation/main/loss` values.
        Also, `loss.png` will be created as a figure visualizing `main/loss`
        and `validation/main/loss` values.
Returns:
list[str]: Base keys to plot during training.
"""
return ["loss"]
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.5 on 2019-09-09 17:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('product', '0002_auto_20190909_2253'),
]
operations = [
migrations.AddField(
model_name='product',
name='owner',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
from data_stack.dataset.factory import BaseDatasetFactory
from data_stack.dataset.iterator import DatasetIteratorIF
from data_stack.dataset.meta import MetaFactory
from outlier_hub.datasets.toy_datasets.uniform_noise.iterator import UniformNoiseIterator
from typing import List, Tuple, Dict, Any
class UniformNoiseFactory(BaseDatasetFactory):
"""Builds a half moon dataset.
"""
def __init__(self):
super().__init__()
def _get_iterator(self, split: str, num_samples: List[int], classes: List[int], hypercube: List[Tuple[int, int]], seed: int = 1):
meta = MetaFactory.get_iterator_meta(
sample_pos=0, target_pos=1, tag_pos=2)
return UniformNoiseIterator(seed=seed, num_samples=num_samples, classes=classes, hypercube=hypercube), meta
def get_dataset_iterator(self, config: Dict[str, Any] = None) -> DatasetIteratorIF:
return self._get_iterator(**config)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import torch
classes = [0, 1]
hypercube = [(-1, 1), (3, 6)]
config = {"seed": 1, "classes": classes, "num_samples": [
2000, 2000], "hypercube": hypercube, "split": "full"}
factory = UniformNoiseFactory()
iterator, meta = factory.get_dataset_iterator(config)
samples, targets = zip(*[(s, t) for s, t, _ in iterator])
sample_tensor = torch.stack(samples)
class_0_samples = sample_tensor[torch.IntTensor(targets) == 0]
class_1_samples = sample_tensor[torch.IntTensor(targets) == 1]
plt.scatter(*list(zip(*class_0_samples)), color='red', s=1)
plt.scatter(*list(zip(*class_1_samples)), color='blue', s=1)
plt.show()
|
nilq/baby-python
|
python
|
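# Reads an integer and prints the square of its quotient by 4; the original
# problem statement behind this snippet is not given.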
n = int(input()) // 4
print(n * n)
|
nilq/baby-python
|
python
|
import pandas as pd
import numpy as np
from web_constants import *
from signatures import Signatures, get_signatures_by_mut_type
from project_data import ProjectData, get_selected_project_data
from compute_reconstruction import compute_reconstruction
from scale_samples import scale_samples
def plot_reconstruction(chosen_sigs, projects, mut_type, single_sample_id=None, normalize=False, tricounts_method=None):
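    """Return one dict of reconstruction values per sample; for a single-sample
    request, return one dict per mutation category instead."""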
reconstruction_df = compute_reconstruction(chosen_sigs, projects, mut_type, single_sample_id=single_sample_id, normalize=normalize, tricounts_method=tricounts_method)
reconstruction_dict = reconstruction_df.to_dict(orient='index')
    if single_sample_id is None:
samples = scale_samples(projects)
else:
samples = [single_sample_id]
def create_sample_obj(sample_id):
sample_obj = reconstruction_dict[sample_id]
sample_obj["sample_id"] = sample_id
return sample_obj
result = list(map(create_sample_obj, samples))
    if single_sample_id is not None:  # single sample request
result_obj = result[0]
result = []
for cat, value in result_obj.items():
result.append({
"cat_" + mut_type: cat,
"reconstruction_" + mut_type + "_" + single_sample_id: value
})
return result
|
nilq/baby-python
|
python
|
#
# PySNMP MIB module ADTRAN-IF-PERF-HISTORY-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ADTRAN-IF-PERF-HISTORY-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 16:59:29 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
adGenAOSConformance, adGenAOSCommon = mibBuilder.importSymbols("ADTRAN-AOS", "adGenAOSConformance", "adGenAOSCommon")
adIdentity, = mibBuilder.importSymbols("ADTRAN-MIB", "adIdentity")
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion")
HCPerfCurrentCount, HCPerfInvalidIntervals, HCPerfTotalCount, HCPerfTimeElapsed, HCPerfIntervalCount, HCPerfValidIntervals = mibBuilder.importSymbols("HC-PerfHist-TC-MIB", "HCPerfCurrentCount", "HCPerfInvalidIntervals", "HCPerfTotalCount", "HCPerfTimeElapsed", "HCPerfIntervalCount", "HCPerfValidIntervals")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
Bits, Gauge32, ObjectIdentity, ModuleIdentity, Counter64, iso, TimeTicks, Unsigned32, NotificationType, MibIdentifier, Counter32, Integer32, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Gauge32", "ObjectIdentity", "ModuleIdentity", "Counter64", "iso", "TimeTicks", "Unsigned32", "NotificationType", "MibIdentifier", "Counter32", "Integer32", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
adGenAosIfPerfHistoryMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 664, 6, 10000, 53, 1, 7))
adGenAosIfPerfHistoryMib.setRevisions(('2013-08-23 00:00',))
if mibBuilder.loadTexts: adGenAosIfPerfHistoryMib.setLastUpdated('201308230000Z')
if mibBuilder.loadTexts: adGenAosIfPerfHistoryMib.setOrganization('ADTRAN Inc.')
adGenAosIfPerfHistory = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7))
adIfPhCurTable = MibTable((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1), )
if mibBuilder.loadTexts: adIfPhCurTable.setStatus('current')
adIfPhCurEntry = MibTableRow((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: adIfPhCurEntry.setStatus('current')
adIfPhCurTimeElapsed15Min = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 1), HCPerfTimeElapsed()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurTimeElapsed15Min.setStatus('current')
adIfPhCurValidIntervals15Min = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 2), HCPerfValidIntervals()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurValidIntervals15Min.setStatus('current')
adIfPhCurInvalidIntervals15Min = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 3), HCPerfInvalidIntervals()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurInvalidIntervals15Min.setStatus('current')
adIfPhCurInOctets15Min = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 4), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurInOctets15Min.setStatus('current')
adIfPhCurInUcastPkts15Min = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 5), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurInUcastPkts15Min.setStatus('current')
adIfPhCurInMcastPkts15Min = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 6), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurInMcastPkts15Min.setStatus('current')
adIfPhCurInBcastPkts15Min = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 7), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurInBcastPkts15Min.setStatus('current')
adIfPhCurInDiscards15Min = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 8), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurInDiscards15Min.setStatus('current')
adIfPhCurInErrors15Min = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 9), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurInErrors15Min.setStatus('current')
adIfPhCurInUnknownProtos15Min = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 10), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurInUnknownProtos15Min.setStatus('current')
adIfPhCurOutOctets15Min = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 11), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurOutOctets15Min.setStatus('current')
adIfPhCurOutUcastPkts15Min = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 12), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurOutUcastPkts15Min.setStatus('current')
adIfPhCurOutMcastPkts15Min = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 13), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurOutMcastPkts15Min.setStatus('current')
adIfPhCurOutBcastPkts15Min = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 14), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurOutBcastPkts15Min.setStatus('current')
adIfPhCurOutDiscards15Min = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 15), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurOutDiscards15Min.setStatus('current')
adIfPhCurOutErrors15Min = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 16), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurOutErrors15Min.setStatus('current')
adIfPhCurTimeElapsed1Day = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 17), HCPerfTimeElapsed()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurTimeElapsed1Day.setStatus('current')
adIfPhCurValidIntervals1Day = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 18), HCPerfValidIntervals()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurValidIntervals1Day.setStatus('current')
adIfPhCurInvalidIntervals1Day = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 19), HCPerfInvalidIntervals()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurInvalidIntervals1Day.setStatus('current')
adIfPhCurInOctets1Day = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 20), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurInOctets1Day.setStatus('current')
adIfPhCurInUcastPkts1Day = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 21), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurInUcastPkts1Day.setStatus('current')
adIfPhCurInMcastPkts1Day = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 22), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurInMcastPkts1Day.setStatus('current')
adIfPhCurInBcastPkts1Day = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 23), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurInBcastPkts1Day.setStatus('current')
adIfPhCurInDiscards1Day = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 24), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurInDiscards1Day.setStatus('current')
adIfPhCurInErrors1Day = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 25), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurInErrors1Day.setStatus('current')
adIfPhCurInUnknownProtos1Day = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 26), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurInUnknownProtos1Day.setStatus('current')
adIfPhCurOutOctets1Day = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 27), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurOutOctets1Day.setStatus('current')
adIfPhCurOutUcastPkts1Day = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 28), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurOutUcastPkts1Day.setStatus('current')
adIfPhCurOutMcastPkts1Day = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 29), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurOutMcastPkts1Day.setStatus('current')
adIfPhCurOutBcastPkts1Day = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 30), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurOutBcastPkts1Day.setStatus('current')
adIfPhCurOutDiscards1Day = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 31), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurOutDiscards1Day.setStatus('current')
adIfPhCurOutErrors1Day = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 1, 1, 32), HCPerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPhCurOutErrors1Day.setStatus('current')
adIfPh15MinIntervalTable = MibTable((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 2), )
if mibBuilder.loadTexts: adIfPh15MinIntervalTable.setStatus('current')
adIfPh15MinIntervalEntry = MibTableRow((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh15MinIntervalNumber"))
if mibBuilder.loadTexts: adIfPh15MinIntervalEntry.setStatus('current')
adIfPh15MinIntervalNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 96)))
if mibBuilder.loadTexts: adIfPh15MinIntervalNumber.setStatus('current')
adIfPh15MinInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 2, 1, 2), HCPerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh15MinInOctets.setStatus('current')
adIfPh15MinInUcastPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 2, 1, 3), HCPerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh15MinInUcastPkts.setStatus('current')
adIfPh15MinInMcastPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 2, 1, 4), HCPerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh15MinInMcastPkts.setStatus('current')
adIfPh15MinInBcastPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 2, 1, 5), HCPerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh15MinInBcastPkts.setStatus('current')
adIfPh15MinInDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 2, 1, 6), HCPerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh15MinInDiscards.setStatus('current')
adIfPh15MinInErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 2, 1, 7), HCPerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh15MinInErrors.setStatus('current')
adIfPh15MinInUnknownProtos = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 2, 1, 8), HCPerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh15MinInUnknownProtos.setStatus('current')
adIfPh15MinOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 2, 1, 9), HCPerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh15MinOutOctets.setStatus('current')
adIfPh15MinOutUcastPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 2, 1, 10), HCPerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh15MinOutUcastPkts.setStatus('current')
adIfPh15MinOutMcastPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 2, 1, 11), HCPerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh15MinOutMcastPkts.setStatus('current')
adIfPh15MinOutBcastPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 2, 1, 12), HCPerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh15MinOutBcastPkts.setStatus('current')
adIfPh15MinOutDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 2, 1, 13), HCPerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh15MinOutDiscards.setStatus('current')
adIfPh15MinOutErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 2, 1, 14), HCPerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh15MinOutErrors.setStatus('current')
adIfPh1DayIntervalTable = MibTable((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 3), )
if mibBuilder.loadTexts: adIfPh1DayIntervalTable.setStatus('current')
adIfPh1DayIntervalEntry = MibTableRow((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh1DayIntervalNumber"))
if mibBuilder.loadTexts: adIfPh1DayIntervalEntry.setStatus('current')
adIfPh1DayIntervalNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 30)))
if mibBuilder.loadTexts: adIfPh1DayIntervalNumber.setStatus('current')
adIfPh1DayInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 3, 1, 2), HCPerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh1DayInOctets.setStatus('current')
adIfPh1DayInUcastPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 3, 1, 3), HCPerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh1DayInUcastPkts.setStatus('current')
adIfPh1DayInMcastPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 3, 1, 4), HCPerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh1DayInMcastPkts.setStatus('current')
adIfPh1DayInBcastPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 3, 1, 5), HCPerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh1DayInBcastPkts.setStatus('current')
adIfPh1DayInDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 3, 1, 6), HCPerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh1DayInDiscards.setStatus('current')
adIfPh1DayInErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 3, 1, 7), HCPerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh1DayInErrors.setStatus('current')
adIfPh1DayInUnknownProtos = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 3, 1, 8), HCPerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh1DayInUnknownProtos.setStatus('current')
adIfPh1DayOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 3, 1, 9), HCPerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh1DayOutOctets.setStatus('current')
adIfPh1DayOutUcastPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 3, 1, 10), HCPerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh1DayOutUcastPkts.setStatus('current')
adIfPh1DayOutMcastPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 3, 1, 11), HCPerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh1DayOutMcastPkts.setStatus('current')
adIfPh1DayOutBcastPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 3, 1, 12), HCPerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh1DayOutBcastPkts.setStatus('current')
adIfPh1DayOutDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 3, 1, 13), HCPerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh1DayOutDiscards.setStatus('current')
adIfPh1DayOutErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 5, 53, 1, 7, 3, 1, 14), HCPerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adIfPh1DayOutErrors.setStatus('current')
adGenAosIfPerfHistoryConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 5, 53, 99, 16))
adGenAosIfPerfHistoryGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 5, 53, 99, 16, 1))
adGenAosIfPerfHistoryCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 5, 53, 99, 16, 2))
adGenAosIfPerfHistoryCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 664, 5, 53, 99, 16, 2, 1)).setObjects(("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurGroup"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh15MinIntervalGroup"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh1DayIntervalGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
adGenAosIfPerfHistoryCompliance = adGenAosIfPerfHistoryCompliance.setStatus('current')
adIfPhCurGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 664, 5, 53, 99, 16, 1, 1)).setObjects(("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurTimeElapsed15Min"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurValidIntervals15Min"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurInvalidIntervals15Min"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurInOctets15Min"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurInUcastPkts15Min"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurInMcastPkts15Min"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurInBcastPkts15Min"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurInDiscards15Min"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurInErrors15Min"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurInUnknownProtos15Min"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurOutOctets15Min"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurOutUcastPkts15Min"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurOutMcastPkts15Min"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurOutBcastPkts15Min"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurOutDiscards15Min"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurOutErrors15Min"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurTimeElapsed1Day"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurValidIntervals1Day"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurInvalidIntervals1Day"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurInOctets1Day"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurInUcastPkts1Day"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurInMcastPkts1Day"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurInBcastPkts1Day"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurInDiscards1Day"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurInErrors1Day"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurInUnknownProtos1Day"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurOutOctets1Day"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurOutUcastPkts1Day"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurOutMcastPkts1Day"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurOutBcastPkts1Day"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurOutDiscards1Day"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPhCurOutErrors1Day"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
adIfPhCurGroup = adIfPhCurGroup.setStatus('current')
adIfPh15MinIntervalGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 664, 5, 53, 99, 16, 1, 2)).setObjects(("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh15MinInOctets"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh15MinInUcastPkts"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh15MinInMcastPkts"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh15MinInBcastPkts"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh15MinInDiscards"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh15MinInErrors"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh15MinInUnknownProtos"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh15MinOutOctets"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh15MinOutUcastPkts"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh15MinOutMcastPkts"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh15MinOutBcastPkts"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh15MinOutDiscards"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh15MinOutErrors"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
adIfPh15MinIntervalGroup = adIfPh15MinIntervalGroup.setStatus('current')
adIfPh1DayIntervalGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 664, 5, 53, 99, 16, 1, 3)).setObjects(("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh1DayInOctets"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh1DayInUcastPkts"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh1DayInMcastPkts"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh1DayInBcastPkts"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh1DayInDiscards"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh1DayInErrors"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh1DayInUnknownProtos"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh1DayOutOctets"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh1DayOutUcastPkts"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh1DayOutMcastPkts"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh1DayOutBcastPkts"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh1DayOutDiscards"), ("ADTRAN-IF-PERF-HISTORY-MIB", "adIfPh1DayOutErrors"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
adIfPh1DayIntervalGroup = adIfPh1DayIntervalGroup.setStatus('current')
mibBuilder.exportSymbols("ADTRAN-IF-PERF-HISTORY-MIB", adIfPhCurInDiscards15Min=adIfPhCurInDiscards15Min, adIfPh15MinInOctets=adIfPh15MinInOctets, adIfPh1DayIntervalGroup=adIfPh1DayIntervalGroup, adIfPh15MinOutBcastPkts=adIfPh15MinOutBcastPkts, adIfPhCurValidIntervals1Day=adIfPhCurValidIntervals1Day, adIfPhCurInErrors1Day=adIfPhCurInErrors1Day, adIfPhCurInUcastPkts15Min=adIfPhCurInUcastPkts15Min, adIfPh1DayIntervalNumber=adIfPh1DayIntervalNumber, adIfPh15MinIntervalGroup=adIfPh15MinIntervalGroup, adIfPhCurInvalidIntervals1Day=adIfPhCurInvalidIntervals1Day, adIfPh1DayInUcastPkts=adIfPh1DayInUcastPkts, adIfPhCurTable=adIfPhCurTable, adIfPhCurTimeElapsed15Min=adIfPhCurTimeElapsed15Min, adIfPhCurOutOctets15Min=adIfPhCurOutOctets15Min, adIfPh1DayOutUcastPkts=adIfPh1DayOutUcastPkts, adIfPhCurInMcastPkts15Min=adIfPhCurInMcastPkts15Min, adIfPh1DayInUnknownProtos=adIfPh1DayInUnknownProtos, adIfPh1DayInOctets=adIfPh1DayInOctets, adIfPh1DayOutBcastPkts=adIfPh1DayOutBcastPkts, adGenAosIfPerfHistoryMib=adGenAosIfPerfHistoryMib, adIfPh15MinInUcastPkts=adIfPh15MinInUcastPkts, adIfPhCurValidIntervals15Min=adIfPhCurValidIntervals15Min, adIfPhCurTimeElapsed1Day=adIfPhCurTimeElapsed1Day, adIfPhCurOutUcastPkts15Min=adIfPhCurOutUcastPkts15Min, adIfPhCurOutUcastPkts1Day=adIfPhCurOutUcastPkts1Day, adIfPh15MinOutDiscards=adIfPh15MinOutDiscards, adIfPh15MinIntervalNumber=adIfPh15MinIntervalNumber, adIfPhCurInErrors15Min=adIfPhCurInErrors15Min, adIfPhCurOutErrors1Day=adIfPhCurOutErrors1Day, adIfPh1DayIntervalEntry=adIfPh1DayIntervalEntry, adIfPh1DayInDiscards=adIfPh1DayInDiscards, adIfPhCurInOctets1Day=adIfPhCurInOctets1Day, adIfPhCurInUnknownProtos15Min=adIfPhCurInUnknownProtos15Min, adIfPhCurOutBcastPkts1Day=adIfPhCurOutBcastPkts1Day, adIfPhCurOutErrors15Min=adIfPhCurOutErrors15Min, adGenAosIfPerfHistoryCompliance=adGenAosIfPerfHistoryCompliance, PYSNMP_MODULE_ID=adGenAosIfPerfHistoryMib, adIfPh1DayOutDiscards=adIfPh1DayOutDiscards, adIfPh1DayIntervalTable=adIfPh1DayIntervalTable, adIfPhCurOutDiscards15Min=adIfPhCurOutDiscards15Min, adGenAosIfPerfHistoryGroups=adGenAosIfPerfHistoryGroups, adIfPh15MinInMcastPkts=adIfPh15MinInMcastPkts, adIfPh15MinIntervalEntry=adIfPh15MinIntervalEntry, adIfPh1DayOutErrors=adIfPh1DayOutErrors, adGenAosIfPerfHistoryCompliances=adGenAosIfPerfHistoryCompliances, adIfPhCurOutMcastPkts1Day=adIfPhCurOutMcastPkts1Day, adIfPhCurEntry=adIfPhCurEntry, adIfPh15MinOutUcastPkts=adIfPh15MinOutUcastPkts, adIfPh1DayInMcastPkts=adIfPh1DayInMcastPkts, adIfPhCurInMcastPkts1Day=adIfPhCurInMcastPkts1Day, adIfPh1DayInErrors=adIfPh1DayInErrors, adIfPhCurOutMcastPkts15Min=adIfPhCurOutMcastPkts15Min, adIfPh1DayOutMcastPkts=adIfPh1DayOutMcastPkts, adIfPh1DayOutOctets=adIfPh1DayOutOctets, adGenAosIfPerfHistory=adGenAosIfPerfHistory, adIfPhCurInUcastPkts1Day=adIfPhCurInUcastPkts1Day, adIfPhCurOutBcastPkts15Min=adIfPhCurOutBcastPkts15Min, adIfPhCurInUnknownProtos1Day=adIfPhCurInUnknownProtos1Day, adIfPhCurInDiscards1Day=adIfPhCurInDiscards1Day, adIfPh15MinInErrors=adIfPh15MinInErrors, adIfPhCurInBcastPkts15Min=adIfPhCurInBcastPkts15Min, adIfPh15MinIntervalTable=adIfPh15MinIntervalTable, adIfPhCurInvalidIntervals15Min=adIfPhCurInvalidIntervals15Min, adIfPh15MinInBcastPkts=adIfPh15MinInBcastPkts, adIfPh15MinOutOctets=adIfPh15MinOutOctets, adIfPh15MinOutMcastPkts=adIfPh15MinOutMcastPkts, adIfPhCurOutDiscards1Day=adIfPhCurOutDiscards1Day, adIfPh15MinInDiscards=adIfPh15MinInDiscards, adIfPh15MinInUnknownProtos=adIfPh15MinInUnknownProtos, adIfPhCurInBcastPkts1Day=adIfPhCurInBcastPkts1Day, 
adIfPh15MinOutErrors=adIfPh15MinOutErrors, adGenAosIfPerfHistoryConformance=adGenAosIfPerfHistoryConformance, adIfPhCurGroup=adIfPhCurGroup, adIfPhCurOutOctets1Day=adIfPhCurOutOctets1Day, adIfPhCurInOctets15Min=adIfPhCurInOctets15Min, adIfPh1DayInBcastPkts=adIfPh1DayInBcastPkts)
|
nilq/baby-python
|
python
|
from collections import OrderedDict
import gin
import matplotlib.cm as cm
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import seaborn as sns
import torch
from graphviz import Digraph
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import traceback
from causal_util.helpers import lstdct2dctlst
from sparse_causal_model_learner_rl.trainable.helpers import params_shape, flatten_params
from sparse_causal_model_learner_rl.trainable.helpers import unflatten_params
import logging
import os
from imageio import imread
import cv2
def add_artifact(fn, ex, do_sacred, epochs, epoch_info):
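    """Register `fn` as a sacred artifact (or just log its path) and, for .png
    files, attach a downscaled CHW float image to `epoch_info`."""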
if do_sacred:
ex.add_artifact(fn, name=("epoch_%05d_" % epochs) + os.path.basename(fn))
else:
logging.info(f"Artifact available: {fn}")
# export of images to tensorflow (super slow...)
if fn.endswith('.png'):
try:
# downscaling the image as ray is slow with big images...
img = imread(fn, pilmode='RGB')
x, y = img.shape[0:2]
factor_x, factor_y = 1, 1
mx, my = 150., 150.
if x > mx:
factor_x = mx / x
if y > my:
factor_y = my / y
factor = min(factor_x, factor_y)
if factor != 1:
new_shape = (x * factor, y * factor)
new_shape = tuple((int(t) for t in new_shape))[::-1]
img = cv2.resize(img, new_shape, interpolation=cv2.INTER_AREA)
img = np.array(img, dtype=np.float32) / 255.
img = img.swapaxes(0, 2)
img = img.swapaxes(1, 2)
# img = np.expand_dims(img, 0)
# img = np.expand_dims(img, 0)
epoch_info[os.path.basename(fn)[:-4]] = img
except Exception as e:
logging.error(f"Can't read image: {fn} {e} {type(e)}")
print(traceback.format_exc())
@gin.configurable
def plot_model(model, vmin=None, vmax=None, additional_features=None,
singlecolor_palette=False):
"""Plot models (action and features) as a heatmap."""
cm = sns.diverging_palette(0, 129, l=70, s=100, n=500, center="dark")
if singlecolor_palette:
cm = sns.dark_palette(np.array((148, 255, 0)) / 255., n_colors=500)
fig = plt.figure(figsize=(10, 5))
fig.patch.set_facecolor('xkcd:mint green')
Mf, Ma = model.Mf, model.Ma
plt.subplot(1, 2, 1)
plt.title("Model for features")
xt_f = ['f%02d' % i for i in range(Mf.shape[1])]
xt_a = ['a%02d' % i for i in range(Ma.shape[1])]
yt = ['f\'%02d' % i for i in range(Mf.shape[0])]
if additional_features:
yt[-len(additional_features):] = additional_features
max_f = np.max(np.abs(Mf))
vmin_ = vmin if vmin is not None else -max_f
vmax_ = vmax if vmax is not None else max_f
sns.heatmap(Mf, vmin=vmin_, vmax=vmax_, cmap=cm,
xticklabels=xt_f, yticklabels=yt)
plt.xlabel('Old features')
plt.ylabel('New features')
plt.subplot(1, 2, 2)
plt.title("Model for actions")
max_a = np.max(np.abs(Ma))
vmin_ = vmin if vmin is not None else -max_a
vmax_ = vmax if vmax is not None else max_a
sns.heatmap(Ma, vmin=vmin_, vmax=vmax_, cmap=cm,
xticklabels=xt_a, yticklabels=yt)
plt.xlabel('Actions')
plt.ylabel('New features')
return fig
def select_threshold(array, name='exp', eps=1e-10, do_plot=True, do_log=True, thr_half=0.1):
"""Select threshold for a matrix."""
try:
if not do_log:
eps = 0
array = np.array(array)
        # replace exact zeros with eps so the log transform below is defined
        array[array == 0.0] = eps
aflat = np.abs(array.flatten())
if np.max(aflat) - np.min(aflat) < thr_half:
return 0.5
if do_log:
aflat = np.log(aflat)
x = pd.DataFrame({'x': aflat})
kmeans = KMeans(n_clusters=2)
kmeans.fit_transform(X=np.array(x.x).reshape((-1, 1)))
x['label'] = kmeans.labels_
clusters = np.argsort([np.min(df.x) for l, df in x.groupby('label')])
l = np.max(x.x[x.label == clusters[0]])
r = np.min(x.x[x.label == clusters[1]])
assert l < r
threshold = (l + r) / 2
if do_plot:
fig = plt.figure()
plt.hist(x.x)
plt.axvline(threshold, label='threshold')
plt.legend()
plt.savefig(f"threshold_{name}.png", bbox_inches='tight')
plt.clf()
plt.close(fig)
        if do_log:
            threshold = np.exp(threshold)
return threshold
except Exception as e:
if np.isnan(array).any():
raise ValueError(f"Threshold selection failed (NaN): {name} {type(e)} {e} {array}")
else:
print(f"Threshold selection failed (no NaN): {name} {type(e)} {e} {array}")
print(traceback.format_exc())
return 0.0
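# Example (a sketch with made-up numbers): for a matrix whose entries fall into
# two well-separated magnitude clusters, select_threshold returns a cut between them:
#
#   m = np.array([[1e-4, 2e-4], [0.9, 1.1]])
#   thr = select_threshold(m, name='demo', do_plot=False)
#   # thr lies between 2e-4 and 0.9 (a geometric midpoint, since do_log=True)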
@gin.configurable
def graph_for_matrices(model, threshold_act=0.2, threshold_f=0.2, do_write=True,
additional_features=None,
last_is_constant=False,
feature_names=None,
engine='dot'):
"""Visualize matrices as a graph."""
if additional_features is None:
additional_features = []
Mf, Ma = model.Mf, model.Ma
# dimension
actions = Ma.shape[1]
features = Mf.shape[1]
Mf_t = np.abs(Mf) > threshold_f
Ma_t = np.abs(Ma) > threshold_act
keep_actions = np.where(np.max(Ma_t, axis=0))[0]
keep_features = np.where(Mf_t)
keep_features = set(keep_features[0]) | set(keep_features[1])
ps = Digraph(name='Causal model', engine=engine) # ,
# node_attr={'shape': 'plaintext'})
additional_features_dct = dict(
zip(range(Mf.shape[0])[-len(additional_features):], additional_features))
feature_names_dct = {}
if feature_names is not None:
feature_names_dct = dict(zip(range(Mf.shape[1]), feature_names))
def feature_name(idx):
if last_is_constant and idx == features - 1:
return 'const'
if idx in additional_features_dct:
return additional_features_dct[idx]
elif idx in feature_names_dct:
return feature_names_dct[idx]
else:
return 'f%02d' % idx
# adding features nodes
for f in range(features):
if f not in keep_features: continue
ps.node(feature_name(f), color='green')
# ps.node("f'%02d" % f, color='blue')
# adding action edges
for a in range(actions):
if a not in keep_actions: continue
ps.node('a%02d' % a, color='red')
# adding edges
edges = 0
for f1, a in zip(*np.where(Ma_t)):
ps.edge('a%02d' % a, feature_name(f1))
edges += 1
for f1, f in zip(*np.where(Mf_t)):
ps.edge(feature_name(f), feature_name(f1))
edges += 1
max_edges = features ** 2 + actions * features
percent = int(100 - 100. * edges / max_edges)
# print("Number of edges: %d out of %d, sparsity %.2f%%" % \
# (edges, max_edges, percent))
f_out = None
if do_write:
f_out = f"CausalModel"
ps.render(filename=f_out, format='png')
return ps, f_out
def get_weights_from_learner(learner, weight_names):
"""Get history from a learner for specific weights only."""
keys = [f"weights/{weight}" for weight in weight_names]
history = lstdct2dctlst(learner.history)
lengths = [len(history[key]) for key in keys]
assert all(lengths[0] == l for l in lengths)
result = []
for i in range(lengths[0]):
weights_now = [history[f"weights/{weight}"][i] for weight in weight_names]
result.append(weights_now)
return result
def total_loss(learner, opt_label='opt1'):
"""Get total loss for an optimizer"""
total_loss = 0
for loss_label in learner.config['execution'][opt_label]:
loss = learner.config['losses'][loss_label]
        if learner._context_cache is None:
            learner._context  # evaluating the property is assumed to populate _context_cache
value = loss['fcn'](**learner._context_cache)
coeff = loss['coeff']
if isinstance(value, dict):
value = value['loss']
total_loss += coeff * value
return total_loss.item() if hasattr(total_loss, 'item') else total_loss
def set_weights(weights, data_numpy):
"""Set weights from numpy arrays."""
assert len(weights) == len(data_numpy)
for w, data in zip(weights, data_numpy):
w.data = torch.from_numpy(data).to(w.dtype).to(w.device)
def with_weights(weights_list, dest_shape=None):
"""Decorate a function: make it take additional weights argument."""
def wrap(f):
def g(w, weights_list=weights_list, dest_shape=dest_shape, *args, **kwargs):
"""Call f with given weights."""
            # unflattening parameters if requested
if dest_shape is not None:
w = unflatten_params(w, dest_shape)
# setting weights
set_weights(weights=weights_list, data_numpy=w)
# calling the original function
return f(*args, **kwargs)
return g
return wrap
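# Example (a sketch, assuming a toy torch module; names are illustrative):
#
#   lin = torch.nn.Linear(2, 1)
#   weights = list(lin.parameters())
#   loss = lambda: lin(torch.ones(1, 2)).sum()
#   loss_at = with_weights(weights)(loss)
#   value = loss_at([w.detach().numpy() for w in weights])  # loss at given weights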
def weight_name_to_param(trainables, name):
"""Return a torch variable corresponding to a name in trainables."""
trainable_name, weight_name = name.split('/')
return OrderedDict(trainables[trainable_name].named_parameters())[weight_name]
def select_weights(trainables, weight_names):
"""Select weights from models by names."""
return [weight_name_to_param(trainables, w) for w in weight_names]
def loss_and_history(learner, loss, weight_names):
"""Return loss function and flat history, given weight names."""
# list of ALL trainable variables
trainables = learner.trainables
# relevant weights history
weights_history = get_weights_from_learner(learner, weight_names)
# parameters to track/vary
weights = select_weights(trainables, weight_names)
# destination shape
shape = params_shape(weights)
# function taking parameters and outputting loss
loss_w = with_weights(weights, dest_shape=shape)(loss)
# history of weight changes (flattened)
flat_history = [flatten_params(p) for p in weights_history]
return loss_w, flat_history
@gin.configurable
def plot_contour(flat_history, loss_w, scale=5, n=50):
"""Contour plot from PCA history with loss values."""
pca = PCA(n_components=2)
flat_history_pca = pca.fit_transform(flat_history)
R = np.max(np.abs(flat_history_pca), axis=0)
R *= scale
x = np.linspace(-R[0], R[0], n)
y = np.linspace(-R[1], R[1], n)
X, Y = np.meshgrid(x, y)
Z = np.zeros(X.shape)
for i in range(Z.shape[0]):
for j in range(Z.shape[1]):
xys = np.array([[X[i, j], Y[i, j]]])
w = pca.inverse_transform(xys)[0]
Z[i, j] = loss_w(w)
fig, ax = plt.subplots(figsize=(10, 20))
ax.set_title('Loss contour plot')
Zlog = np.log(Z)
extent = (-R[0], R[0], -R[1], R[1])
im = ax.imshow(Zlog, interpolation='bilinear', origin='lower',
cmap=cm.RdGy, extent=extent)
levels = np.linspace(np.min(Zlog), np.max(Zlog), 10)
CS = ax.contour(Zlog, levels, origin='lower', extend='both',
cmap='gray',
linewidths=2, extent=extent)
# make a colorbar for the contour lines
# CB = fig.colorbar(CS, shrink=0.8)
ax.clabel(CS, inline=True, fontsize=10)
# We can still add a colorbar for the image, too.
CBI = fig.colorbar(im, orientation='horizontal', shrink=0.8)
# l, b, w, h = ax.get_position().bounds
# ll, bb, ww, hh = CB.ax.get_position().bounds
# CB.ax.set_position([ll, b + 0.1*h, ww, h*0.8])
plt.plot(*zip(*flat_history_pca))
plt.scatter(*flat_history_pca[0], s=200, marker='<', color='blue', label='Start')
plt.scatter(*flat_history_pca[-1], s=200, marker='*', color='blue', label='End')
plt.legend()
return fig, ax
def get_mesh(scale=5, n=50):
"""Get a mesh of a given scale with a given number of points."""
# computing the mesh
xs = np.linspace(-scale, scale, n)
ys = np.linspace(-scale, scale, n)
xys = []
X = []
Y = []
for x in xs:
for y in ys:
xys.append((x, y))
X.append(x)
Y.append(y)
return xs, ys, xys, X, Y
@gin.configurable
def plot_3d(flat_history, loss_w, scale=5, n=50):
"""Plot the 3D loss landscape."""
pca = PCA(n_components=2)
flat_history_pca = pca.fit_transform(flat_history)
losses = [loss_w(w) for w in flat_history]
z_step_fraction = 0.1
R = np.max(np.linalg.norm(flat_history_pca, axis=1))
R *= scale
xs, ys, xys, X, Y = get_mesh(n=n, scale=R)
# computing values on the mesh
losses_mesh = []
for params in pca.inverse_transform(xys):
losses_mesh.append(loss_w(params))
Z = losses_mesh
Zmin = np.min(Z)
Zmax = np.max(Z)
Zstep = (Zmax - Zmin) * z_step_fraction
# Doing 3d plot
lighting = dict(ambient=0.4,
diffuse=1,
fresnel=4,
specular=0.5,
roughness=0.05)
lightposition = dict(x=0,
y=5,
z=min(10000, Zmax + 5))
trace2 = go.Scatter3d(x=flat_history_pca[:, 0], y=flat_history_pca[:, 1], z=losses,
marker=dict(size=4, color=losses, ),
line=dict(color='darkblue', width=2)
)
trace3 = go.Surface(x=xs, y=ys, z=np.array(Z).reshape(n, n).T, opacity=0.5,
contours_z=dict(show=True, usecolormap=True,
highlightcolor="limegreen", project_z=True),
lighting=lighting,
lightposition=lightposition
)
# Configure the layout.
layout = go.Layout(
margin={'l': 0, 'r': 0, 'b': 0, 't': 0}
)
data = [trace2, trace3]
plot_figure = go.Figure(data=data, layout=layout)
plot_figure.update_layout(scene=
dict(xaxis_title='PCA1',
yaxis_title='PCA2',
zaxis_title='loss'),
width=700,
margin=dict(r=20, b=10, l=10, t=10))
return plot_figure
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# coding: utf-8
from zencad import *
def section(w, h, l, t, d, d2):
return (
box(2 * t + w, t + l, 2 * t + h)
- box(w, l, h).translate(t, 0, t)
- box(w - 2 * d, l, h + 2 * t).translate(t + d, 0, 0)
- box(w, l + t, h - d2).translate(t, 0, d2 + t)
)
# n, m - grid (matrix) parameters.
# w, h, l - niche dimensions.
# t - wall thickness.
# d - support ledge overhang.
# d2 - rear bumper height.
def organizer(m, n, w, h, l, t, d, d2):
sect = section(w, h, l, t, d, d2)
line = union([sect.translate(j * (w + t), 0, 0) for j in range(0, m)])
arr = []
for i in range(0, n):
arr.append(line.up(i * (h + t)))
arr.append(box(w * m + t * (m + 1), l + t, t))
arr.append(box(w * m + t * (m + 1), l + t, t).up(n * (h + t)))
return union(arr)
if __name__ == "__main__":
m = organizer(3, 5, 27, 20, 64, 1.5, 5, 5)
display(m)
show()
|
nilq/baby-python
|
python
|
import uuid
import time
import hashlib
import json
def get_event_metadata():
return {
"run_id": str(uuid.uuid1()),
"event_id": str(uuid.uuid4())
}
# Python pretty-prints JSON objects by default; we change the separators to get a
# bare-bones stringifying/dumping function that matches other languages' implementations
def generate_md5_hash_from_payload(payload:dict):
return hashlib.md5(json.dumps(payload, separators=(',', ':')).encode('utf-8')).hexdigest()
def generate_sha256_hash_from_payload(payload:dict):
return hashlib.sha256(json.dumps(payload, separators=(',', ':')).encode('utf-8')).hexdigest()
def enrich_valid_event(event, version, count):
metadata = get_event_metadata()
event['pipeline']['run_id'] = metadata['run_id']
event['event']['id'] = metadata['event_id']
event['data']['checksum_md5'] = generate_md5_hash_from_payload(event['data']['payload'])
event['data']['checksum_sha256'] = generate_sha256_hash_from_payload(event['data']['payload'])
event['reporter']['version'] = version
event['reporter']['sequence'] = count
event['reporter']['timestamp'] = round(time.time())
return event
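# Example (sketch): the compact separators make the hash reproducible across
# languages; JavaScript's JSON.stringify({a: 1}) yields the same string hashed here:
#
#   generate_md5_hash_from_payload({"a": 1})
#   # == hashlib.md5(b'{"a":1}').hexdigest()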
|
nilq/baby-python
|
python
|
"""Tables Utilities"""
import logging
from typing import Dict
import numpy as np
import tensorflow as tf
from deepr.utils.field import TensorType
LOGGER = logging.getLogger(__name__)
class TableContext:
"""Context Manager to reuse Tensorflow tables.
Tensorflow does not have a ``tf.get_variable`` equivalent for
tables. The ``TableContext`` is here to provide this functionality.
Example
-------
>>> import deepr
>>> with deepr.utils.TableContext() as tables:
... table = deepr.utils.table_from_mapping(name="my_table", mapping={1: 2})
... tables.get("my_table") is table
True
>>> with deepr.utils.TableContext():
... table = deepr.utils.table_from_mapping(name="my_table", mapping={1: 2})
... reused = deepr.utils.table_from_mapping(name="my_table", reuse=True)
... table is reused
True
"""
_ACTIVE = None
def __init__(self):
if TableContext._ACTIVE is not None:
msg = "TableContext already active."
raise ValueError(msg)
TableContext._ACTIVE = self
self._tables = {}
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
def __contains__(self, name: str):
return name in self._tables
def close(self):
TableContext._ACTIVE = None
self._tables.clear()
def get(self, name: str):
if name not in self._tables:
msg = f"Table '{name}' not in tables. Did you forget a reuse=True?"
raise KeyError(msg)
return self._tables[name]
def set(self, name: str, table):
if name in self._tables:
msg = f"Table '{name}' already exists. Did you forget a reuse=True?"
raise ValueError(msg)
self._tables[name] = table
@classmethod
def is_active(cls):
return cls._ACTIVE is not None
@classmethod
def active(cls):
if cls._ACTIVE is None:
msg = "No active TableContext found. Wrap your code in a `with TableContext():`"
raise ValueError(msg)
return cls._ACTIVE
def table_from_file(name: str, path: str = None, key_dtype=None, reuse: bool = False, default_value: int = -1):
"""Create table from file"""
if reuse is True or (reuse is tf.AUTO_REUSE and name in TableContext.active()):
return TableContext.active().get(name)
else:
LOGGER.info(f"Creating table {name} from {path}")
if path is None:
raise ValueError("Path cannot be None")
table = tf.contrib.lookup.index_table_from_file(
vocabulary_file=path, name=name, key_dtype=key_dtype, default_value=default_value
)
if TableContext.is_active():
TableContext.active().set(name=name, table=table)
return table
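# Example (sketch, assuming 'vocab.txt' holds one token per line):
#
#   with TableContext():
#       table = table_from_file(name="vocab", path="vocab.txt")
#       reused = table_from_file(name="vocab", reuse=True)  # same table object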
def index_to_string_table_from_file(
name: str, path: str = None, vocab_size: int = None, default_value="UNK", reuse: bool = False
):
"""Create reverse table from file"""
if reuse is True or (reuse is tf.AUTO_REUSE and name in TableContext.active()):
return TableContext.active().get(name)
else:
LOGGER.info(f"Creating reverse table {name} from {path}")
if path is None:
raise ValueError("Path cannot be None")
table = tf.contrib.lookup.index_to_string_table_from_file(
vocabulary_file=path, name=name, vocab_size=vocab_size, default_value=default_value
)
if TableContext.is_active():
TableContext.active().set(name=name, table=table)
return table
def table_from_mapping(
name: str, mapping: Dict = None, default_value=None, key_dtype=None, value_dtype=None, reuse: bool = False
):
"""Create table from mapping"""
if reuse is True or (reuse is tf.AUTO_REUSE and name in TableContext.active()):
return TableContext.active().get(name)
else:
LOGGER.info(f"Creating table {name} from mapping.")
if mapping is None:
raise ValueError("Mapping cannot be None")
# Convert mapping to arrays of keys and values
keys, values = zip(*mapping.items()) # type: ignore
keys_np = np.array(keys)
values_np = np.array(values)
# Infer default value if not given
if default_value is None:
default_value = TensorType(type(values_np[0].item())).default
# Infer types if not given
if key_dtype is None:
key_dtype = TensorType(type(keys_np[0].item())).tf
if value_dtype is None:
value_dtype = TensorType(type(values_np[0].item())).tf
# Create table
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(
keys=keys_np, values=values_np, key_dtype=key_dtype, value_dtype=value_dtype
),
name=name,
default_value=default_value,
)
if TableContext.is_active():
TableContext.active().set(name=name, table=table)
return table
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from nbformat import v3, v4
import os
import sys
with open(sys.argv[1]) as f:
text = f.read()
nb = v3.reads_py(text)
nb = v4.upgrade(nb)
with open(os.path.splitext(sys.argv[1])[0] + ".ipynb", "w") as f:
f.write(v4.writes(nb))
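# Example (assumed invocation): `python convert.py notebook.py` upgrades the v3
# py-format text to a v4 notebook and writes notebook.ipynb next to the input.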
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import List
import functools
from .plot_manager import plot_manager
class Plot:
"""
Plotting function wrapper.
"""
def __init__(self, name, data_requirements: List, func):
self.plot_func = func
self.data_requirements = data_requirements
self.name = name
plot_manager.register(self)
def __call__(self, ax, data, *args, **kwargs):
        if self.data_requirements is not None:
            # If data checking is enabled, validate the input against the requirements.
            for requirement in self.data_requirements:
                if requirement not in data:
                    raise RuntimeError(
                        f'Data requirement for {self.name} not satisfied: {requirement}'
                    )
self.plot_func(ax, data, *args, **kwargs)
def plot(name: str, data_requirements: List = None):
    """
    Decorator to define a plot.
    :param name:
        Type name of this plot. Used in all plot_figure configurations.
    :param data_requirements:
        Data requirements.
        These will be used to check input data before plotting.
        Only works if data is subscriptable.
        None if you want to disable this feature.
    :return:
        Decorator.
    """
def decorator(func):
plot = Plot(name, data_requirements, func)
# Copy docstring and function signature.
functools.update_wrapper(plot, func)
return plot
return decorator
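# Example (a sketch, assuming a matplotlib Axes and dict-like data):
#
#   @plot('scatter_xy', data_requirements=['x', 'y'])
#   def scatter_xy(ax, data):
#       ax.scatter(data['x'], data['y'])
#
# Calling the registered plot with data missing 'x' or 'y' raises a RuntimeError
# before any plotting happens.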
|
nilq/baby-python
|
python
|
from django.shortcuts import render
from branca.element import MacroElement
from jinja2 import Template
# generic base view
from django.views.generic import TemplateView
#folium
import folium
import geojson
from folium import plugins
import pandas as pd
from folium.plugins import MarkerCluster
# import ee
# from geemap import geojson_to_ee, ee_to_geojson
# from ipyleaflet import GeoJSON, Marker, MarkerCluster
# ee.Authenticate()
# ee.Initialize()
# frontend
#home
# import ee
service_account = 'cajulab@benin-cajulab-web-application.iam.gserviceaccount.com'
# credentials = ee.ServiceAccountCredentials(service_account, 'privatekey.json')
# ee.Initialize(credentials)
class FloatImage(MacroElement):
"""Adds a floating image in HTML canvas on top of the map."""
_template = Template("""
{% macro header(this,kwargs) %}
<style>
#{{this.get_name()}} {
position:absolute;
bottom:{{this.bottom}}%;
left:{{this.left}}%;
}
</style>
{% endmacro %}
{% macro html(this,kwargs) %}
<img id="{{this.get_name()}}" alt="float_image"
src="{{ this.image }}"
style="z-index: 999999"
width="200" height="85">
</img>
{% endmacro %}
""")
def __init__(self, image, bottom=75, left=75):
super(FloatImage, self).__init__()
self._name = 'FloatImage'
self.image = image
self.bottom = bottom
self.left = left
class home(TemplateView):
template_name = 'index.html'
# Define a method for displaying Earth Engine image tiles on a folium map.
def get_context_data(self, **kwargs):
figure = folium.Figure()
m = folium.Map(
location=[9.0, 2.4],
zoom_start=7,
)
m.add_to(figure)
plugins.Fullscreen(position='topright', title='Full Screen', title_cancel='Exit Full Screen', force_separate_button=False).add_to(m)
# alldept = ee.Image('users/ashamba/allDepartments_v0')
ben_nursery = pd.read_excel("./Data/Nurseries.xlsx",engine='openpyxl',)
ben_nursery['Commune'] = ben_nursery['Commune'].str.title()
ben_nursery['Owner'] = ben_nursery['Owner'].str.title()
        # Drop unused columns, then rows containing NaN values
ben_nursery.drop(["Date","Provenance","Regarnissage", "Altitude", "Partenaire"], axis = 1, inplace = True)
ben_nursery.dropna(inplace=True)
marker_cluster = MarkerCluster(name="Benin-Nursery Information").add_to(m)
for i in range(len(ben_nursery)):
folium.Marker(location= [ben_nursery[i:i+1]['Latitude'].values[0], ben_nursery[i:i+1]['Longitude'].values[0]],
rise_on_hover=True,
rise_offset = 250,
icon = folium.Icon(color="red", icon="leaf"),
popup='''
<h4 style="font-family: 'Trebuchet MS', sans-serif">Commune Name: <b>{}</b></h4>
<h5 style="font-family: 'Trebuchet MS', sans-serif">Nursery Owner: <i>{}</i></h5>
<h5 style="font-family: 'Trebuchet MS', sans-serif">Nursery Area (ha): <b>{}</b></h5>
<h5 style="font-family: 'Trebuchet MS', sans-serif">Number of Plants: <b>{}</b></h5>
<a href="https://www.technoserve.org/our-work/agriculture/cashew/?_ga=2.159985149.1109250972.1626437600-1387218312.1616379774"target="_blank">click link to website</a>
<img src="https://gumlet.assettype.com/deshdoot/import/2019/12/tripXOXO-e1558439144643.jpg?w=1200&h=750&auto=format%2Ccompress&fit=max" width="200" height="70">
'''.format(ben_nursery[i:i+1].Commune.values[0], ben_nursery[i:i+1].Owner.values[0], ben_nursery[i:i+1]['Area (ha)'].values[0], ben_nursery[i:i+1]['Numebr of Plants'].values[0])).add_to(marker_cluster)
# alldept = ee.Image('srtm90_v4')
# benin_adm1 = ee.FeatureCollection("users/ashamba/BEN_adm1")
# benin_adm1_json = ee_to_geojson(benin_adm1)
with open("ben_adm1.json") as f:
benin_adm1_json = geojson.load(f)
with open("ben_adm2.json") as f:
benin_adm2_json = geojson.load(f)
# benin_adm2 = ee.FeatureCollection("users/ashamba/BEN_adm2")
# benin_adm2_json = ee_to_geojson(benin_adm2)
# dataset = ee.ImageCollection('MODIS/006/MOD13Q1').filter(ee.Filter.date('2019-07-01', '2019-11-30')).first()
# modisndvi = dataset.select('NDVI')
# visParams = {'min':0, 'max':3000, 'palette':['225ea8','41b6c4','a1dab4','034B48']}
# vis_paramsNDVI = {
# 'min': 0,
# 'max': 9000,
# 'palette': [ 'FE8374', 'C0E5DE', '3A837C','034B48',]}
# map_id_dict = ee.Image(modisndvi).getMapId(vis_paramsNDVI)
# folium.raster_layers.TileLayer(
# tiles = map_id_dict['tile_fetcher'].url_format,
# attr = 'Google Earth Engine',
# name = 'NDVI',
# overlay = True,
# control = True
# ).add_to(m)
# def add_ee_layer(self, ee_image_object, vis_params, name):
# map_id_dict = ee.Image(ee_image_object).getMapId(vis_params)
# folium.raster_layers.TileLayer(
# tiles=map_id_dict['tile_fetcher'].url_format,
# attr='Map Data © <a href="https://earthengine.google.com/">Google Earth Engine</a>',
# name=name,
# overlay=True,
# control=True
# ).add_to(self)
# folium.Map.add_ee_layer = add_ee_layer
# m.add_ee_layer(alldept, {'min':0, 'max': 4, 'palette': "black, green, white, gray"}, 'Benin-Caju Prediction')
# json_layer_ben = folium.GeoJson(data=benin_adm1_json, name='Benin States JSON')
def highlight_function(feature):
return {"fillColor": "#ffaf00", "color": "green", "weight": 3, "dashArray": "1, 1"}
g = folium.GeoJson(data=benin_adm1_json,
name='Benin-Adm1 Department',
highlight_function = highlight_function)
g1 = folium.GeoJson(data=benin_adm2_json,
name='Benin-Adm2 Communes',
highlight_function = highlight_function)
# m.add_child(json_layer_ben)
folium.GeoJsonTooltip(fields=["NAME_1"],
aliases = ["Dep't name:"],
labels = False,
sticky = False,
style=("background-color: white; color: black; font-family: sans-serif; font-size: 12px; padding: 4px;")
).add_to(g)
folium.GeoJsonTooltip(fields=["NAME_2"],
aliases = ["Commune name:"],
labels = False,
sticky = False,
style=("background-color: white; color: black; font-family: sans-serif; font-size: 12px; padding: 4px;")
).add_to(g1)
g.add_to(m)
g1.add_to(m)
value1="https://www.diversityjobs.com/wp-content/uploads/2020/12/technoserve-logo.png"
value2 = "http://www.tnsbenin.org/uploads/1/0/9/8/109816790/logo-cajulab-jpg_orig.jpg"
FloatImage(value1, bottom=78, left=2).add_to(m)
FloatImage(value2, bottom=87, left=2).add_to(m)
m.add_child(folium.LayerControl())
figure.render()
# print('test')
return {"map": figure}
|
nilq/baby-python
|
python
|
from operator import attrgetter
from typing import Set
import pandas as pd
from investmentstk.models.bar import Bar
BarSet = Set[Bar]
def barset_from_csv_string(csv_string: str) -> BarSet:
"""
Expected format:
date,open,high,low,close
without headers
"""
barset = set()
rows = csv_string.strip().split("\n")
for row in rows:
time, open, high, low, close = [value.strip() for value in row.split(",")]
bar = Bar(time=time, open=open, high=high, low=low, close=close) # type: ignore
barset.add(bar)
return barset
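# Example (sketch):
#
#   csv = "2021-01-04,10,12,9,11\n2021-01-05,11,13,10,12"
#   bars = barset_from_csv_string(csv)
#   df = barset_to_ohlc_dataframe(bars)  # DatetimeIndex, OHLC columns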
def barset_to_ohlc_dataframe(barset: BarSet) -> pd.DataFrame:
"""
Converts a set of bars into a dataframe.
The dataframe is indexed by date and each component of the bar (OHLC) becomes a column.
Useful for calculations that require access to more than one component of an asset.
"""
dataframe = pd.DataFrame(barset)
return format_ohlc_dataframe(dataframe)
def format_ohlc_dataframe(dataframe: pd.DataFrame) -> pd.DataFrame:
"""
Useful for dependencies that already provide OHLC data in a dataframe.
Converts it to our format.
"""
dataframe = dataframe.set_index(pd.DatetimeIndex(dataframe["time"]))
dataframe = dataframe.drop("time", axis=1)
dataframe = dataframe.sort_index()
return dataframe
def ohlc_to_single_column_dataframe(dataframe: pd.DataFrame, asset, column: str = "close") -> pd.DataFrame:
"""
Converts a set of bars into a single column dataframe using `column` (like the close price) as the values.
The dataframe is indexed by date and the column is named after the asset's name.
Useful for converting barsets of several different assets into dataframes that will be merged
together.
"""
    dataframe = dataframe[[column]]  # keep only the requested column (close by default)
dataframe = dataframe.rename(columns={column: asset.name})
dataframe.index = dataframe.index.date
dataframe = dataframe.sort_index()
return dataframe
def barset_to_sorted_list(barset: BarSet) -> list[Bar]:
return sorted(list(barset), key=attrgetter("time"))
|
nilq/baby-python
|
python
|
import re
from typing import Optional
from pydantic import BaseModel, validator
class InputQuery(BaseModel):
dataStructureName: str
version: str
population: Optional[list]
include_attributes: Optional[bool] = False
@validator('version')
def check_for_sem_ver(cls, version):
pattern = re.compile(r"^([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)$")
if not pattern.match(version):
raise ValueError(
f"==> version {version} is not a valid semantic version."
)
return version
class InputTimePeriodQuery(InputQuery):
startDate: int
stopDate: int
class InputTimeQuery(InputQuery):
date: int
class InputFixedQuery(InputQuery):
pass
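# Example (sketch): versions must have four dot-separated numeric parts, despite
# the validator's "semantic version" wording:
#
#   InputTimePeriodQuery(dataStructureName="persons", version="1.0.0.0",
#                        startDate=18000, stopDate=18500)  # validates
#   InputQuery(dataStructureName="persons", version="1.0.0")  # fails validation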
|
nilq/baby-python
|
python
|
from django.db import models
class ResourceGroupTextAttributeDefinition(models.Model):
class Meta:
unique_together = ('name', 'resource_group',)
name = models.CharField(max_length=100,
blank=False)
resource_group = models.ForeignKey('ResourceGroup',
on_delete=models.CASCADE,
related_name='text_attribute_definitions',
related_query_name='text_attribute_definition',
null=True)
help_text = models.CharField(max_length=100, default='', null=True, blank=True)
def edit(self, name, help_text):
self.name = name
self.help_text = help_text
self.save()
def __str__(self):
return f"{self.resource_group} - {self.name}"
|
nilq/baby-python
|
python
|
from pathlib import Path
from tempfile import TemporaryDirectory
import json
import os
import unittest
from moto import mock_s3
from schematics.exceptions import DataError
import boto3 as boto
from hidebound.exporters.s3_exporter import S3Config, S3Exporter
# ------------------------------------------------------------------------------
class S3ConfigTests(unittest.TestCase):
def setUp(self):
self.config = dict(
access_key='foo',
secret_key='bar',
bucket='bucket',
region='us-west-2',
)
def test_validate(self):
S3Config(self.config).validate()
def test_bucket(self):
self.config['bucket'] = 'BadBucket'
with self.assertRaises(DataError):
S3Config(self.config).validate()
def test_region(self):
self.config['region'] = 'us-west-3'
with self.assertRaises(DataError):
S3Config(self.config).validate()
# ------------------------------------------------------------------------------
class S3ExporterTests(unittest.TestCase):
@mock_s3
def setUp(self):
self.config = dict(
access_key='foo',
secret_key='bar',
bucket='bucket',
region='us-west-2',
)
self.s3 = boto.session.Session(
aws_access_key_id=self.config['access_key'],
aws_secret_access_key=self.config['secret_key'],
region_name=self.config['region'],
).resource('s3')
self.bucket = self.s3.Bucket(self.config['bucket'])
@mock_s3
def test_from_config(self):
result = S3Exporter.from_config(self.config)
self.assertIsInstance(result, S3Exporter)
@mock_s3
def test_init(self):
S3Exporter(**self.config)
buckets = self.s3.buckets.all()
buckets = [x.name for x in buckets]
self.assertIn(self.config['bucket'], buckets)
self.assertEqual(len(buckets), 1)
@mock_s3
def test_init_with_bucket(self):
result = list(self.s3.buckets.all())
self.assertEqual(result, [])
S3Exporter(**self.config)
buckets = self.s3.buckets.all()
buckets = [x.name for x in buckets]
self.assertIn(self.config['bucket'], buckets)
self.assertEqual(len(buckets), 1)
S3Exporter(**self.config)
buckets = self.s3.buckets.all()
buckets = [x.name for x in buckets]
self.assertIn(self.config['bucket'], buckets)
self.assertEqual(len(buckets), 1)
@mock_s3
def test_export_asset(self):
exporter = S3Exporter(**self.config)
id_ = 'abc123'
expected = dict(asset_id=id_, foo='bar')
exporter._export_asset(expected)
with TemporaryDirectory() as root:
            file_ = Path(root, f'{id_}.json')
with open(file_, 'wb') as f:
self.bucket.download_fileobj(
f'hidebound/metadata/asset/{id_}.json',
f
)
with open(file_, 'r') as f:
self.assertEqual(json.load(f), expected)
@mock_s3
def test_export_content(self):
with TemporaryDirectory() as root:
n = 'p-proj001_spec001_d-desc_v001'
rel_path = f'projects/proj001/spec001/{n}/{n}_f0000.json'
filepath = Path(root, rel_path)
content = {'foo': 'bar'}
os.makedirs(filepath.parent, exist_ok=True)
with open(Path(root, filepath), 'w') as f:
json.dump(content, f)
exporter = S3Exporter(**self.config)
id_ = 'abc123'
expected = dict(
file_id=id_,
foo='bar',
filepath=filepath.as_posix(),
filepath_relative=rel_path,
)
exporter._export_content(expected)
# content
file_ = Path(root, 'content.json')
with open(file_, 'wb') as f:
self.bucket.download_fileobj(f'hidebound/content/{rel_path}', f)
with open(file_, 'r') as f:
self.assertEqual(json.load(f), content)
@mock_s3
def test_export_file(self):
with TemporaryDirectory() as root:
n = 'p-proj001_spec001_d-desc_v001'
rel_path = f'projects/proj001/spec001/{n}/{n}_f0000.json'
filepath = Path(root, rel_path)
content = {'foo': 'bar'}
os.makedirs(filepath.parent, exist_ok=True)
with open(Path(root, filepath), 'w') as f:
json.dump(content, f)
exporter = S3Exporter(**self.config)
id_ = 'abc123'
expected = dict(
file_id=id_,
foo='bar',
filepath=filepath.as_posix(),
filepath_relative=rel_path,
)
exporter._export_file(expected)
# metadata
file_ = Path(root, 'metadata.json')
with open(file_, 'wb') as f:
self.bucket.download_fileobj(
f'hidebound/metadata/file/{id_}.json',
f
)
with open(file_, 'r') as f:
self.assertEqual(json.load(f), expected)
@mock_s3
def test_export_asset_chunk(self):
exporter = S3Exporter(**self.config)
expected = [
dict(foo='bar'),
dict(pizza='taco'),
]
exporter._export_asset_chunk(expected)
keys = [x.key for x in self.bucket.objects.all()]
key = list(filter(lambda x: 'asset-chunk' in x, keys))[0]
with TemporaryDirectory() as root:
temp = Path(root, 'temp.json')
with open(temp, 'wb') as f:
self.bucket.download_fileobj(key, f)
with open(temp, 'r') as f:
self.assertEqual(json.load(f), expected)
@mock_s3
def test_export_file_chunk(self):
exporter = S3Exporter(**self.config)
expected = [
dict(foo='bar'),
dict(pizza='taco'),
]
exporter._export_file_chunk(expected)
keys = [x.key for x in self.bucket.objects.all()]
key = list(filter(lambda x: 'file-chunk' in x, keys))[0]
with TemporaryDirectory() as root:
temp = Path(root, 'temp.json')
with open(temp, 'wb') as f:
self.bucket.download_fileobj(key, f)
with open(temp, 'r') as f:
self.assertEqual(json.load(f), expected)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
##-------------------------------------------------------------------
## @copyright 2017 DennyZhang.com
## Licensed under MIT
## https://www.dennyzhang.com/wp-content/mit_license.txt
##
## File : refresh_containers.py
## Author : Denny <contact@dennyzhang.com>
## Description : Restart a list of docker containers.
## If required, related docker images will be updated.
## Requirements:
## pip install docker==2.0.0
## --
## Created : <2017-05-12>
## Updated: Time-stamp: <2017-09-07 21:36:07>
##-------------------------------------------------------------------
import sys
import docker
def pull_image_by_container(client, container_name):
container = None
try:
container = client.containers.get(container_name)
except docker.errors.NotFound as e:
print("Error: No container is found with name of %s" % (container_name))
sys.exit(1)
docker_image = container.attrs['Config']['Image']
print("docker pull %s" % (docker_image))
client.images.pull(docker_image)
if __name__ == '__main__':
client = docker.from_env()
container_name = "my-test"
pull_image_by_container(client, container_name)
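    # A restart step (hedged sketch) would complete the "refresh" described in the
    # file header; docker-py exposes it directly:
    # client.containers.get(container_name).restart()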
## File : refresh_containers.py ends
|
nilq/baby-python
|
python
|
from barcode import Code128
from svgwrite import cm, mm, px
import svgwrite
Drawing = svgwrite.Drawing
class Size():
def __init__(self, w, h, unit='px'):
self.width = w
self.height = h
self.unit = unit
def getS(self, float_precision=2):
"""String Size -> width[unit], height[unit]"""
FMT = "{:0." + str(float_precision) + "f}{}" if float_precision > 0 else "{}{}"
#print(FMT)
return [ FMT.format(s, self.unit) for s in self.getN ]
@property
def getN(self):
return self.width, self.height
def calculate_size(code, size):
w, h = size
margin = int(w/5)
rest = w - margin
m_width = rest / len(code)
modules = []
cx = margin / 2
mlist = []
idx = 0
while(True):
if code[idx] == '1':
c = 1
while(idx + 1 < len(code)):
if code[idx + 1] == '0':
break
c += 1
idx += 1
mlist.append([1, c])
else :
c = 1
while(idx + 1 < len(code)):
if code[idx + 1] == '1':
break
c += 1
idx += 1
mlist.append([0, c])
if idx + 1 >= len(code):
break
idx += 1
xpos = cx
for r, c in mlist:
if r:
modules.append(((xpos, '0'), ( m_width * c, h )))
xpos += c * m_width
return (w, h), modules
def get_modules(code):
''''11100100001011' -> [[1,3],[0,2],[1,1],[0,4],[1,1],[0,1],[1,2]]'''
mlist = []
idx = 0
while(True):
if code[idx] == '1':
c = 1
while(idx + 1 < len(code)):
if code[idx + 1] == '0':
break
c += 1
idx += 1
mlist.append([1, c])
else :
c = 1
while(idx + 1 < len(code)):
if code[idx + 1] == '1':
break
c += 1
idx += 1
mlist.append([0, c])
if idx + 1 >= len(code):
break
idx += 1
return mlist
def test1():
code = Code128("Test_Code128").build()[0]
size, mod = calculate_size( code, (320, 140) )
print(size)
canvas = Drawing(filename="My.svg", size=( 320, 140 ))
barcode = canvas.add(canvas.g(id='barcode_g'))
barcode.add(canvas.rect( (0, 0), size, fill='white'))
bc_part = barcode.add(canvas.g(id='bc_part'))
for i in mod:
bc_part.add(canvas.rect(i[0], i[1], fill='black'))
canvas.save(pretty=True)
def test2():
size = Size( 120 * 3.125, 55 * 3.125 ) # in 90dpi -> ' * 3.125 is for 300dpi
code = Code128("Holis_NHK").build()[0]
modules = get_modules(code)
Canv = Drawing(filename="Draw.svg", size = size.getS(float_precision=2))
barcode = Canv.add(Canv.g(id="SomeID"))
barcode.add(Canv.rect(('0','0'),size.getS(float_precision=2), fill='white'))
xpos = int(size.getN[0] / 10)
width = (size.getN[0] - ( size.getN[0] / 5)) / len(code)
for ch, n in modules:
pos = Size(xpos, 0)
ms = Size(n * width, size.getN[1] * 7.5 / 10)
if ch:
barcode.add(Canv.rect((pos.getS()),(ms.getS()), fill="black"))
xpos += ( n * width )
barcode.add(Canv.text("Holis_NHK",x=["187.5px"], y=["162.5px"], style="fill:black;font-size:25pt;text-anchor:middle;"))
Canv.save(pretty=True)
def test3(c):
    from defs import CustomWriter
    from barcode import Gs1_128
    # Keep the original code string; rebinding `c` to the barcode object
    # previously caused save() to receive the object as a filename.
    bc = Gs1_128(c)
    bc.writer = CustomWriter()
    bc.save(c)
if __name__ == "__main__":
import sys
code = sys.argv[1]
test3(code)
|
nilq/baby-python
|
python
|
"""
Simple driver for Monarch GO AT modemcontrol commands
"""
from time import sleep
from logging import getLogger
from ..provisioner import ProvisionerError
ASCII_EOT = b'\x04'
class AtDriver():
"""
Low-level AT modem command driver.
"""
def __init__(self, fwinterface):
"""
        Constructor. Will enter bridge mode. Protocol port must be opened by caller.
:param fwinterface: Firmware interface object
"""
self.logger = getLogger(__name__)
self.bridge_mode = False
self.fwinterface = fwinterface
self.com = self.fwinterface.get_comport_handle()
self.enter_bridge_mode()
def __del__(self):
self.exit_bridge_mode()
def __enter__(self):
self.enter_bridge_mode()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.exit_bridge_mode()
def enter_bridge_mode(self):
"""
        Need to manage bridge state internally because pykitcommander doesn't yet.
"""
self.logger.debug("enter_bridge_mode")
if self.bridge_mode:
self.logger.debug("Already in bridge mode")
return
        # The blue LED is used to indicate bridge mode (i.e. we're talking to the modem)
self.fwinterface.set_led_status("CELL", "ON")
response = self.fwinterface.enter_bridge_mode()
if response == "":
self.bridge_mode = True
# Wait for modem being ready after reset
self.read_until(b"+SYSSTART", retries=2, timeout=1)
# Flush any garbage the modem might still have in store
garbage = self.com.read(self.com.in_waiting)
if garbage:
self.logger.debug("Garbage from modem: %s", garbage)
self.ping() # Sanity check - this should not fail
else:
self.fwinterface.set_led_status("ERR", "ON")
raise ProvisionerError("Enter bridge mode failed, response: {}".format(response))
def exit_bridge_mode(self):
"""
Exit bridge mode.
"""
self.logger.debug("exit_bridge_mode")
if not self.bridge_mode:
self.logger.debug("Already out of bridge mode")
return
response = self.fwinterface.exit_bridge_mode()
if response == "":
sleep(.3) # Wait for any garbage chars after switching mode
self.bridge_mode = False
self.fwinterface.set_led_status("CELL", "OFF")
else:
self.fwinterface.set_led_status("ERR", "ON")
raise ProvisionerError("Exit bridge mode failed, response: {}".format(response))
def ping(self):
"""
Send 'AT' command to modem and check response
:return: True if modem responds "OK"
"""
if self.bridge_mode:
response = self.command("AT")
if response[-1] == "OK":
return True
raise ProvisionerError("Modem ping failed, response: {}".format(response))
raise ProvisionerError("Modem ping attempted when not in bridge mode")
def read_response(self):
"""
Read response from modem. Response can be multiple lines either
ended with "OK\\r\\n", "ERROR\\r\\n", or '>' so a simple read_until
won't do. Returns list of response lines, blank lines and
CR-LF stripped.
"""
lines = []
while True:
line = self.com.read_until(b'\r\n')
if not line:
lines.append("ERROR: Timeout")
return lines
if line != b'\r\n':
lines.append(line[0:-2].decode("utf-8", "ignore"))
if line[0:2] == b"OK" or b"ERROR" in line:
return lines
def read_until(self, string, expect=b'\r\n', retries=1, timeout=None):
"""
Read complete lines until a line containing string is read.
Can be used to wait for expected URCs after a given command.
:param string: string to wait for
:param expect: Optional character to read until if not whole line read
:param retries: Number of times to retry after timeout waiting for string before giving up
:return: list of response lines.
"""
# TODO: extend to do regular expression matching.
lines = []
tm = self.com.timeout
if timeout:
self.com.timeout = timeout
while True:
line = self.com.read_until(expect)
if not line:
# For situations where the comm timeout is not enough.
retries -= 1
if retries > 0:
continue
lines.append("ERROR: Timeout")
self.com.timeout = tm
return lines
if line != b'\r\n': # Strip blank lines
if line.endswith(b'\r\n'):
lines.append(line[0:-2].decode("utf-8", "ignore"))
else:
lines.append(line.decode("utf-8", "ignore"))
if string in line:
self.com.timeout = tm
return lines
def command(self, cmd, payload=None):
"""
Send simple AT command.
:param cmd: Pre-formatted command.
:param payload: Optional payload sent in separate line. Payload length is appended
as argument to cmd. Payload == "" will append payload length argument while None will not.
(used for erase in AT+SQNSNVW command)
:return: sanitized response (list of lines) Last line will be "OK" or "ERROR"
"""
if payload is None:
self.logger.debug(cmd)
self.com.write((cmd + '\r').encode())
else:
self.logger.debug("%s,%d", cmd, len(payload))
self.com.write((cmd + ",{}\r".format(len(payload))).encode())
if len(payload) > 0:
self.com.read_until(b'>')
self.com.write(payload)
response = self.read_response()
self.logger.debug(response)
return response
def write_nvm(self, datatype, slot, data=None, cmd="AT+SQNSNVW"):
"""
Write data to NVM. Requires special handling because Sequans modem requires
certificate PEM files use '\\n' line endings and line ending not missing on last line.
:param cmd: "AT+SQNSNVW" (maybe others as well?)
:param datatype: "certificate", "privatekey", or "strid" (don't know what the latter is used for)
:param slot: 0-19, 0-5 for "strid"
:param data: data to write. None/empty => erase slot.
"""
if not datatype in ["certificate", "privatekey"]:
raise ValueError(f"Invalid data type for NVM write: {datatype}")
if data:
data = data.replace(b'\r\n', b'\n') # Get rid of CR-LF line endings if present
# Sequans modem requires PEM input ends with newline
if not data.endswith(b'\n'):
self.logger.warning("missing newline at end of data, appending")
data += b'\n'
else:
data = b''
response = self.command(cmd + f'="{datatype}",{slot}', data)
if response[-1] != "OK":
raise ProvisionerError(f"Write {datatype} to NVM failed, response: {response}")
def reset(self):
"""
Software-reset modem, wait for startup to complete
"""
response = self.command("AT^RESET")
if response[-1] == "OK":
self.read_until(b'+SYSSTART')
return
raise ProvisionerError("Reset modem failed")
|
nilq/baby-python
|
python
|
import abc
import filecmp
import inspect
import os
import shutil
from ctranslate2.specs import catalog
from ctranslate2.specs.model_spec import ModelSpec
def _list_specs():
return {
symbol: getattr(catalog, symbol)
for symbol in dir(catalog)
if inspect.isclass(getattr(catalog, symbol)) and not symbol.startswith("_")
}
class Converter(abc.ABC):
@staticmethod
def declare_arguments(parser):
parser.add_argument(
"--output_dir", required=True, help="Output model directory."
)
parser.add_argument(
"--model_spec",
required=True,
choices=list(_list_specs().keys()),
help="Type of model to convert.",
)
parser.add_argument(
"--vocab_mapping", default=None, help="Vocabulary mapping file (optional)."
)
parser.add_argument(
"--quantization",
default=None,
choices=["int8", "int16", "float16"],
help="Weight quantization type.",
)
parser.add_argument(
"--force",
action="store_true",
help="Force conversion even if the output directory already exists.",
)
return parser
def convert_from_args(self, args):
return self.convert(
args.output_dir,
args.model_spec,
vmap=args.vocab_mapping,
quantization=args.quantization,
force=args.force,
)
def convert(
self, output_dir, model_spec, vmap=None, quantization=None, force=False
):
if os.path.exists(output_dir) and not force:
raise RuntimeError(
"output directory %s already exists, use --force to override"
% output_dir
)
if isinstance(model_spec, str):
spec_class = _list_specs()[model_spec]
model_spec = spec_class()
if not isinstance(model_spec, ModelSpec):
raise TypeError("model_spec should extend ctranslate2.specs.ModelSpec")
try:
src_vocab, tgt_vocab = self._load(model_spec)
except NotImplementedError:
raise NotImplementedError(
"This converter does not support the model %s" % model_spec
)
model_spec.validate()
self._check_vocabulary_size(
"source", src_vocab, model_spec.source_vocabulary_size
)
self._check_vocabulary_size(
"target", tgt_vocab, model_spec.target_vocabulary_size
)
model_spec.optimize(quantization=quantization)
# Create model directory.
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
model_spec.serialize(os.path.join(output_dir, "model.bin"))
if vmap is not None:
shutil.copy(vmap, os.path.join(output_dir, "vmap.txt"))
src_vocab_path = os.path.join(output_dir, "source_vocabulary.txt")
tgt_vocab_path = os.path.join(output_dir, "target_vocabulary.txt")
self._save_vocabulary(src_vocab, src_vocab_path)
self._save_vocabulary(tgt_vocab, tgt_vocab_path)
# For shared vocabularies, keep a single file in the model directory.
if filecmp.cmp(src_vocab_path, tgt_vocab_path, shallow=False):
os.remove(tgt_vocab_path)
os.rename(src_vocab_path, os.path.join(output_dir, "shared_vocabulary.txt"))
return output_dir
@abc.abstractmethod
def _load(self, model_spec):
raise NotImplementedError()
@abc.abstractmethod
def _save_vocabulary(self, vocab, destination):
raise NotImplementedError()
def _vocabulary_size(self, vocab):
"""Returns the vocabulary size.
When defined, this enables additional error checking when converting models.
"""
return None
def _check_vocabulary_size(self, name, vocab, expected_size):
"""Raises an exception if expected and actual vocabulary sizes are known but
do not match.
"""
if expected_size is None:
return
vocab_size = self._vocabulary_size(vocab)
if vocab_size is None:
return
if vocab_size != expected_size:
raise ValueError(
"%s vocabulary has size %d but the model expected a vocabulary "
"of size %d" % (name.capitalize(), vocab_size, expected_size)
)
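# Sketch of a minimal converter subclass (the method bodies are illustrative;
# only the Converter interface itself comes from this module):
#
#   class MyConverter(Converter):
#       def _load(self, model_spec):
#           ...  # fill model_spec variables, return (src_vocab, tgt_vocab)
#       def _save_vocabulary(self, vocab, destination):
#           with open(destination, "w") as f:
#               f.writelines(token + "\n" for token in vocab)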
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
def base(request):
return render(request, 'base.html', {})
def report(request):
context = {'title': '报表平台'}
return render(request, 'reports/report.html', context)
def histogram(request):
"""柱状图"""
context = {'title': '柱状图'}
return render(request, 'reports/histogram.html', context)
def rack(request):
"""机柜布置图"""
context = {'title': '机柜布置图'}
return render(request, 'reports/rack.html', context)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
hostname = 'localhost'
username = 'root'
password = ''
database = 'pythonconn'
# Simple routine to run a query on a database and print the results:
def doQuery( conn ) :
    cur = conn.cursor()
    ''' first='Shreyas'
    last='Patil'
    user='Flamestriker'
    passw='12345'
    '''
    # cur.execute( "SELECT uname, pname FROM employee" )
    # Use parameterized queries so string values are quoted and escaped correctly
    cur.execute( "INSERT INTO `employee` VALUES (%s, %s, %s, %s)",
                 ('Shreyas', 'Patil', 'Flamestriker', '12345') )
    conn.commit()
    cur.execute( "SELECT fname, lname, uname, pname FROM employee WHERE fname=%s",
                 ('Shreyas',) )
for firstname, lastname, username, password in cur.fetchall() :
print (firstname, lastname, username, password)
print ("Using pymysql…")
import pymysql
myConnection = pymysql.connect( host=hostname, user=username, passwd=password, db=database )
doQuery( myConnection )
myConnection.close()
print ("Connection Successful!!!")
|
nilq/baby-python
|
python
|
# Processing an API response
import requests
# Make an API call and store the response
url = 'https://api.github.com/search/repositories?\
q=language:python&sort=stars'
r = requests.get(url)
print('Status code:', r.status_code)
# Store the API response in a variable
response_dict = r.json()
print('Total repositories:', response_dict['total_count'])
# Explore information about the repositories
repo_dicts = response_dict['items']
print('Repositories returned:', len(repo_dicts))
# Examine several repositories
print('\nSelected information about each repository:')
for repo_dict in repo_dicts:
print('\nName:', repo_dict['name'])
print('Owner:', repo_dict['owner']['login'])
print('Stars:', repo_dict['stargazers_count'])
    print('Repository:', repo_dict['html_url'])
print('Created:', repo_dict['created_at'])
print('Updated:', repo_dict['updated_at'])
print('Description:', repo_dict['description'])
|
nilq/baby-python
|
python
|
import json, os
import copy
class sesh:
    def __init__(self, sessionUUID, TotalEvents, VersionControlEvents, EditEvents, CommandEvents, DocumentEvents, ActivityEvents, NavigationEvents, TestRunEvents, WindowEvents, CompletionEvents, SystemEvents, DebuggerEvents, SolutionEvents, IDEStateEvents, UndefinedEvents):
        # Store the constructor arguments instead of silently discarding them
        self.sessionUUID = sessionUUID
        self.TotalEvents = TotalEvents
        self.VersionControlEvents = VersionControlEvents
        self.EditEvents = EditEvents
        self.CommandEvents = CommandEvents
        self.DocumentEvents = DocumentEvents
        self.ActivityEvents = ActivityEvents
        self.NavigationEvents = NavigationEvents
        self.TestRunEvents = TestRunEvents
        self.WindowEvents = WindowEvents
        self.CompletionEvents = CompletionEvents
        self.SystemEvents = SystemEvents
        self.DebuggerEvents = DebuggerEvents
        self.SolutionEvents = SolutionEvents
        self.IDEStateEvents = IDEStateEvents
        self.UndefinedEvents = UndefinedEvents
def main():
base_path = 'E:/ESEData'
based_path = 'E:/ESEData/'
files = [pos_json for pos_json in os.listdir(base_path) if pos_json.endswith('.json')]
print(len(files), " files loaded.")
eventType = ""
sessions = {}
graphsessions = {}
sessiontracker = []
existingUUID = []
session = sesh("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
for i in range(0, len(files)):
data = open(based_path + files[i])
data2 = json.load(data)
eventType = data2['$type'].split(',')[0].split('.')[-1]
fileSessionUUID = data2['IDESessionUUID']
if (fileSessionUUID != session.sessionUUID and fileSessionUUID not in existingUUID): # if file's UUID is not the previous one, and has not been seen before, create new session
graphsessions[session.sessionUUID] = copy.deepcopy(sessiontracker) # put away working session into graphsessions
sessions[session.sessionUUID] = session
session = sesh("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
session.sessionUUID = fileSessionUUID
existingUUID.append(session.sessionUUID)
sessiontracker.clear()
# print("Creating new session ", fileSessionUUID)
# print(existingUUID)
# print(sessions)
elif (fileSessionUUID != session.sessionUUID and fileSessionUUID in existingUUID): # if file's UUID is not the previous one, but has been seen before, use the old one
for j in sessions:
if sessions[j].sessionUUID == fileSessionUUID:
# print("Found duplicate session ", fileSessionUUID)
sessions[session.sessionUUID] = session
session = sessions[j] # then set the current session to the old matched session
# print(sessions[j].TotalEvents, sessions[j].sessionUUID)
sessiontracker = graphsessions[session.sessionUUID] # set session tracker to graph sessions
break
session.TotalEvents += 1
if (eventType == "VersionControlEvent"):
session.VersionControlEvents += 1
elif (eventType == "EditEvent"):
session.EditEvents += 1
elif (eventType == "CommandEvent"):
session.CommandEvents += 1
elif (eventType == "DocumentEvent"):
session.DocumentEvents += 1
elif (eventType == "ActivityEvent"):
session.ActivityEvents += 1
elif (eventType == "NavigationEvent"):
session.NavigationEvents += 1
elif (eventType == "TestRunEvent"):
session.TestRunEvents += 1
elif (eventType == "WindowEvent"):
session.WindowEvents += 1
elif (eventType == "CompletionEvent"):
session.CompletionEvents += 1
elif (eventType == "SystemEvent"):
session.SystemEvents += 1
elif (eventType == "DebuggerEvent"):
session.DebuggerEvents += 1
elif (eventType == "SolutionEvent"):
session.SolutionEvents += 1
elif (eventType == "IDEStateEvent"):
session.IDEStateEvents += 1
else:
session.UndefinedEvents += 1
sessiontracker.append(session)
graphsessions[session.sessionUUID] = copy.deepcopy(sessiontracker)
        data.close()
sessions.pop("")
graphsessions.pop("")
for i in graphsessions:
if len(graphsessions[i]) > 10:
for j in graphsessions[i]:
# print(i, len(graphsessions[i]), graphsessions[i][0].sessionUUID, graphsessions[i][0])
print(i, j.TotalEvents, j.VersionControlEvents, j.EditEvents, j.CommandEvents, j.DocumentEvents, j.ActivityEvents, j.NavigationEvents, j.TestRunEvents, j.WindowEvents, j.CompletionEvents, j.SystemEvents, j.DebuggerEvents, j.SolutionEvents, j.IDEStateEvents, j.UndefinedEvents, file=open("stackplot.txt", "a"))
sessions.pop("")
for i in sessions:
if sessions[i].TotalEvents > 0:
print("Session ID: ", sessions[i].sessionUUID, " Number of Events: ", sessions[i].TotalEvents, "\n Version Control: ", sessions[i].VersionControlEvents, "\n Edit: ", sessions[i].EditEvents, "\n Command: ", sessions[i].CommandEvents, "\n Document: ", \
sessions[i].DocumentEvents, "\n Activity: ", sessions[i].ActivityEvents, "\n Navigation: ",sessions[i].NavigationEvents, "\n Test Run: ",sessions[i].TestRunEvents, "\n Window: ",sessions[i].WindowEvents, "\n Completion: ",sessions[i].CompletionEvents, \
"\n System: ", sessions[i].SystemEvents, "\n Debugger: ",sessions[i].DebuggerEvents, "\n Solution: ",sessions[i].SolutionEvents, "\n IDE State: ",sessions[i].IDEStateEvents, "\n Undefined: ",sessions[i].UndefinedEvents ,file=open("output.txt", "a")) # ,file=open("output.txt", "a")
counter = 0
for j in sessions:
# print(sessions[j].sessionUUID, sessions[j].TotalEvents)
counter += sessions[j].TotalEvents
print(len(sessions), " sessions", file=open("output.txt", "a"))
print(len(existingUUID), " sessionUUIDs", file=open("output.txt", "a"))
print(counter, " jsons looked at", file=open("output.txt", "a"))
print(len(graphsessions))
print(len(sessions))
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, masonarmani38@gmail.com and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestProductionWaste(unittest.TestCase):
pass
def tear_down():
filters =dict({
"from" : "01-09-2017 17:49:55",
"to":"01-12-2017 17:49:55",
"production_order":"GCL-PRO-17-00006"
})
conditions = ""
from datetime import datetime
froms = unicode.split(filters.get("from")," ")
tos = unicode.split(filters.get("to")," ")
filters['to']=datetime.strptime(tos[0], "%d-%m-%Y").strftime("%Y-%m-%d")+" "+ tos[1]
filters['from']=datetime.strptime(froms[0], "%d-%m-%Y").strftime("%Y-%m-%d")+" "+froms[1]
if filters.get('production_order'):
conditions = " and p.production_order='{production_order}'"
if filters.get('to') and filters.get("from"):
conditions += " and (p.planned_start_date between DATE('{from}') and DATE('{to}'))"
    data = frappe.db.sql(
        "SELECT p.production_order, p.planned_start_date, c.item_code, c.item_name, c.item_uom, c.actual, "
        "p.destination_warehouse, c.waste FROM `tabProduction Waste` p JOIN "
        "`tabProduction Waste Manufactured Items` c ON (c.parent = p.name) "
        "WHERE (1=1) {cond}".format(cond=conditions.format(**filters)), as_list=1)
print "SELECT p.production_order, p.planned_start_date, c.item_code, c.item_name, c.item_uom, c.actual "\
"p,destination_warehouse , c.waste FROM `tabProduction Waste` p JOIN "\
"`tabProduction Waste Manufactured Items` c ON (c.parent = p.name) "\
"WHERE (1=1) {cond}".format(cond=conditions.format(**filters))
print data
return
allowable_waste = frappe.get_single("Production Waste Setup")
frappe.errprint(allowable_waste.allowable_waste)
production_order = "GCL-PRO-17-00006"
print get_excess(production_order=production_order)
print get_production_items(production_order=production_order)
print get_manufactured_items(production_order=production_order)
def get_production_items(production_order=None):
if production_order:
stock_entry_details = frappe.db.sql("""select sd.qty, sd.item_name , sd.item_code , sd.uom from `tabStock Entry` s JOIN
`tabStock Entry Detail` sd ON s.name = sd.parent WHERE s.production_order = '%s'
and s.purpose = "Material Transfer for Manufacture" """ %
production_order, as_list=1)
return stock_entry_details
return []
def get_manufactured_items(production_order=None):
if production_order:
stock_entry_details = frappe.db.sql("""select sd.qty, sd.item_name , sd.item_code , sd.uom from `tabStock Entry` s JOIN
`tabStock Entry Detail` sd ON s.name = sd.parent WHERE s.production_order = '%s'
and s.purpose = "Manufacture" and sd.t_warehouse != "" GROUP BY s.production_order"""
% production_order, as_list=1)
return stock_entry_details
return []
def get_excess(production_order = None):
excess = 0
if production_order:
fgtf = frappe.db.sql("""select sum(c.qty) excess from `tabFinished Goods Transfer Form` p JOIN
`tabFinished Goods Transfer Item` c ON p.name = c.parent WHERE p.weekly_production_order_form = '%s'
GROUP BY weekly_production_order_form""" % production_order, as_list=1)
if len(fgtf):
excess = fgtf[0][0]
return excess
|
nilq/baby-python
|
python
|
import os
import time
import torch
from options.test_options_CMU import TestOptions
from torch.autograd import Variable
import numpy as np
from PIL import Image
from torch.utils.data import DataLoader
# from utils.label2Img import label2rgb
from dataloader.transform import Transform_test
from dataloader.dataset import NeoData_test
from networks import get_model
from eval import *
import argparse
def main(args):
despath = args.savedir
if not os.path.exists(despath):
os.mkdir(despath)
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
imagedir = os.path.join(args.datadir, 'image.txt')
image2dir = os.path.join(args.datadir, 'image2.txt')
labeldir = os.path.join(args.datadir, 'label.txt')
transform = Transform_test(args.size)
dataset_test = NeoData_test(imagedir, image2dir, labeldir, transform)
loader = DataLoader(dataset_test, num_workers=2, batch_size=1, shuffle=False) #test data loader
model = get_model(args.num_classes, args.cd_model)
model_cis = get_model(args.num_classes, args.cis_model)
if args.cuda:
model = model.cuda()
model_cis = model_cis.cuda()
checkpoint1 = torch.load(args.cd_model_dir)
model.load_state_dict(checkpoint1,strict=False)
model.eval()
checkpoint2 = torch.load(args.cis_model_dir)
model_cis.load_state_dict(checkpoint2)
model_cis.eval()
    count = 0
    total_time = 0  # accumulated inference time; avoids shadowing the built-in sum()
for step, colign in enumerate(loader):
t1 = time.time()
img = colign[4].squeeze(0).numpy() #image-numpy,original image
img2 = colign[5].squeeze(0).numpy()
images = colign[0] #image-tensor
classi = colign[1]
images2 = colign[2]
label = colign[3] #label-tensor
file_name = colign[6]
image_name = file_name[0].split("/")[-1]
folder_name = file_name[0].split("/")[-3]
if args.cuda:
images = images.cuda()
images2 = images2.cuda()
classi = classi.cuda()
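        # Note: Variable(..., volatile=True) below is the legacy (pre-0.4) PyTorch way to run
        # inference without autograd; torch.no_grad() replaces it in modern versions.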
inputs = Variable(images, volatile=True)
inputs2 = Variable(images2, volatile=True)
stime = time.time()
c,n,dp=model_cis(inputs,inputs2,1)
pr, p1, p2, p3, p4, p5, x1, x2, x3, x4, x5, s2, s3, s4, s5, pf = model(c,n,dp)
etime = time.time()
        time_sum += (etime - stime)
count += 1
print("This is the {}th of image!".format(count),"// Avg time/img: %.4f s" % (etime-stime))
out_pr = pr[0].cpu().max(0)[1].data.squeeze(0).byte().numpy() # index of max-channel
out_pf = pf[0].cpu().max(0)[1].data.squeeze(0).byte().numpy() # index of max-channel
Image.fromarray(out_pr * 255).save(despath + 'RSS_' + folder_name + '_' + image_name.split(".")[0] + '_pr.png')
Image.fromarray(out_pf * 255).save(despath + 'RSS_' + folder_name + '_' + image_name.split(".")[0] + '_pf.png')
if __name__ == '__main__':
parser = TestOptions().parse()
main(parser)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os
import smtplib
import mimetypes
import argparse
from email.mime.multipart import MIMEMultipart
from email import encoders
from email.message import Message
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
# Argument Parser
parser = argparse.ArgumentParser(description='Process inputs', formatter_class=lambda prog: argparse.HelpFormatter(prog,max_help_position=55))
parser.add_argument("-s", "--sender", metavar="<sender>", type=str, default="pythonscript@labs.test", help="def: pythonscript@labs.test")
parser.add_argument("-r", "--recipient", metavar="<recipient>", type=str, required=True)
parser.add_argument("-m", "--mta", metavar="<ip address>", type=str, required=True, help="IP address of next-hop MTA")
parser.add_argument("-p", "--port", metavar="<port>", type=str, help="Port email will send on (def: 25)", default="25")
parser.add_argument("-a", "--attach", metavar="<attachment>", type=str, nargs='+', help="Full or relative path to attachment")
parser.add_argument("-S", "--subject", metavar="<subject>", type=str, help="Subject of the email", default="email sent by python script")
# Mutually exclusive group for body types (you can use a string or a file, not both)
body_group = parser.add_mutually_exclusive_group()
body_group.add_argument("-b", "--body", metavar="<body>", type=str, help="String for the body of the email")
# body_group.add_argument("-B", "--body", metavar="<body>", type=str, help="Full or relative path to email body file")
parser.add_argument("-H", action="store_true", help="Adds an HTML body in addition to the plain text body")
parser.add_argument("-t", action="store_true", help="Enable TLS")
parser.add_argument("-q", action="store_true", help="Attempts to get a queue id, but may have unexpected results")
parser.add_argument("-v", action="store_true", help="Verbose mode")
args = parser.parse_args()
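# Illustrative invocation (script name, host, recipient and file names are hypothetical):
#   python this_script.py -r user@example.test -m 192.0.2.25 -a report.pdf -b "hello" -v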
# Creates key/value pair to return qids and filenames
qids = {}
def main():
# Build the SMTP Connection
server = buildsmtp()
# Iterate through, building and sending messages for each attachment provided
    for a in (args.attach or [None]):  # still send one message when no attachment is given
msg = buildmsg(a)
qid = sendmsg(server, msg)
qids[qid] = a
# Close SMTP connection
prquit = server.docmd("QUIT")
if (args.v):
print prquit
# Debugging
#for x in qids:
# print x, qids[x]
return qids
def buildsmtp():
# Create the SMTP object (server format "ip:port") Note: This actually checks to see if the port is open
try:
server = smtplib.SMTP(args.mta + ":" + args.port)
except:
print "Error 001: Unable to connect to " + args.mta + " on port " + args.port
exit()
# If selected, attempts to negotiate TLS (also, prhelo = print helo)
if args.t:
prhelo = server.ehlo()
try:
server.starttls()
server.ehlo()
if args.v:
print "TlS started successfully."
except:
print "TLS was not accepted by " + args.mta + ". \nAttempting to send unencrypted."
# If no TLS flag, initiates the connection
else:
try:
prhelo = server.docmd("helo", "labs.test")
except:
print "Error 002: Sending email failed, could be a bad address?"
if args.v:
print "Attempting to send the email to " + args.mta + ":" + args.port
if args.v:
print prhelo
# NOT YET IMPLEMENTED
# This can be used for server auth (like gmail), but it's disabled. You will need to add the 'server.login(username,password)' line in somewhere
# username = "user"
# password = "password"
# server.login(username,password)
return server
def buildmsg(a):
# Create the message and add sender, recipient and subject (This will be used if you aren't using the -q flag)
msg = MIMEMultipart()
msg["From"] = args.sender
msg["To"] = args.recipient
msg["Subject"] = args.subject
msg.preamble = args.subject
# Create the alternative for the text/plain and text/html. This object is attached inside the multipart message
alt_msg = MIMEMultipart('alternative')
# Verbose logging to display to/from/subj
if args.v:
print "\n### Verbose Output Enabled ###\n"
print "From: " + args.sender
print "To: " + args.recipient
print "Subject: " + args.subject
if a:
print "Attachment: " + os.path.basename(a) + "\n"
# Attaches text/plain. Also attaches HTML if it is selected
# https://docs.python.org/3/library/email-examples.html (RFC 2046)
alt_msg.attach(MIMEText(args.body, "plain"))
if args.H:
alt_msg.attach(MIMEText(args.body, "html"))
msg.attach(alt_msg)
    # Checks for an attachment argument and, if there is one, identifies its type.
# Borrowed from https://docs.python.org/2.4/lib/node597.html
if a is not None:
ctype, encoding = mimetypes.guess_type(a)
if ctype is None or encoding is not None:
ctype = "application/octet-stream"
maintype, subtype = ctype.split("/", 1)
if maintype == "text":
fp = open(a)
# Note: we should handle calculating the charset
attachment = MIMEText(fp.read(), _subtype=subtype)
fp.close()
elif maintype == "image":
fp = open(a, "rb")
attachment = MIMEImage(fp.read(), _subtype=subtype)
fp.close()
elif maintype == "audio":
fp = open(a, "rb")
attachment = MIMEAudio(fp.read(), _subtype=subtype)
fp.close()
else:
fp = open(a, "rb")
attachment = MIMEBase(maintype, subtype)
attachment.set_payload(fp.read())
fp.close()
encoders.encode_base64(attachment)
attachment.add_header("Content-Disposition", "attachment", filename=os.path.basename(a))
msg.attach(attachment)
# This line will literally print the entire email including headers
# print "\n\n\n" + msg.as_string() + "\n\n\n"
return msg
def sendmsg(server, msg):
# Sends the email DATA
prfrom = server.docmd("MAIL from:", args.sender)
prto = server.docmd("RCPT to:", args.recipient)
prdata = server.docmd("DATA")
qidline = server.docmd(msg.as_string() + "\r\n.")
# Prints what happened above when attempting to send
if args.v:
print prfrom
print prto
print prdata
print qidline
qid = qidline[1].split(" ")[4]
if args.q:
print qid
return qid
if __name__== "__main__":
main()
|
nilq/baby-python
|
python
|
# flake8: noqa
from .gs_counterfactuals import growing_spheres_search
|
nilq/baby-python
|
python
|
import logging
import random
import numpy as np
connect_success = True
logger = logging.getLogger(__name__)
set_l3t_count = 0
clear_l3t_count = 0
def connect(ami_str):
logger.debug('simulated pyami connect')
if not connect_success:
raise RuntimeError('simulated fail')
else:
Entry._connected = True
def set_l3t(filter_string, l3t_file):
global set_l3t_count
set_l3t_count += 1
def clear_l3t():
global clear_l3t_count
clear_l3t_count += 1
class Entry:
_connected = False
def __init__(self, ami_name, ami_type, filter_string=None):
logger.debug('Initializing test pyami Entry %s', ami_name)
self._ami_name = ami_name
if not connect_success:
raise RuntimeError('simulated fail: bad connection')
if not Entry._connected:
raise RuntimeError('simulated fail: did not call connect')
self._filt = filter_string
self.clear()
def get(self):
if len(self._values):
return dict(mean=np.mean(self._values),
rms=np.std(self._values),
entries=len(self._values))
else:
return dict(mean=0, rms=0, entries=0)
def clear(self):
self._count = random.randint(1, 100)
self._values = [random.random() for i in range(self._count)]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
import os.path
from getpass import getuser
from os import remove, rmdir
from socket import gethostname
from tempfile import mkdtemp
from ansible_collections.community.crypto.plugins.module_utils.openssh.cryptography import (
AsymmetricKeypair,
HAS_OPENSSH_SUPPORT,
InvalidCommentError,
InvalidPrivateKeyFileError,
InvalidPublicKeyFileError,
InvalidKeySizeError,
InvalidKeyTypeError,
InvalidPassphraseError,
OpensshKeypair
)
DEFAULT_KEY_PARAMS = [
(
'rsa',
None,
None,
None,
),
(
'dsa',
None,
None,
None,
),
(
'ecdsa',
None,
None,
None,
),
(
'ed25519',
None,
None,
None,
),
]
VALID_USER_KEY_PARAMS = [
(
'rsa',
8192,
'change_me'.encode('UTF-8'),
'comment',
),
(
'dsa',
1024,
'change_me'.encode('UTF-8'),
'comment',
),
(
'ecdsa',
521,
'change_me'.encode('UTF-8'),
'comment',
),
(
'ed25519',
256,
'change_me'.encode('UTF-8'),
'comment',
),
]
INVALID_USER_KEY_PARAMS = [
(
'dne',
None,
None,
None,
),
(
'rsa',
None,
[1, 2, 3],
'comment',
),
(
'ecdsa',
None,
None,
[1, 2, 3],
),
]
INVALID_KEY_SIZES = [
(
'rsa',
1023,
None,
None,
),
(
'rsa',
16385,
None,
None,
),
(
'dsa',
256,
None,
None,
),
(
'ecdsa',
1024,
None,
None,
),
(
'ed25519',
1024,
None,
None,
),
]
@pytest.mark.parametrize("keytype,size,passphrase,comment", DEFAULT_KEY_PARAMS)
@pytest.mark.skipif(not HAS_OPENSSH_SUPPORT, reason="requires cryptography")
def test_default_key_params(keytype, size, passphrase, comment):
result = True
default_sizes = {
'rsa': 2048,
'dsa': 1024,
'ecdsa': 256,
'ed25519': 256,
}
default_comment = "%s@%s" % (getuser(), gethostname())
    try:
pair = OpensshKeypair.generate(keytype=keytype, size=size, passphrase=passphrase, comment=comment)
if pair.size != default_sizes[pair.key_type] or pair.comment != default_comment:
result = False
except Exception as e:
print(e)
result = False
assert result
@pytest.mark.parametrize("keytype,size,passphrase,comment", VALID_USER_KEY_PARAMS)
@pytest.mark.skipif(not HAS_OPENSSH_SUPPORT, reason="requires cryptography")
def test_valid_user_key_params(keytype, size, passphrase, comment):
result = True
try:
pair = OpensshKeypair.generate(keytype=keytype, size=size, passphrase=passphrase, comment=comment)
if pair.key_type != keytype or pair.size != size or pair.comment != comment:
result = False
except Exception as e:
print(e)
result = False
assert result
@pytest.mark.parametrize("keytype,size,passphrase,comment", INVALID_USER_KEY_PARAMS)
@pytest.mark.skipif(not HAS_OPENSSH_SUPPORT, reason="requires cryptography")
def test_invalid_user_key_params(keytype, size, passphrase, comment):
result = False
try:
OpensshKeypair.generate(keytype=keytype, size=size, passphrase=passphrase, comment=comment)
except (InvalidCommentError, InvalidKeyTypeError, InvalidPassphraseError):
result = True
except Exception as e:
print(e)
pass
assert result
@pytest.mark.parametrize("keytype,size,passphrase,comment", INVALID_KEY_SIZES)
@pytest.mark.skipif(not HAS_OPENSSH_SUPPORT, reason="requires cryptography")
def test_invalid_key_sizes(keytype, size, passphrase, comment):
result = False
try:
OpensshKeypair.generate(keytype=keytype, size=size, passphrase=passphrase, comment=comment)
except InvalidKeySizeError:
result = True
except Exception as e:
print(e)
pass
assert result
@pytest.mark.skipif(not HAS_OPENSSH_SUPPORT, reason="requires cryptography")
def test_valid_comment_update():
pair = OpensshKeypair.generate()
new_comment = "comment"
try:
pair.comment = new_comment
except Exception as e:
print(e)
pass
assert pair.comment == new_comment and pair.public_key.split(b' ', 2)[2].decode() == new_comment
@pytest.mark.skipif(not HAS_OPENSSH_SUPPORT, reason="requires cryptography")
def test_invalid_comment_update():
result = False
pair = OpensshKeypair.generate()
new_comment = [1, 2, 3]
try:
pair.comment = new_comment
except InvalidCommentError:
result = True
assert result
@pytest.mark.skipif(not HAS_OPENSSH_SUPPORT, reason="requires cryptography")
def test_valid_passphrase_update():
result = False
passphrase = "change_me".encode('UTF-8')
try:
tmpdir = mkdtemp()
keyfilename = os.path.join(tmpdir, "id_rsa")
pair1 = OpensshKeypair.generate()
pair1.update_passphrase(passphrase)
with open(keyfilename, "w+b") as keyfile:
keyfile.write(pair1.private_key)
with open(keyfilename + '.pub', "w+b") as pubkeyfile:
pubkeyfile.write(pair1.public_key)
pair2 = OpensshKeypair.load(path=keyfilename, passphrase=passphrase)
if pair1 == pair2:
result = True
finally:
if os.path.exists(keyfilename):
remove(keyfilename)
if os.path.exists(keyfilename + '.pub'):
remove(keyfilename + '.pub')
if os.path.exists(tmpdir):
rmdir(tmpdir)
assert result
@pytest.mark.skipif(not HAS_OPENSSH_SUPPORT, reason="requires cryptography")
def test_invalid_passphrase_update():
result = False
passphrase = [1, 2, 3]
pair = OpensshKeypair.generate()
try:
pair.update_passphrase(passphrase)
except InvalidPassphraseError:
result = True
assert result
@pytest.mark.skipif(not HAS_OPENSSH_SUPPORT, reason="requires cryptography")
def test_invalid_privatekey():
result = False
try:
tmpdir = mkdtemp()
keyfilename = os.path.join(tmpdir, "id_rsa")
pair = OpensshKeypair.generate()
with open(keyfilename, "w+b") as keyfile:
keyfile.write(pair.private_key[1:])
with open(keyfilename + '.pub', "w+b") as pubkeyfile:
pubkeyfile.write(pair.public_key)
OpensshKeypair.load(path=keyfilename)
except InvalidPrivateKeyFileError:
result = True
finally:
if os.path.exists(keyfilename):
remove(keyfilename)
if os.path.exists(keyfilename + '.pub'):
remove(keyfilename + '.pub')
if os.path.exists(tmpdir):
rmdir(tmpdir)
assert result
@pytest.mark.skipif(not HAS_OPENSSH_SUPPORT, reason="requires cryptography")
def test_mismatched_keypair():
result = False
try:
tmpdir = mkdtemp()
keyfilename = os.path.join(tmpdir, "id_rsa")
pair1 = OpensshKeypair.generate()
pair2 = OpensshKeypair.generate()
with open(keyfilename, "w+b") as keyfile:
keyfile.write(pair1.private_key)
with open(keyfilename + '.pub', "w+b") as pubkeyfile:
pubkeyfile.write(pair2.public_key)
OpensshKeypair.load(path=keyfilename)
except InvalidPublicKeyFileError:
result = True
finally:
if os.path.exists(keyfilename):
remove(keyfilename)
if os.path.exists(keyfilename + '.pub'):
remove(keyfilename + '.pub')
if os.path.exists(tmpdir):
rmdir(tmpdir)
assert result
@pytest.mark.skipif(not HAS_OPENSSH_SUPPORT, reason="requires cryptography")
def test_keypair_comparison():
assert OpensshKeypair.generate() != OpensshKeypair.generate()
assert OpensshKeypair.generate() != OpensshKeypair.generate(keytype='dsa')
assert OpensshKeypair.generate() != OpensshKeypair.generate(keytype='ed25519')
assert OpensshKeypair.generate(keytype='ed25519') != OpensshKeypair.generate(keytype='ed25519')
try:
tmpdir = mkdtemp()
keys = {
'rsa': {
'pair': OpensshKeypair.generate(),
'filename': os.path.join(tmpdir, "id_rsa"),
},
'dsa': {
'pair': OpensshKeypair.generate(keytype='dsa', passphrase='change_me'.encode('UTF-8')),
'filename': os.path.join(tmpdir, "id_dsa"),
},
'ed25519': {
'pair': OpensshKeypair.generate(keytype='ed25519'),
'filename': os.path.join(tmpdir, "id_ed25519"),
}
}
for v in keys.values():
with open(v['filename'], "w+b") as keyfile:
keyfile.write(v['pair'].private_key)
with open(v['filename'] + '.pub', "w+b") as pubkeyfile:
pubkeyfile.write(v['pair'].public_key)
assert keys['rsa']['pair'] == OpensshKeypair.load(path=keys['rsa']['filename'])
loaded_dsa_key = OpensshKeypair.load(path=keys['dsa']['filename'], passphrase='change_me'.encode('UTF-8'))
assert keys['dsa']['pair'] == loaded_dsa_key
loaded_dsa_key.update_passphrase('change_me_again'.encode('UTF-8'))
assert keys['dsa']['pair'] != loaded_dsa_key
loaded_dsa_key.update_passphrase('change_me'.encode('UTF-8'))
assert keys['dsa']['pair'] == loaded_dsa_key
loaded_dsa_key.comment = "comment"
assert keys['dsa']['pair'] != loaded_dsa_key
assert keys['ed25519']['pair'] == OpensshKeypair.load(path=keys['ed25519']['filename'])
finally:
for v in keys.values():
if os.path.exists(v['filename']):
remove(v['filename'])
if os.path.exists(v['filename'] + '.pub'):
remove(v['filename'] + '.pub')
if os.path.exists(tmpdir):
rmdir(tmpdir)
assert OpensshKeypair.generate() != []
|
nilq/baby-python
|
python
|
# Copyright (c) 2015, VMRaid Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
def execute():
from vmraid.installer import remove_from_installed_apps
remove_from_installed_apps("shopping_cart")
|
nilq/baby-python
|
python
|
__license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = 'Lucas Theis <lucas@theis.io>'
__docformat__ = 'epytext'
from django.db import models
from publications.models.orderedmodel import OrderedModel
class Type(OrderedModel):
class Meta:
ordering = ('order',)
app_label = 'publications'
type = models.CharField(max_length=128)
description = models.CharField(max_length=128)
bibtex_types = models.TextField(default='article',
verbose_name='BibTex types',
help_text='Possible BibTex types, separated by comma.')
hidden = models.BooleanField(
help_text='Hide publications from main view.')
def __unicode__(self):
return "%s (%s)" % (self.type, self.description)
def __init__(self, *args, **kwargs):
OrderedModel.__init__(self, *args, **kwargs)
        self.bibtex_types = self.bibtex_types.replace('@', '')
        self.bibtex_types = self.bibtex_types.replace(';', ',')
        self.bibtex_types = self.bibtex_types.replace('and', ',')
        self.bibtex_type_list = [s.strip().lower()
                                 for s in self.bibtex_types.split(',')]
self.bibtex_types = ', '.join(self.bibtex_type_list)
self.bibtex_type = self.bibtex_type_list[0]
|
nilq/baby-python
|
python
|
import keras # noqa: F401
import numpy as np
import tensorflow as tf
import wandb
from wandb.keras import WandbCallback
def main():
wandb.init(name=__file__)
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(3, 3, activation="relu", input_shape=(28, 28, 1)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(10, activation="softmax"))
model.compile(
loss="sparse_categorical_crossentropy", optimizer="sgd", metrics=["accuracy"]
)
model.fit(
np.ones((10, 28, 28, 1)),
np.ones((10,)),
epochs=7,
validation_split=0.2,
callbacks=[WandbCallback()],
)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
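# Note: the first class below shadows Python's built-in NotImplemented singleton;
# a more distinctive name would avoid confusion with rich-comparison return values.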
class NotImplemented(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, args, kwargs)
class NoSuchVirtualMachine(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, args, kwargs)
class InvalidOperation(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, args, kwargs)
self._msg = args[0]
def __str__(self):
return self._msg
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
nilq/baby-python
|
python
|
from .wide_resnet50 import WideResNet50
__all__ = ["WideResNet50"]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 28 16:35:56 2022
@author: Pedro
"""
def score(mode: str, puntosp1: int, puntosp2: int, name1: str, name2: str, puntajes: tuple, auto_ch: str)->str:
"""Funcion que simula un game de Tenis, ya sea de forma manual, es decir
con intervencion del usuario en los puntos, o de forma automatica, dejando
a eleccion del usuario si mostrar o no los puntajes en todas las jugadas
Argumentos:
mode -- Variable que decide que modo de juego se ejecutara. Tomara el valor
'1' para ejecutar el modo de juego manual y el '2' para ejecutar el
automatico.
puntosp1 -- Variable que guarda el puntaje del primer jugador
puntosp2 -- Variable que guarda el puntaje del segundo jugador
name1 -- Variable que guarda el nombre del primer jugador
name2 -- Variable que guarda el nombre del segundo jugador
puntajes -- Variable de tipo tupla que guarda los puntajes reglamentarios
del tenis, usados para llamarlos usando a puntosp1 y puntosp2 como numeros
de indice
auto_ch -- Variable que decide si se imprimen las jugadas del game o solo
el resultado en caso de que se ejecute el modo automatico.
"""
import random
import time
while True:
if mode == '1':
score_sum = input("¿Quién marca? ")
if mode == '2':
            score_sum = random.choice('12')
if score_sum == "1":
puntosp1 += 1
if auto_ch == "1":
print (f"\n{name1} ha marcado un punto")
if puntosp1 == 4 and puntosp2 < 3:
print (f"\n{name1} ha ganado el game")
break
if puntosp1 == 5 and puntosp2 == 3:
print(f"\n{name1} ha ganado el game")
break
if puntosp1 == 4 and puntosp2 == 4 and auto_ch == "1":
puntosp1 -= 1
puntosp2 -= 1
print (f"\nEl game ahora va:\n{name1}: {puntajes[puntosp1]} - {name2}: {puntajes[puntosp2]}")
if mode == "2" and auto_ch == "1":
time.sleep(2)
elif auto_ch == "1":
print (f"\nEl game ahora va:\n{name1}: {puntajes[puntosp1]} - {name2}: {puntajes[puntosp2]}")
if mode == "2" and auto_ch == "1":
time.sleep(2)
elif score_sum == "2":
puntosp2 += 1
if auto_ch == "1":
print (f"\n{name2} ha marcado un punto")
if puntosp2 == 4 and puntosp1 < 3:
print (f"\n{name2} ha ganado el game")
break
if puntosp2 == 5 and puntosp1 == 3:
print(f"\n{name2} ha ganado el game")
break
if puntosp1 == 4 and puntosp2 == 4 and auto_ch == "1":
puntosp1 -= 1
puntosp2 -= 1
print (f"\nEl game ahora va:\n{name1}: {puntajes[puntosp1]} - {name2}: {puntajes[puntosp2]}")
if mode == "2" and auto_ch == "1":
time.sleep(2)
elif auto_ch == "1":
print (f"\nEl game ahora va:\n{name1}: {puntajes[puntosp1]} - {name2}: {puntajes[puntosp2]}")
if mode == "2" and auto_ch == "1":
time.sleep(2)
return "Partido Terminado"
|
nilq/baby-python
|
python
|
import json
import sys
class PolicyResolver():
def __init__(self):
self.desc = []
self.desc_by_label = {}
def read_json(self, fname):
'''
reads the JSON produced by the cle preprocessor
'''
with open(fname) as f:
fs = f.read()
desc1 = json.loads(fs)
self.desc = [x for x in desc1 if 'cle-json' in x and 'level' in x['cle-json']]
self.desc_by_label = {x["cle-label"] : x for x in self.desc}
if len(self.desc) != len(desc1):
nojs = [x["cle-label"] for x in desc1 if 'cle-json' not in x]
return "No definition for following label(s): " + ",".join(nojs)
else:
return None
def get_labels(self):
'''
returns list of labels defined in the program
'''
ret = set()
for l in self.desc:
ret.add(l['cle-label'])
return list(ret)
def get_enclaves(self):
'''
returns list of security enclaves defined in the program
'''
ret = set()
for l in self.desc:
ret.add(l['cle-json']['level'])
return list(ret)
def path_exists(self, oe, e):
'''
returns True if there is a possibility that data can flow
from enclave oe to e
'''
#XXX to be done taking default policy into account
for l in self.desc:
if l['cle-json']['level'] == oe and 'cdf' in l['cle-json']:
for c in l['cle-json']['cdf']:
if c['remotelevel'] == e and (c['direction'] == 'egress' or c['direction'] == 'bidirectional'):
return True
return False
def get_common_enclaves(self):
'''
returns list of enclaves into which the data may flow,
even if it needs to be guarded
'''
#it is a reachability graph problem
        #XXX to be implemented properly later
ret = []
encs = self.get_enclaves()
for e in encs:
#can it be reached from all other enclaves?
e_reachable = True
for oe in encs:
if oe != e:
e_reachable = e_reachable and self.path_exists(oe, e)
if e_reachable:
ret.append(e)
return ret
def get_label_enclave(self, ll):
'''
returns map {label : enclave} where label is one of the labels in parameter
and enclave is the enclave name for that label
'''
ret = {}
for l in ll:
for o in self.desc:
if o['cle-label'] == l:
ret[l] = o['cle-json']['level']
return ret
def resolve_function(self, funct_node, target_enc):
ann = funct_node.get('annotation')
tai = funct_node.get('taint')
dbinf = funct_node.get('dbginfo')
f_name = dbinf.get_name()
if tai is None:
return False, "Function '%s' needs XD annotation"%(f_name)
if self.get_label_enclave([tai]).get(tai, "") != ann:
return False, "Need to verify active annotations for '%s'"%(f_name)
desc = self.desc_by_label.get(tai)
if not (desc and 'cle-json' in desc):
return False, "Function '%s' should be annotated with label '%s' that has no definition"%(f_name, tai)
cjd = desc['cle-json']
if not ('cdf' in cjd and len(cjd['cdf']) == 1):
return False, "Definition of label '%s' does not have exactly one 'cdf' section"
cdf = cjd['cdf'][0]
        needed = {'argtaints', 'codtaints', 'rettaints', 'remotelevel', 'direction', 'guarddirective'}
if not needed.issubset(set(cdf.keys())):
return False, "Definition of label '%s' has to comply with XD convention"%(tai)
if cdf['remotelevel'] != target_enc:
return False, "Label '%s' needs 'remotelevel' to be '%s'"%(tai, target_enc)
return True, "Ready to be replaced with RPC"
if __name__ == "__main__":
p = PolicyResolver()
p.read_json(sys.argv[1])
print(p.get_labels())
print(p.get_enclaves())
print(p.get_common_enclaves())
|
nilq/baby-python
|
python
|
"""
Unit tests: testing a small unit of code, such as a function or a class, in isolation from the system,
from the developer's perspective
"""
import os
from pathlib import Path
import pytest
from ..colordetect import ColorDetect, VideoColor, col_share
def test_image_vid_parsed_to_class(image, video):
"""
    test whether an image/video is passed to the class ColorDetect(<image>)
Check whether an instance is created
"""
isinstance(ColorDetect(image), object)
isinstance(VideoColor(video), object)
def test_color_detect_gets_numpy_array_from_video(image, video):
"""
Test whether the filename used in the test is the first image
"""
# user_video = VideoColor(video)
pass
def test_get_color_count_has_correct_color_and_count(image):
"""
Ensure get_color_count gets the correct color and count
"""
user_image = ColorDetect(image)
# since the image is plain 255,255,255
assert len(user_image.get_color_count(color_count=1)) == 1
assert user_image.get_color_count(color_count=1) == {"white": 100.0}
def test_what_is_in_dictionary_is_being_written(datadir, image):
"""
What is in the dictionary should be what is being written
"""
user_image = ColorDetect(image)
# color_dictionary = user_image.get_color_count(color_count=1)
user_image.get_color_count(color_count=1)
file_name = "out.jpg"
user_image.save_image(location=datadir, file_name=file_name)
# result_image = os.path.join(datadir, file_name)
def test_valid_color_format_is_parsed(image, video):
"""
    An exception is raised if an invalid color_format is passed
"""
user_image = ColorDetect(image)
user_video = VideoColor(video)
with pytest.raises(Exception):
user_image.get_color_count(color_count=1, color_format="invalid_random_format")
with pytest.raises(Exception):
user_video.get_video_frames(
frame_color_count=1, color_format="invalid_random_format"
)
user_image.get_color_count(color_count=1)
with pytest.raises(Exception):
user_image.write_color_count(font_color=(267, 0, 0))
def test_valid_params_to_get_color_count(image):
"""
    An exception is raised if an invalid color_count value is passed, for instance a string
"""
user_image = ColorDetect(image)
with pytest.raises(Exception):
user_image.get_color_count(color_count="many_colors")
def test_save_params_are_valid(image, datadir):
"""
A string is being used as a file name as well as location
"""
user_image = ColorDetect(image)
user_image.get_color_count(color_count=1)
with pytest.raises(Exception):
user_image.save_image(location=datadir, file_name=5)
# with pytest.raises(Exception) as e_info:
# user_image.save_image(location=500, file_name="output.jpg")
def test_result_file_name_is_valid(image, datadir):
"""
test result filename has what was given as the file name
:param image:
:param datadir:
:return:
"""
user_image = ColorDetect(image)
user_image.get_color_count(color_count=1)
file_name = "ramble.jpg"
user_image.save_image(location=datadir, file_name=file_name)
saved_file = os.path.basename(Path(datadir / file_name))
assert saved_file == file_name
def test_progress_bar_shows_correct_percentage(video):
"""
ensure the percentage displayed is correct
:param video:
:return:
"""
# user_video = VideoColor(video)
# user_video.get_video_frames(progress=True)
pass
def test_get_video_frames_gets_correct_params(video):
user_video = VideoColor(video)
with pytest.raises(Exception):
user_video.get_video_frames(color_format="invalid_random_format")
with pytest.raises(Exception):
user_video.get_video_frames(frame_color_count="1")
with pytest.raises(Exception):
user_video.get_video_frames(progress=24)
def test_ordered_colors_are_correct_count(video):
"""
test sorted colors gets correct params and returns correct color count
:param video:
"""
user_video = VideoColor(video)
all_colors = user_video.get_video_frames()
with pytest.raises(Exception):
col_share.sort_order(object_description=all_colors, key_count="5")
with pytest.raises(Exception):
col_share.sort_order(object_description=all_colors, ascending="random")
dominant_colors = col_share.sort_order(object_description=all_colors, key_count=6)
assert len(dominant_colors) == 6
"""
below line might fail as colors are grabbed on the second instead of per frame
hence two consecutive calls might grab diff frames on the same second
"""
# assert list(dominant_colors.values()) == [68.83, 22.48, 22.22, 21.7, 19.11, 17.77]
def test_validation_of_rgb_is_correct(image):
"""
test a valid rgb format can be identified
"""
user_image = ColorDetect(image)
assert user_image._validate_rgb((255, 0, 0))
assert not user_image._validate_rgb((256, 0, 0))
assert not user_image._validate_rgb((255, -2, 0))
|
nilq/baby-python
|
python
|
""" Main application
"""
import logging
from aiohttp import web
from .db import setup_db
from .rest import setup_rest, create_router
from .session import setup_session
from .settings import CONFIG_KEY
log = logging.getLogger(__name__)
def create(config):
"""
Initializes service
"""
log.debug("Initializing app ... ")
app = web.Application(router=create_router())
app[CONFIG_KEY] = config
setup_db(app)
setup_session(app)
setup_rest(app)
return app
def run(config):
""" Runs service
NOTICE it is sync!
"""
log.debug("Serving app ... ")
app = create(config)
web.run_app(app,
host=config["app"]["host"],
port=config["app"]["port"])
|
nilq/baby-python
|
python
|
import sys
import os
def find_directories(directory):
for dirpath, dirs, files in os.walk(str(directory)):
for dr in dirs:
print(dr)
if __name__ == '__main__':
find_directories(sys.argv[1])
|
nilq/baby-python
|
python
|
'''
Program to play tic-tac-toe (tateti)
Valentin Berman 13/02/20
'''
# Constants
NADA = '-'
X = 'x'
O = 'o'
MOV = 'hay movimientos'
GANA_X = 1
GANA_O = -1
EMPATE = 0
MAX = 'max' # the player with X is MAX
MIN = 'min' # the player with O is MIN
# Classes
class Tateti():
'''
    Class that defines a tic-tac-toe board
'''
    # Internal functions
def __init__(self):
self.tablero = [
NADA, NADA, NADA, # | 0 1 2 |
NADA, NADA, NADA, # | 3 4 5 |
NADA, NADA, NADA # | 6 7 8 |
]
self.turno = X
self.movDisp = [1,2,3,4,5,6,7,8,9]
def __str__(self):
'''
        Returns the current board
'''
s = '\n\tTABLERO:\n\n'
for fila in range(3):
s = s + "\t| %s %s %s |" % tuple(self.tablero[fila*3:fila*3+3]) + '\n'
s = s + "\n\tTURNO: %s\n" % self.turno
return s
def _minimax(self, tablero, jugador):
'''
        Implementation of the minimax algorithm for tic-tac-toe
'''
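        # Exhaustively explores every continuation: returns GANA_X (1) if X can force a win,
        # GANA_O (-1) if O can, and EMPATE (0) otherwise.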
estado = self._estado(tablero)
if estado != MOV:
return estado
        # Maximizing player, X in this case
if jugador == MAX:
maxEvalu = -1
for indice, celda in enumerate(tablero):
if celda == NADA:
nuevoTablero = tablero.copy()
                    nuevoTablero[indice] = X # Recall that X is the MAX player
evalu = self._minimax(nuevoTablero, MIN)
maxEvalu = max(evalu, maxEvalu)
del nuevoTablero
return maxEvalu
        # Minimizing player, O in this case
if jugador == MIN:
minEvalu = 1
for indice, celda in enumerate(tablero):
if celda == NADA:
nuevoTablero = tablero.copy()
                    nuevoTablero[indice] = O # Recall that O is the MIN player
evalu = self._minimax(nuevoTablero, MAX)
minEvalu = min(evalu, minEvalu)
del nuevoTablero
return minEvalu
def _gano(self, tablero, ficha):
'''
        Returns True if 'ficha' won, otherwise returns False
'''
posGanadoras = (
(0,1,2),
(3,4,5),
(6,7,8),
(0,3,6),
(1,4,7),
(2,5,8),
(0,4,8),
(2,4,6),
)
if ficha not in (NADA, X, O):
raise ValueError("'ficha' debe ser NADA, X o O")
posConFicha=[]
for indice, celda in enumerate(tablero):
if celda == ficha:
posConFicha.append(indice)
        for pos in posGanadoras:
            if all(p in posConFicha for p in pos):
                return True
        return False
def _lleno(self, tablero):
'''
        Returns True if every cell of the board is occupied, otherwise False
'''
for celda in tablero:
if celda == NADA:
return False
return True
def _estado(self, tablero):
'''
        Returns the current state of the board
        MOV if moves are available
        GANA_X if X won
        GANA_O if O won
        EMPATE if the game is a draw
'''
if self._gano(tablero, X):
return GANA_X
elif self._gano(tablero, O):
return GANA_O
elif self._lleno(tablero):
return EMPATE
else:
return MOV
def ver(self):
'''
        Prints the current board on screen
        Equivalent to print(Tateti)
'''
print('')
for fila in range(3):
print("\t| %s %s %s |" % tuple(self.tablero[fila*3:fila*3+3]))
print('')
def verTurno(self):
print("\n\tTURNO: %s\n" % self.turno)
def preparar(self, ficha, lpos):
'''
        'lpos' is a list of positions that are set to the value of 'ficha'.
        Recomputes the board's turn, so keep in mind that X plays the
        first turn, O the second, and so on.
        Returns the available moves
        The positions are:
        | 1 2 3 |
        | 4 5 6 |
        | 7 8 9 |
'''
if ficha not in (X, O, NADA):
raise ValueError("'ficha' debe ser X, O o NADA")
if type(lpos) is not list:
raise ValueError("'lpos' debe ser una lista")
for pos in lpos:
if pos > 9 or pos < 1:
raise ValueError("los elementos de 'lpos' deben estar entre 1 y 9")
self.tablero[pos-1] = ficha
cuent = 0
self.movDisp = [1,2,3,4,5,6,7,8,9]
for indice, celda in enumerate(self.tablero):
if celda in [X,O]:
cuent += 1
self.movDisp.remove(indice+1)
self.turno = X if (cuent % 2 == 0) else O
return self.movDisp
def jugar(self, pos):
'''
        Plays at position 'pos'. Chooses the piece to play
        automatically. X plays first.
        Returns the available moves
        The positions are:
| 1 2 3 |
| 4 5 6 |
| 7 8 9 |
'''
if pos > 9 or pos < 1:
raise ValueError("'pos' debe estar entre 1 y 9")
if pos not in self.movDisp:
raise ValueError("'%d' no es un movimiento disponible" % pos)
self.tablero[pos-1] = self.turno
self.turno = O if (self.turno == X) else X
self.movDisp.remove(pos)
return self.movDisp
def reiniciar(self):
'''
        Resets the board
'''
self.tablero = [
NADA, NADA, NADA,
NADA, NADA, NADA,
NADA, NADA, NADA
]
self.turno = X
self.movDisp = [1,2,3,4,5,6,7,8,9]
def estado(self):
'''
        Returns the current state of the board
        MOV if moves are available
        GANA_X if X won
        GANA_O if O won
        EMPATE if the game is a draw
'''
return self._estado(self.tablero)
def mejorMovimiento(self):
'''
        Returns the best move
'''
        if self._lleno(self.tablero):
            raise AssertionError("El tablero no tiene movimientos disponibles")
        # If X is to play
if self.turno == X:
maxEvalu = -1
for mov in self.movDisp:
nuevoTablero = self.tablero.copy()
nuevoTablero[mov-1] = X
evalu = self._minimax(nuevoTablero, MIN)
if evalu >= 1:
return mov
if evalu >= maxEvalu:
maxEvalu = evalu
mejorMov = mov
return mejorMov
        # If O is to play
if self.turno == O:
minEvalu = 1
for mov in self.movDisp:
nuevoTablero = self.tablero.copy()
nuevoTablero[mov-1] = O
evalu = self._minimax(nuevoTablero, MAX)
if evalu <= -1:
return mov
if evalu <= minEvalu:
minEvalu = evalu
mejorMov = mov
return mejorMov
def cualquierMovimiento(self):
from random import choice
return choice(self.movDisp)
# Main program
if __name__ == '__main__':
from time import sleep
    # Functions
def prompt():
return input('>>> ').lower()
    # Variables
instrucciones = """
tateti por Valentin Berman
Instrucciones:
q - salir
h - imprime este texto
[1-9] - selecciona una celda para jugar
Empieza usted, jugando con X
"""
lComandos = ('h', 'q', '1', '2', '3', '4', '5', '6', '7', '8', '9')
mensajeError = """Comando desconocido. Use 'h' para ver todo los comandos."""
celdaOcupada = """Esa celda ya está ocupada, elija otra"""
mensajeGanador = """Felicidades, usted ganó!!\nJugar de nuevo? (s/n)"""
mensajeEmpate = """Empate!!\nJugar de nuevo? (s/n)"""
mensajePerdedor = """Perdiste, que lástima!!\nJugar de nuevo? (s/n)"""
    # Program
print(instrucciones)
ttt = Tateti()
while True: # Loop de todo el juego
ttt.reiniciar()
while True: # Loop de turno
comd = prompt()
while comd not in lComandos:
print(mensajeError)
comd = prompt()
            if comd == 'q': # Quit
exit()
            elif comd == 'h': # Help
print(instrucciones)
            elif comd in ('1', '2', '3', '4', '5', '6', '7', '8', '9'): # Play
                # Player's turn:
                comd = int(comd) # comd changes from str to int!
if comd not in ttt.movDisp:
print(celdaOcupada)
continue
ttt.jugar(comd)
ttt.ver()
if ttt.estado() in (GANA_X,EMPATE):
if ttt.estado() == GANA_X:
print(mensajeGanador)
else:
print(mensajeEmpate)
comd = prompt()
while comd not in ('s','n','si','no'):
print("Jugar de nuevo? ('si' o 's' para jugar, 'no' o 'n' para salir)")
comd = prompt()
if comd in ('n', 'no'):
exit()
else:
print("Nueva ronda\n")
break
                # Machine's turn:
                print("Es mi turno. Pensando", end='', flush=True) # waits a moment for user experience
for _ in range(5):
sleep(0.5)
print('.',end='',flush=True)
print()
ttt.jugar(ttt.mejorMovimiento())
ttt.ver()
if ttt.estado() == GANA_O:
print(mensajePerdedor)
comd = prompt()
while comd not in ('s','n','si','no'):
print("Jugar de nuevo? ('si' o 's' para jugar, 'no' o 'n' para salir)")
comd = prompt()
if comd in ('n', 'no'):
exit()
else:
print("Nueva ronda\n")
break
|
nilq/baby-python
|
python
|
# coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
class PositionUnit(Enum):
PIXELS = "PIXELS"
PERCENTS = "PERCENTS"
|
nilq/baby-python
|
python
|
from ares.Lib import Ares
"""
GENERAL PURPOSE
---------------
This script aims to show how to store data into a database - nothing more.
Finally, database_connection_2.py will show a simple example of data extraction using the AReS connector
capabilities (see the example script connectors.py for more info), applying some simple transformations,
and storing the result in the database.
The aim of this module is to show a quick example of retrieving the data directly
from the db if it's there, or going through the data extraction step if it's missing.
After going through these examples you should have the basics to use databases within AReS.
PRE-REQUISITE
-------------
The database created in this script is defined in the models folder, as indicated by the modelPath argument;
by default every python script in that folder is parsed unless the filename argument is specified.
"""
import random
import string
aresObj = Ares.ReportAPI()
#create the database - the on_init() function will be called if it exists in the model file (here test_model.py)
#if no database argument is specified the database will be created where this script is located
my_db = aresObj.db(modelPath=r'models', filename='test_model.py')
#Generate random data
record_set = []
for i in range(100):
record_set.append({'name': ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10)), 'security_number': random.choice(range(10000))})
#get the column names for information
print(my_db.table('random_table').columns)
#insert the records in the database
my_db.insert('random_table', record_set, commit=True)
#get data using the fetch method which return an iterator
print(list(my_db.select(['random_table']).fetch(limit=10)))
#delete the first 20 records
my_db.delete('random_table').where([my_db.column('random_table', 'id') <= 20]).execute()
print(list(my_db.select(['random_table']).fetch()))
##delete with an or statement
my_db.delete('random_table').where([my_db.or_(my_db.column('random_table', 'id') == 21, my_db.column('random_table', 'id') == 55)]).execute()
#get data using the getData method which returns a pandas dataframe
print(my_db.select(['random_table']).getData())
##delete with an and statement
my_db.delete('random_table').where([my_db.column('random_table', 'id') == 25]).where([my_db.column('random_table', 'name') != '']).execute()
#get data using the getData method which returns a pandas dataframe
print(my_db.select(['random_table']).getData())
|
nilq/baby-python
|
python
|
import numpy as np
from boid import Boid
width = 100
height = 100
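# Spawn five boids at random positions inside the 100x100 world.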
flock = [Boid(*np.random.rand(2)*100, width, height) for _ in range(5)]
def updatePositions():
global flock
for boid in flock:
boid.apply_behaviour(flock)
boid.update()
boid.edges()
print("-----FRAME 1-----")
updatePositions()
print("-----FRAME 2-----")
updatePositions()
|
nilq/baby-python
|
python
|
__version__ = "0.0.1"
version = __version__
|
nilq/baby-python
|
python
|
import pytest
from django.contrib.admin.options import get_content_type_for_model
from django.contrib.auth.models import Permission
from django.contrib.gis.geos import Point
from django.utils.timezone import now
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient
from bikesharing.models import Bike, Location, Lock, LockType, Rent
from cykel.models import CykelLogEntry
@pytest.fixture
def testuser_john_doe(django_user_model):
return django_user_model.objects.create(username="john", password="doe")
@pytest.fixture
def testuser_jane_canrent(django_user_model):
jane = django_user_model.objects.create(username="jane", password="canrent")
can_add_rent_permission = Permission.objects.get(name="Can add rent")
jane.user_permissions.add(can_add_rent_permission)
return jane
@pytest.fixture
def testuser_mary_canrent(django_user_model):
mary = django_user_model.objects.create(username="mary", password="canrent")
can_add_rent_permission = Permission.objects.get(name="Can add rent")
mary.user_permissions.add(can_add_rent_permission)
return mary
@pytest.fixture
def user_client_john_doe_logged_in(testuser_john_doe):
client = APIClient()
token, _ = Token.objects.get_or_create(user=testuser_john_doe)
client.credentials(HTTP_AUTHORIZATION="Token " + token.key)
return client
@pytest.fixture
def user_client_jane_canrent_logged_in(testuser_jane_canrent):
client = APIClient()
token, _ = Token.objects.get_or_create(user=testuser_jane_canrent)
client.credentials(HTTP_AUTHORIZATION="Token " + token.key)
return client
@pytest.fixture
def user_client_mary_canrent_logged_in(testuser_mary_canrent):
client = APIClient()
token, _ = Token.objects.get_or_create(user=testuser_mary_canrent)
client.credentials(HTTP_AUTHORIZATION="Token " + token.key)
return client
@pytest.fixture
def lock_type_combination():
return LockType.objects.create(form_factor=LockType.FormFactor.COMBINATION_LOCK)
@pytest.fixture
def lock(lock_type_combination):
return Lock.objects.create(unlock_key="000000", lock_type=lock_type_combination)
@pytest.fixture
def another_lock(lock_type_combination):
return Lock.objects.create(unlock_key="000000", lock_type=lock_type_combination)
@pytest.fixture
def some_lock(lock_type_combination):
return Lock.objects.create(unlock_key="000000", lock_type=lock_type_combination)
@pytest.fixture
def different_lock(lock_type_combination):
return Lock.objects.create(unlock_key="000000", lock_type=lock_type_combination)
@pytest.fixture
def available_bike(lock):
return Bike.objects.create(
availability_status=Bike.Availability.AVAILABLE, bike_number="1337", lock=lock
)
@pytest.fixture
def disabled_bike():
return Bike.objects.create(
availability_status=Bike.Availability.DISABLED, bike_number="2342"
)
@pytest.fixture
def inuse_bike(another_lock):
return Bike.objects.create(
availability_status=Bike.Availability.IN_USE,
bike_number="8080",
lock=another_lock,
)
@pytest.fixture
def inuse_missing_bike(different_lock):
return Bike.objects.create(
availability_status=Bike.Availability.IN_USE,
state=Bike.State.MISSING,
bike_number="8404",
lock=different_lock,
)
@pytest.fixture
def missing_bike(some_lock):
return Bike.objects.create(
availability_status=Bike.Availability.AVAILABLE,
state=Bike.State.MISSING,
bike_number="404",
lock=some_lock,
)
@pytest.fixture
def rent_jane_running(testuser_jane_canrent, inuse_bike):
return Rent.objects.create(
rent_start=now(),
user=testuser_jane_canrent,
bike=inuse_bike,
)
@pytest.fixture
def rent_jane_running_missing(testuser_jane_canrent, inuse_missing_bike):
return Rent.objects.create(
rent_start=now(),
user=testuser_jane_canrent,
bike=inuse_missing_bike,
)
@pytest.mark.django_db
def test_get_rents_logged_in_with_renting_rights(
testuser_jane_canrent, user_client_jane_canrent_logged_in, rent_jane_running
):
response = user_client_jane_canrent_logged_in.get("/api/rent")
assert response.status_code == 200, response.content
assert len(response.json()) == 1
assert response.json()[0]["id"] == rent_jane_running.id
assert (
response.json()[0]["bike"]["bike_number"] == rent_jane_running.bike.bike_number
)
@pytest.mark.django_db
def test_start_rent_logged_in_without_renting_rights(
testuser_john_doe, user_client_john_doe_logged_in, available_bike
):
data = {"bike": available_bike.bike_number}
response = user_client_john_doe_logged_in.post("/api/rent", data)
assert response.status_code == 403, response.content
available_bike.refresh_from_db()
assert available_bike.availability_status == Bike.Availability.AVAILABLE
@pytest.mark.django_db
def test_start_rent_logged_out(available_bike):
data = {"bike": available_bike.bike_number}
client = APIClient()
response = client.post("/api/rent", data)
assert response.status_code == 401, response.content
available_bike.refresh_from_db()
assert available_bike.availability_status == Bike.Availability.AVAILABLE
@pytest.mark.django_db
def test_start_rent_logged_in_with_renting_rights(
testuser_jane_canrent, user_client_jane_canrent_logged_in, available_bike
):
data = {"bike": available_bike.bike_number}
response = user_client_jane_canrent_logged_in.post("/api/rent", data)
assert response.status_code == 201, response.content
available_bike.refresh_from_db()
assert available_bike.availability_status == Bike.Availability.IN_USE
@pytest.mark.django_db
def test_start_rent_and_unlock_logged_in_with_renting_rights(
testuser_jane_canrent, user_client_jane_canrent_logged_in, available_bike
):
data = {"bike": available_bike.bike_number}
response = user_client_jane_canrent_logged_in.post("/api/rent", data)
assert response.status_code == 201, response.content
unlock_url = response.json()["unlock_url"]
response = user_client_jane_canrent_logged_in.post(unlock_url)
assert response.status_code == 200, response.content
assert response.json()["data"]["unlock_key"] == "000000"
@pytest.mark.django_db
def test_start_rent_inuse_bike_logged_in_with_renting_rights(
testuser_jane_canrent, user_client_jane_canrent_logged_in, inuse_bike
):
data = {"bike": inuse_bike.bike_number}
response = user_client_jane_canrent_logged_in.post("/api/rent", data)
assert response.status_code == 400, response.content
@pytest.mark.django_db
def test_start_rent_other_inuse_bike_logged_in_with_renting_rights(
testuser_mary_canrent,
user_client_mary_canrent_logged_in,
rent_jane_running,
inuse_bike,
):
data = {"bike": inuse_bike.bike_number}
response = user_client_mary_canrent_logged_in.post("/api/rent", data)
assert response.status_code == 400, response.content
@pytest.mark.django_db
def test_start_rent_unknown_bike_logged_in_with_renting_rights(
testuser_jane_canrent, user_client_jane_canrent_logged_in
):
data = {"bike": 404}
response = user_client_jane_canrent_logged_in.post("/api/rent", data)
assert response.status_code == 400, response.content
@pytest.mark.django_db
def test_start_rent_logged_in_with_renting_rights_and_location_from_client(
testuser_jane_canrent, user_client_jane_canrent_logged_in, available_bike
):
data = {"bike": available_bike.bike_number, "lat": -99.99, "lng": -89.99}
response = user_client_jane_canrent_logged_in.post("/api/rent", data)
assert response.status_code == 201, response.content
rent_id = response.json()["id"]
available_bike.refresh_from_db()
assert available_bike.availability_status == Bike.Availability.IN_USE
rent = Rent.objects.get(id=rent_id)
assert rent.start_location is not None
assert rent.start_location.geo.x == -89.99
assert rent.start_location.geo.y == -99.99
@pytest.mark.django_db
def test_start_rent_logged_in_with_renting_rights_and_location_from_client_missing_bike(
testuser_jane_canrent, user_client_jane_canrent_logged_in, missing_bike
):
data = {"bike": missing_bike.bike_number, "lat": -99.99, "lng": -89.99}
response = user_client_jane_canrent_logged_in.post("/api/rent", data)
assert response.status_code == 201, response.content
rent_id = response.json()["id"]
missing_bike.refresh_from_db()
assert missing_bike.availability_status == Bike.Availability.IN_USE
rent = Rent.objects.get(id=rent_id)
assert rent.start_location is not None
assert rent.start_location.geo.x == -89.99
assert rent.start_location.geo.y == -99.99
logentry = CykelLogEntry.objects.get(
content_type=get_content_type_for_model(missing_bike),
object_id=missing_bike.pk,
action_type="cykel.bike.missing_reporting",
)
assert logentry is not None
assert logentry.data["location_id"] is not None
assert logentry.data["location_id"] == rent.start_location.id
@pytest.mark.django_db
def test_start_rent_logged_in_with_renting_rights_missing_bike(
testuser_jane_canrent, user_client_jane_canrent_logged_in, missing_bike
):
data = {"bike": missing_bike.bike_number}
response = user_client_jane_canrent_logged_in.post("/api/rent", data)
assert response.status_code == 201, response.content
rent_id = response.json()["id"]
missing_bike.refresh_from_db()
assert missing_bike.availability_status == Bike.Availability.IN_USE
rent = Rent.objects.get(id=rent_id)
assert rent.start_location is None
logentry = CykelLogEntry.objects.get(
content_type=get_content_type_for_model(missing_bike),
object_id=missing_bike.pk,
action_type="cykel.bike.missing_reporting",
)
assert logentry is not None
assert "location_id" not in logentry.data
@pytest.mark.django_db
def test_end_rent_logged_in_with_renting_rights(
testuser_jane_canrent,
user_client_jane_canrent_logged_in,
rent_jane_running,
inuse_bike,
):
data = {}
response = user_client_jane_canrent_logged_in.post(
"/api/rent/{}/finish".format(rent_jane_running.id), data
)
assert response.status_code == 200, response.content
assert response.json()["success"] is True
rent_jane_running.refresh_from_db()
assert rent_jane_running.rent_end is not None
inuse_bike.refresh_from_db()
assert inuse_bike.availability_status == Bike.Availability.AVAILABLE
@pytest.mark.django_db
def test_end_rent_logged_in_with_renting_rights_and_location_from_bike(
testuser_jane_canrent,
user_client_jane_canrent_logged_in,
rent_jane_running,
inuse_bike,
):
loc = Location.objects.create(
bike=inuse_bike, source=Location.Source.TRACKER, reported_at=now()
)
loc.geo = Point(-89.99, -99.99, srid=4326)
loc.save()
data = {}
response = user_client_jane_canrent_logged_in.post(
"/api/rent/{}/finish".format(rent_jane_running.id), data
)
assert response.status_code == 200, response.content
rent_jane_running.refresh_from_db()
assert rent_jane_running.rent_end is not None
assert rent_jane_running.end_location is not None
assert rent_jane_running.end_location.geo.x == -89.99
assert rent_jane_running.end_location.geo.y == -99.99
inuse_bike.refresh_from_db()
assert inuse_bike.availability_status == Bike.Availability.AVAILABLE
assert inuse_bike.public_geolocation().source == Location.Source.TRACKER
assert inuse_bike.public_geolocation().geo.x == -89.99
assert inuse_bike.public_geolocation().geo.y == -99.99
@pytest.mark.django_db
def test_end_rent_logged_in_with_renting_rights_and_location_from_client(
testuser_jane_canrent,
user_client_jane_canrent_logged_in,
rent_jane_running,
inuse_bike,
):
data = {"lat": -99.99, "lng": -89.99}
response = user_client_jane_canrent_logged_in.post(
"/api/rent/{}/finish".format(rent_jane_running.id), data
)
assert response.status_code == 200, response.content
assert response.json()["success"] is True
rent_jane_running.refresh_from_db()
assert rent_jane_running.rent_end is not None
assert rent_jane_running.end_location is not None
assert rent_jane_running.end_location.geo.x == -89.99
assert rent_jane_running.end_location.geo.y == -99.99
inuse_bike.refresh_from_db()
assert inuse_bike.availability_status == Bike.Availability.AVAILABLE
assert inuse_bike.public_geolocation().source == Location.Source.USER
assert inuse_bike.public_geolocation().geo.x == -89.99
assert inuse_bike.public_geolocation().geo.y == -99.99
@pytest.mark.django_db
def test_end_rent_logged_in_with_renting_rights_and_location_from_client_missing_bike(
testuser_jane_canrent,
user_client_jane_canrent_logged_in,
rent_jane_running_missing,
inuse_missing_bike,
):
data = {"lat": -99.99, "lng": -89.99}
response = user_client_jane_canrent_logged_in.post(
"/api/rent/{}/finish".format(rent_jane_running_missing.id), data
)
assert response.status_code == 200, response.content
assert response.json()["success"] is True
rent_jane_running_missing.refresh_from_db()
assert rent_jane_running_missing.rent_end is not None
assert rent_jane_running_missing.end_location is not None
assert rent_jane_running_missing.end_location.geo.x == -89.99
assert rent_jane_running_missing.end_location.geo.y == -99.99
inuse_missing_bike.refresh_from_db()
assert inuse_missing_bike.availability_status == Bike.Availability.AVAILABLE
assert inuse_missing_bike.public_geolocation().source == Location.Source.USER
assert inuse_missing_bike.public_geolocation().geo.x == -89.99
assert inuse_missing_bike.public_geolocation().geo.y == -99.99
logentry = CykelLogEntry.objects.get(
content_type=get_content_type_for_model(inuse_missing_bike),
object_id=inuse_missing_bike.pk,
action_type="cykel.bike.missing_reporting",
)
assert logentry is not None
assert logentry.data["location_id"] is not None
assert logentry.data["location_id"] == rent_jane_running_missing.end_location.id
@pytest.mark.django_db
def test_end_rent_logged_out(
rent_jane_running,
inuse_bike,
):
client = APIClient()
data = {}
response = client.post("/api/rent/{}/finish".format(rent_jane_running.id), data)
assert response.status_code == 401, response.content
rent_jane_running.refresh_from_db()
assert rent_jane_running.rent_end is None
inuse_bike.refresh_from_db()
assert inuse_bike.availability_status == Bike.Availability.IN_USE
@pytest.mark.django_db
def test_end_rent_unknown_logged_in_with_renting_rights(
testuser_jane_canrent,
user_client_jane_canrent_logged_in,
):
data = {}
response = user_client_jane_canrent_logged_in.post(
"/api/rent/{}/finish".format(99), data
)
assert response.status_code == 404, response.content
|
nilq/baby-python
|
python
|
from uwsgidecorators import *
import gevent
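# The @spool decorator registers longtask as a uWSGI spooler task;
# calling longtask.spool(...) enqueues a job for the spooler process to run.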
@spool
def longtask(*args):
print args
return uwsgi.SPOOL_OK
def level2():
longtask.spool(foo='bar',test1='test2')
def level1():
gevent.spawn(level2)
def application(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html')])
gevent.spawn(level1)
for i in range(100):
yield "counter: %d<br/>" % i
|
nilq/baby-python
|
python
|
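# Collect and print the words of a sentence that begin with 'i' or 'o'.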
st=input("Enter String")
r=st.split(" ")
l=[]
s=" "
for i in r:
d=list(i)
if d[0]=='i' or d[0]=='o':
for ele in d:
s=s+ele
l.append(s)
s=" "
vowel=" ".join(l)
print(vowel)
|
nilq/baby-python
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import secrets
import sys
import tempfile
import time
import boto3
import bottle
import sqlalchemy as db
import common.auth as _auth
import common.helpers as util
from common.config import config
from common.logging import logger
from models.badge import BadgeModel
from models.dataset import AccessTypeEnum, DatasetModel
from models.model import DeploymentStatusEnum, ModelModel
from models.score import ScoreModel
from models.task import AnnotationVerifierMode, TaskModel, train_file_metrics
from models.user import UserModel
from .tasks import ensure_owner_or_admin
sys.path.append("../evaluation") # noqa isort:skip
from utils.helpers import ( # noqa isort:skip
get_data_s3_path, # noqa isort:skip
get_predictions_s3_path, # noqa isort:skip
parse_s3_outfile, # noqa isort:skip
send_eval_request, # noqa isort:skip
) # noqa isort:skip
@bottle.post("/models/upload_train_files/<tid:int>/<model_name>")
@_auth.requires_auth
def do_upload_via_train_files(credentials, tid, model_name):
u = UserModel()
user_id = credentials["id"]
user = u.get(user_id)
if not user:
logger.error("Invalid user detail for id (%s)" % (user_id))
bottle.abort(404, "User information not found")
tm = TaskModel()
task = tm.get(tid)
annotation_config = util.json_decode(task.annotation_config_json)
if "train_file_metric" not in annotation_config:
bottle.abort(
403,
"""This task does not allow train file uploads. Submit a model instead.""",
)
train_file_metric = train_file_metrics[
annotation_config["train_file_metric"]["type"]
]
train_file_metric_constructor_args = annotation_config["train_file_metric"][
"constructor_args"
]
m = ModelModel()
if (
bottle.default_app().config["mode"] == "prod"
and m.getCountByUidTidAndHrDiff(
user_id, tid=task.id, hr_diff=task.dynalab_hr_diff
)
>= task.dynalab_threshold
):
logger.error("Submission limit reached for user (%s)" % (user_id))
bottle.abort(429, "Submission limit reached")
train_files = {}
dm = DatasetModel()
datasets = list(dm.getByTid(tid))
dataset_names = [dataset.name for dataset in datasets]
for name in dataset_names:
train_files[name] = bottle.request.files.get(name)
# Users don't need to upload train sets for all datasets.
train_files = {
name: train_files[name]
for name, upload in train_files.items()
if train_files[name] is not None
}
for dataset in datasets:
if (
dataset.access_type == AccessTypeEnum.scoring
and dataset.name not in train_files.keys()
):
bottle.abort(400, "Need to upload train files for all leaderboard datasets")
parsed_uploads = {}
# Ensure correct format
for name, upload in train_files.items():
try:
s3_uri = f"s3://{task.s3_bucket}/" + get_data_s3_path(
task.task_code, name + ".jsonl"
)
s3_client = boto3.client(
"s3",
aws_access_key_id=config["aws_access_key_id"],
aws_secret_access_key=config["aws_secret_access_key"],
region_name=task.aws_region,
)
parsed_test_file = parse_s3_outfile(s3_client, s3_uri)
parsed_prediction_file = train_file_metric(
util.json_decode(upload.file.read().decode("utf-8")),
parsed_test_file,
train_file_metric_constructor_args,
)
parsed_uploads[name] = parsed_prediction_file
except Exception as ex:
logger.exception(ex)
bottle.abort(400, "Invalid train file")
endpoint_name = f"ts{int(time.time())}-{model_name}"
status_dict = {}
# Create local model db object
model = m.create(
task_id=tid,
user_id=user_id,
name=model_name,
shortname="",
longdesc="",
desc="",
upload_datetime=db.sql.func.now(),
endpoint_name=endpoint_name,
deployment_status=DeploymentStatusEnum.predictions_upload,
secret=secrets.token_hex(),
)
    for dataset_name, parsed_upload in parsed_uploads.items():
        with tempfile.NamedTemporaryFile(mode="w+", delete=False) as tmp:
            for datum in parsed_upload:
                # TODO: right now, dynalab models expect an input with "uid"
                # but output "id" in their predictions. Why do we use two
                # separate names for the same thing? Can we make this
                # consistent?
                datum["id"] = datum["uid"]
                del datum["uid"]
                tmp.write(util.json_encode(datum) + "\n")
        ret = _eval_dataset(dataset_name, endpoint_name, model, task, tmp.name)
        status_dict.update(ret)
return util.json_encode({"success": "ok", "model_id": model.id})
@bottle.post("/models/upload_predictions/<tid:int>/<model_name>")
@_auth.requires_auth
def do_upload_via_predictions(credentials, tid, model_name):
u = UserModel()
user_id = credentials["id"]
user = u.get(user_id)
if not user:
logger.error("Invalid user detail for id (%s)" % (user_id))
bottle.abort(404, "User information not found")
tm = TaskModel()
task = tm.get(tid)
if not task.has_predictions_upload:
bottle.abort(
403,
"""This task does not allow prediction uploads. Submit a model instead.""",
)
m = ModelModel()
if (
bottle.default_app().config["mode"] == "prod"
and m.getCountByUidTidAndHrDiff(
user_id, tid=task.id, hr_diff=task.dynalab_hr_diff
)
>= task.dynalab_threshold
):
logger.error("Submission limit reached for user (%s)" % (user_id))
bottle.abort(429, "Submission limit reached")
uploads = {}
dm = DatasetModel()
datasets = list(dm.getByTid(tid))
dataset_names = [dataset.name for dataset in datasets]
for name in dataset_names:
uploads[name] = bottle.request.files.get(name)
# Users don't need to upload preds for all datasets.
uploads = {
name: uploads[name]
for name, upload in uploads.items()
if uploads[name] is not None
}
for dataset in datasets:
if (
dataset.access_type == AccessTypeEnum.scoring
and dataset.name not in uploads.keys()
):
bottle.abort(400, "Need to upload predictions for all leaderboard datasets")
parsed_uploads = {}
# Ensure correct format
for name, upload in uploads.items():
try:
parsed_upload = [
util.json_decode(line)
for line in upload.file.read().decode("utf-8").splitlines()
]
for io in parsed_upload:
if (
not task.verify_annotation(
io, mode=AnnotationVerifierMode.predictions_upload
)
or "uid" not in io
):
bottle.abort(400, "Invalid prediction file")
parsed_uploads[name] = parsed_upload
except Exception as ex:
logger.exception(ex)
bottle.abort(400, "Invalid prediction file")
endpoint_name = f"ts{int(time.time())}-{model_name}"
status_dict = {}
# Create local model db object
model = m.create(
task_id=tid,
user_id=user_id,
name=model_name,
shortname="",
longdesc="",
desc="",
upload_datetime=db.sql.func.now(),
endpoint_name=endpoint_name,
deployment_status=DeploymentStatusEnum.predictions_upload,
secret=secrets.token_hex(),
)
    for dataset_name, parsed_upload in parsed_uploads.items():
        with tempfile.NamedTemporaryFile(mode="w+", delete=False) as tmp:
            for datum in parsed_upload:
                # TODO: right now, dynalab models expect an input with "uid"
                # but output "id" in their predictions. Why do we use two
                # separate names for the same thing? Can we make this
                # consistent?
                datum["id"] = datum["uid"]
                del datum["uid"]
                tmp.write(util.json_encode(datum) + "\n")
        ret = _eval_dataset(dataset_name, endpoint_name, model, task, tmp.name)
        status_dict.update(ret)
return util.json_encode({"success": "ok", "model_id": model.id})
def _eval_dataset(dataset_name, endpoint_name, model, task, afile):
try:
_upload_prediction_file(
afile=afile,
task_code=task.task_code,
s3_bucket=task.s3_bucket,
endpoint_name=endpoint_name,
dataset_name=dataset_name,
)
eval_config = {
"aws_access_key_id": config["eval_aws_access_key_id"],
"aws_secret_access_key": config["eval_aws_secret_access_key"],
"aws_region": config["eval_aws_region"],
"evaluation_sqs_queue": config["evaluation_sqs_queue"],
}
ret = send_eval_request(
eval_server_id=task.eval_server_id,
model_id=model.id,
dataset_name=dataset_name,
config=eval_config,
logger=logger,
)
except Exception as e:
logger.exception(e)
bottle.abort(400, "Could not upload file: %s" % (e))
return {dataset_name: {"success": ret}}
def _upload_prediction_file(afile, task_code, s3_bucket, endpoint_name, dataset_name):
client = boto3.client(
"s3",
aws_access_key_id=config["eval_aws_access_key_id"],
aws_secret_access_key=config["eval_aws_secret_access_key"],
region_name=config["eval_aws_region"],
)
path = get_predictions_s3_path(
endpoint_name=endpoint_name, task_code=task_code, dataset_name=dataset_name
)
response = client.upload_file(afile, s3_bucket, path)
if response:
logger.info(response)
return path
@bottle.get("/models/<mid:int>")
def get_model(mid):
m = ModelModel()
model = m.getPublishedModel(mid)
if not model:
bottle.abort(404, "Not found")
# Also get this model's scores?
return util.json_encode(model.to_dict())
@bottle.get("/models/<mid:int>/details")
@_auth.auth_optional
def get_model_detail(credentials, mid):
m = ModelModel()
s = ScoreModel()
dm = DatasetModel()
try:
query_result = m.getModelUserByMid(mid)
model = query_result[0].to_dict()
# Secure to read unpublished model detail for only owner
if (
not query_result[0].is_published
and query_result[0].uid != credentials["id"]
):
ensure_owner_or_admin(query_result[0].tid, credentials["id"])
is_current_user = util.is_current_user(query_result[1].id, credentials)
if not is_current_user and query_result[0].is_anonymous:
model["username"] = None
model["uid"] = None
else:
model["username"] = query_result[1].username
# Construct Score information based on model id
scores = s.getByMid(mid)
datasets = dm.getByTid(model["tid"])
did_to_dataset_name = {}
did_to_dataset_access_type = {}
did_to_dataset_longdesc = {}
did_to_dataset_source_url = {}
for dataset in datasets:
did_to_dataset_name[dataset.id] = dataset.name
did_to_dataset_access_type[dataset.id] = dataset.access_type
did_to_dataset_longdesc[dataset.id] = dataset.longdesc
did_to_dataset_source_url[dataset.id] = dataset.source_url
fields = ["accuracy", "perf_std", "round_id", "did", "metadata_json"]
s_dicts = [
dict(
zip(fields, d),
**{
"dataset_name": did_to_dataset_name.get(d.did, None),
"dataset_access_type": did_to_dataset_access_type.get(d.did, None),
"dataset_longdesc": did_to_dataset_longdesc.get(d.did, None),
"dataset_source_url": did_to_dataset_source_url.get(d.did, None),
},
)
for d in scores
]
model["leaderboard_scores"] = list(
filter(
lambda s_dict: s_dict["dataset_access_type"] == AccessTypeEnum.scoring,
s_dicts,
)
)
model["non_leaderboard_scores"] = list(
filter(
lambda s_dict: s_dict["dataset_access_type"] == AccessTypeEnum.standard,
s_dicts,
)
)
model["deployment_status"] = model["deployment_status"].name
model["evaluation_status"] = model["evaluation_status"].name
return util.json_encode(model)
except AssertionError:
logger.exception("Not authorized to access unpublished model detail")
bottle.abort(403, "Not authorized to access model detail")
except Exception as ex:
logger.exception("Model detail exception : (%s)" % (ex))
bottle.abort(404, "Not found")
@bottle.put("/models/<mid:int>/update")
@_auth.requires_auth
def update_model(credentials, mid):
m = ModelModel()
data = bottle.request.json
if not util.check_fields(data, ["name", "description"]):
bottle.abort(400, "Missing data")
try:
model = m.getUnpublishedModelByMid(mid)
if model.uid != credentials["id"]:
logger.error(
"Original user ({}) and the modification tried by ({})".format(
model.uid, credentials["id"]
)
)
bottle.abort(401, "Operation not authorized")
m.update(
model.id,
name=data["name"],
longdesc=data["description"],
params=data["params"],
languages=data["languages"],
license=data["license"],
source_url=data["source_url"],
model_card=data["model_card"],
is_anonymous=data["is_anonymous"],
is_published=False,
)
return {"status": "success"}
except db.orm.exc.NoResultFound:
bottle.abort(404, "Model Not found")
except Exception as e:
logger.exception("Could not update model details: %s" % (e))
bottle.abort(400, "Could not update model details: %s" % (e))
@bottle.put("/models/<mid:int>/revertstatus")
@_auth.requires_auth
def revert_model_status(credentials, mid):
m = ModelModel()
try:
model = m.getUnpublishedModelByMid(mid)
if model.uid != credentials["id"]:
logger.error(
"Original user ({}) and the modification tried by ({})".format(
model.uid, credentials["id"]
)
)
bottle.abort(401, "Operation not authorized")
m.update(model.id, is_published=not model.is_published)
model = m.getUnpublishedModelByMid(mid)
um = UserModel()
user = um.get(model.uid)
bm = BadgeModel()
if model.is_published:
badge_names = bm.handlePublishModel(user, model)
return {"status": "success", "badges": "|".join(badge_names)}
bm.handleUnpublishModel(user, model)
return {"status": "success"}
except db.orm.exc.NoResultFound:
bottle.abort(404, "Model Not found")
except Exception as e:
logger.exception("Could not update model details: %s" % (e))
bottle.abort(400, "Could not update model details: %s" % (e))
@bottle.post("/models/upload/s3")
@_auth.requires_auth
def upload_to_s3(credentials):
# Authentication
u = UserModel()
user_id = credentials["id"]
user = u.get(user_id)
if not user:
logger.error("Invalid user detail for id (%s)" % (user_id))
bottle.abort(404, "User information not found")
# Upload file to S3
model_name = bottle.request.forms.get("name")
task_code = bottle.request.forms.get("taskCode")
if not task_code:
bottle.abort(404, "No task requested")
t = TaskModel()
task = t.getByTaskCode(task_code)
if not task:
bottle.abort(404, "Task not found")
if not task.submitable:
bottle.abort(403, "Task not available for model submission")
m = ModelModel()
if (
bottle.default_app().config["mode"] == "prod"
and m.getCountByUidTidAndHrDiff(
user_id, tid=task.id, hr_diff=task.dynalab_hr_diff
)
>= task.dynalab_threshold
):
logger.error("Submission limit reached for user (%s)" % (user_id))
bottle.abort(429, "Submission limit reached")
session = boto3.Session(
aws_access_key_id=config["aws_access_key_id"],
aws_secret_access_key=config["aws_secret_access_key"],
region_name=config["aws_region"],
)
bucket_name = task.s3_bucket
logger.info(f"Using AWS bucket {bucket_name} for task {task_code}")
endpoint_name = f"ts{int(time.time())}-{model_name}"[:63]
s3_filename = f"{endpoint_name}.tar.gz"
s3_path = f"torchserve/models/{task_code}/{s3_filename}"
logger.info(f"Uploading {model_name} to S3 at {s3_path} for user {user_id}")
try:
s3_client = session.client("s3")
tarball = bottle.request.files.get("tarball")
response = s3_client.upload_fileobj(tarball.file, bucket_name, s3_path)
if response:
logger.info(f"Response from the mar file upload to s3 {response}")
except Exception as ex:
logger.exception(ex)
bottle.abort(400, "upload failed")
# Update database entry
model = m.create(
task_id=task.id,
user_id=user_id,
name=model_name,
shortname="",
longdesc="",
desc="",
upload_datetime=db.sql.func.now(),
endpoint_name=endpoint_name,
deployment_status=DeploymentStatusEnum.uploaded,
secret=secrets.token_hex(),
)
um = UserModel()
um.incrementModelSubmitCount(user.to_dict()["id"])
# send SQS message
logger.info(f"Send message to sqs - enqueue model {model_name} for deployment")
sqs = session.resource("sqs")
queue = sqs.get_queue_by_name(QueueName=config["builder_sqs_queue"])
queue.send_message(
MessageBody=util.json_encode(
{"model_id": model.id, "s3_uri": f"s3://{bucket_name}/{s3_path}"}
)
)
@bottle.get("/models/<mid:int>/deploy")
@_auth.requires_auth
def deploy_model_from_s3(credentials, mid):
# Authentication (only authenticated users can redeploy models for interaction)
u = UserModel()
user_id = credentials["id"]
user = u.get(user_id)
if not user:
logger.error("Invalid user detail for id (%s)" % (user_id))
bottle.abort(404, "User information not found")
m = ModelModel()
model = m.getUnpublishedModelByMid(mid)
model_owner = model.uid == user.id
if (not model.is_published) and (not model_owner):
bottle.abort(403, "Model is not published and user is not model owner")
if model.deployment_status != DeploymentStatusEnum.takendownnonactive:
bottle.abort(
403, "Attempting to deploy a model not taken down due to inactivity"
)
model_name = model.name
t = TaskModel()
task = t.getByTaskId(model.tid)
task_code = task.task_code
bucket_name = task.s3_bucket
endpoint_name = model.endpoint_name
s3_filename = f"{endpoint_name}.tar.gz"
s3_path = f"torchserve/models/{task_code}/{s3_filename}"
# Update database entry
session = boto3.Session(
aws_access_key_id=config["aws_access_key_id"],
aws_secret_access_key=config["aws_secret_access_key"],
region_name=config["aws_region"],
)
# send SQS message
logger.info(f"Send message to sqs - enqueue model {model_name} for re-deployment")
sqs = session.resource("sqs")
queue = sqs.get_queue_by_name(QueueName=config["builder_sqs_queue"])
queue.send_message(
MessageBody=util.json_encode(
{
"model_id": model.id,
"s3_uri": f"s3://{bucket_name}/{s3_path}",
"endpoint_only": True,
}
)
)
return {"status": "success"}
|
nilq/baby-python
|
python
|
import jax.numpy as jnp
from jax import jit
from onnx_jax.handlers.backend_handler import BackendHandler
from onnx_jax.handlers.handler import onnx_op
from onnx_jax.pb_wrapper import OnnxNode
@onnx_op("Less")
class Less(BackendHandler):
@classmethod
def _common(cls, node: OnnxNode, **kwargs):
@jit
def _less(a, b):
return jnp.less(a, b)
return _less
@classmethod
def version_1(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_7(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_9(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_13(cls, node, **kwargs):
return cls._common(node, **kwargs)
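# A minimal sketch of this handler's behaviour, assuming NumPy-style
# broadcasting (the node argument would come from an ONNX graph; values are
# illustrative):
#   f = Less._common(node)
#   f(jnp.array([1, 2, 3]), 2)  # -> Array([ True, False, False])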
|
nilq/baby-python
|
python
|
from flask import Flask, jsonify, request, render_template, flash, redirect, url_for, send_from_directory
from flask_cors import CORS
import subprocess
from subprocess import Popen, PIPE
from subprocess import check_output
import pandas as pd
import pickle
import sklearn
import numpy as np
from PIL import Image
import os
from werkzeug.utils import secure_filename
from skimage import io, transform
# import matplotlib.pyplot as plt
# configuration
DEBUG = True
# load model
logreg_model = pickle.load(open("model_.pkl", "rb"))
# instatiate app
app = Flask(__name__)
app.config.from_object(__name__)
UPLOAD_FOLDER = "static/uploads"
app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER
app.config["AttGAN_INPUT_FOLDER"] = "static/input_images/data"
app.config["AttGAN_OUTPUT_FOLDER"] = "static/output/AttGAN_128/samples_testing_2"
ALLOWED_EXTENSIONS = {"png", "jpg", "jpeg", "txt"}
# define user defined functions
def allowed_file(filename):
"""
read and test for allowed file types
"""
return "." in filename and filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS
# enable CORS
CORS(app, resources={r"/*": {"origins": "*"}})
# define routes
@app.route("/", methods=["GET", "POST"])
def logreg_form():
"""
run simple logistic regression model and return output of model
"""
if request.method == "POST":
input = request.form.get("submission")
model_input = np.array(int(input))
result = logreg_model.predict(model_input.reshape(-1, 1))
return render_template("home.html", input=int(model_input), output=int(result))
else:
return render_template("home.html", input="", output="")
@app.route("/uploads/<filename>")
def uploaded_file(filename):
"""
functioning, but not currently necessary. return url endpoint with uploaded filename.
"""
return send_from_directory(app.config["UPLOAD_FOLDER"], filename)
@app.route("/test1", methods=["GET", "POST"])
def test1():
"""
test calling python script from command line
"""
if request.method == "GET":
# py_file = os.path.join(app.root_path, "tmp1.py")
py_file = os.path.join(app.root_path, "test.py")
# python_command = "python '{0}'".format(py_file)
python_command = "CUDA_VISIBLE_DEVICES=0 python {0} --experiment_name AttGAN_128 --flask_path {1}".format(py_file, app.root_path)
try:
stdout = check_output([python_command], shell=True)
return """<title>Success</title>
<h1>Images generated!</h1>
"""
except subprocess.CalledProcessError as e:
return "An error occurred while trying to fetch task status updates."
else:
return """<title>500 Error</title>
<h1>Error!</h1>
<p>Only GET is supported right now</p>""", 500
@app.route('/test2')
def test2():
input = os.path.join(app.config["AttGAN_INPUT_FOLDER"], "004501.jpg")
output = os.path.join(app.config["AttGAN_OUTPUT_FOLDER"], "1.jpg")
if request.method == "GET":
return render_template("attgan_image.html", input=input, output=output)
@app.route("/image", methods=["GET", "POST"])
def image_transformation():
"""
user submits an image to a form
save image to local directory (UPLOAD_FOLDER)
convert image to grayscale
"""
if request.method == "POST":
file = request.files["image"]
transform_option = request.form.get("transform_option")
if file and allowed_file(file.filename):
# save original to directory
filename = secure_filename(file.filename)
file.save(os.path.join(app.root_path, app.config["UPLOAD_FOLDER"], filename))
if transform_option == "RGB":
# read image and transform to grayscale
im = io.imread(file, plugin="matplotlib")
im_mod = Image.fromarray(im).convert("L")
im_mod_filename = "im_mod_rgb_" + filename
elif transform_option == "Rotate":
# read image and rotate
im = io.imread(file, plugin="matplotlib")
im_mod = Image.fromarray(im).rotate(90)
im_mod_filename = "im_mod_rotate_" + filename
im_mod.save(os.path.join(app.root_path, app.config["UPLOAD_FOLDER"], im_mod_filename))
# define input image and output image prior to returning on webpage
input = os.path.join(app.config["UPLOAD_FOLDER"], filename)
output = os.path.join(app.config["UPLOAD_FOLDER"], im_mod_filename)
return render_template("image.html", input=input, output=output)
else:
return render_template("image.html", input="", output="")
@app.route("/attgan", methods=["GET", "POST"])
def attgan():
"""
user submits an image to a form
save image to local directory (AttGAN_INPUT_FOLDER)
run model
return images
"""
if request.method == "POST":
file = request.files["image"]
transform_option = request.form.get("transform_option")
if file and allowed_file(file.filename):
# save original to directory
filename = secure_filename(file.filename)
file.save(os.path.join(app.root_path, app.config["AttGAN_INPUT_FOLDER"], filename))
im = io.imread(os.path.join(app.root_path, app.config["AttGAN_INPUT_FOLDER"], filename), plugin="matplotlib")
if Image.fromarray(im).height > 256:
resize_factor = Image.fromarray(im).height / 256
else:
resize_factor = 1
size = int(np.floor(Image.fromarray(im).width / resize_factor)), int(np.floor(Image.fromarray(im).height / resize_factor))
im_mod = Image.fromarray(im).resize(size)
im_mod.save(os.path.join(app.root_path, app.config["AttGAN_INPUT_FOLDER"], "004501.jpg"))
py_file = os.path.join(app.root_path, "test.py")
python_command = "CUDA_VISIBLE_DEVICES=0 python {0} --experiment_name AttGAN_128 --flask_path {1}".format(py_file, app.root_path)
stdout = check_output([python_command], shell=True)
# define input image and output image prior to returning on webpage
input = os.path.join(app.config["AttGAN_INPUT_FOLDER"], "004501.jpg")
output = os.path.join(app.config["AttGAN_OUTPUT_FOLDER"], "1.jpg")
return render_template("attgan.html", input=input, output=output, rand_num=np.random.randint(low=1, high=100000, size=1))
else:
return render_template("attgan.html", input="", output="", rand_num="")
if __name__ == "__main__":
app.run()
|
nilq/baby-python
|
python
|
# This file executes in a loop.
from machine import Pin
import time
LED = Pin(18, Pin.OUT) # Set Running Led
# Python and WebDAV can interfere with each other, which makes the Python
# loop unstable, so use the external button to switch between Python and
# WebDAV for stable execution.
Button = Pin(27, Pin.IN)
while 0 == Button.value():
    time.sleep(0.2)  # 0.2 s delay between toggles
    LED.value(1)     # LED on
    time.sleep(0.2)  # 0.2 s delay between toggles
    LED.value(0)     # LED off
|
nilq/baby-python
|
python
|
from .utils import *
Any = Var(annotation=typing.Any)
AnyList = Var(annotation=list)
Int = Var(annotation=int)
Float = Var(annotation=float)
Str = Var(annotation=str)
Array = Var(annotation=np.ndarray, name='Array')
ArrayList = Var(annotation=TList[Array], name='ArrayList')
FloatDict = Var(annotation=TDict[str, float])
IntList = Var(annotation=TList[int])
AnyDict = Var(annotation=TDict[str, TAny])
IntMatrix = Var(annotation=TList[TList[int]])
DictOfIntLists = Var(annotation=TDict[str, TList[int]])
Frame = Var(annotation=pd.DataFrame)
VARS = (
Any, AnyList, Int, Float, Str, Array, ArrayList,
FloatDict, IntList, AnyDict, IntMatrix, DictOfIntLists, Frame
)
################################################################################
### ops
################################################################################
@op()
def inc(x:Int) -> Int:
return x + 1
@op()
def add(x:Int, y:Int) -> Int:
return x + y
@op()
def mean(x:AnyList) -> Any:
return sum(x) / len(x)
@op()
def add_int(x:Int, y:Int) -> Int:
return x + y
@superop()
def add_three(x:Int, y:Int, z:Int) -> Int:
intermediate = add_int(x=x, y=y)
return add_int(intermediate, z)
@op()
def int_mean(x:IntList) -> Float:
return sum(x) / len(x)
@op()
def dict_mean(x:AnyDict) -> Any:
return sum(x.values()) / len(x)
@op()
def get_prime_factors(x:Int) -> IntList:
if x < 2:
return []
nums = list(range(2, x + 1))
    primes = [a for a in nums if x % a == 0
              and all(a % div != 0 for div in nums if 1 < div < a)]
return primes
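# A worked example (illustrative): get_prime_factors(12) returns [2, 3],
# i.e. the distinct prime divisors, without multiplicity.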
@op()
def mean_2d(arr:IntMatrix) -> Float:
means = [sum(x) / len(x) for x in arr]
return sum(means) / len(means)
@op()
def make_frame(columns:DictOfIntLists) -> Frame:
return pd.DataFrame(columns)
### an operation with multiple outputs
@op()
def inc_and_dec(x:Int) -> TTuple[Int, Int]:
return x + 1, x - 1
### an operation with no outputs
@op()
def log_some_things(x:Int, y:FloatDict, z:DictOfIntLists):
return
### an operation with dict outputs
@op()
def get_some_metrics(x:Int, y:IntList) -> FloatDict:
res = {
'a': 0.3,
'b': len(y) / 10
}
return res
################################################################################
### superops, unnamed types
################################################################################
@op()
def get_divisors(num:int) -> TList[int]:
return [x for x in range(1, num) if num % x == 0]
@op()
def sum_nums(nums:TList[int]) -> int:
return sum(nums)
@superop()
def get_max_len_divs(nums:TList[int]) -> TList[int]:
# return the divisors of the number with the most divisors among `nums`
all_divs = [get_divisors(num) for num in nums]
lengths = [len(x) for x in all_divs]
i = np.argmax(lengths)
return all_divs[i]
@superop()
def divisor_prefix(num:int, how_many:int) -> TList[int]:
# return a prefix of the number's divisors of the given length
divisors = get_divisors(num)
return divisors[:unwrap(how_many)]
###
OPS = (
inc, add, mean, add_int, add_three, int_mean, dict_mean, get_prime_factors,
mean_2d, make_frame, inc_and_dec, log_some_things, get_some_metrics,
get_divisors, sum_nums, get_max_len_divs, divisor_prefix,
)
|
nilq/baby-python
|
python
|
while True:
try:
a=input()
except EOFError:
break
except KeyboardInterrupt:
break
print(a)
|
nilq/baby-python
|
python
|
import logging
import numpy as np
import pandas as pd
import pytest
import calc # noqa
from const import ProdStatRange
from schemas import ProductionWellSet
from tests.utils import MockAsyncDispatch
from util.pd import validate_required_columns
logger = logging.getLogger(__name__)
@pytest.fixture(scope="session")
def ihs_prod(json_fixture):
yield json_fixture("test_prod_calc.json")
@pytest.fixture
def prod_df(ihs_prod):
yield ProductionWellSet(wells=ihs_prod).df().copy(deep=True).sort_index()
@pytest.fixture
def prod_dispatcher(ihs_prod):
yield MockAsyncDispatch({"data": ihs_prod})
def test_validate_required_columns_raise():
with pytest.raises(KeyError):
validate_required_columns(required=["a", "b"], columns=["a", "c", "d"])
class TestProdStats:
def test_instantiate_df_ext(self):
pd.DataFrame.prodstats
@pytest.mark.parametrize(
"kwargs,expected",
[
(
{
"columns": ["oil", "gas", "water"],
"agg_type": "sum",
"include_zeroes": True,
"range_name": ProdStatRange.FIRST,
"months": 6,
"norm_by_label": None,
},
{
"oil": "oil_sum_first6mo",
"gas": "gas_sum_first6mo",
"water": "water_sum_first6mo",
},
),
(
{
"columns": ["oil", "gas", "water"],
"agg_type": "sum",
"include_zeroes": False,
"range_name": ProdStatRange.LAST,
"months": 3,
"norm_by_label": None,
},
{
"oil": "oil_sum_last3mo_nonzero",
"gas": "gas_sum_last3mo_nonzero",
"water": "water_sum_last3mo_nonzero",
},
),
(
{
"columns": ["oil", "gas", "water"],
"agg_type": "avg",
"include_zeroes": True,
"range_name": ProdStatRange.ALL,
"months": None,
"norm_by_label": None,
},
{"oil": "oil_avg", "gas": "gas_avg", "water": "water_avg"},
),
(
{
"columns": ["oil", "gas", "water"],
"agg_type": "sum",
"include_zeroes": True,
"range_name": ProdStatRange.PEAKNORM,
"months": 6,
"norm_by_label": "1k",
},
{
"oil": "oil_sum_peaknorm6mo_per1k",
"gas": "gas_sum_peaknorm6mo_per1k",
"water": "water_sum_peaknorm6mo_per1k",
},
),
],
)
def test_make_aliases(self, kwargs, expected):
actual = pd.DataFrame.prodstats.make_aliases(**kwargs)
assert expected == actual
def test_prod_bounds_by_well(self):
data = [
{"api10": 1234567890, "prod_date": "2019-01-01", "prod_month": 1},
{"api10": 1234567890, "prod_date": "2019-02-01", "prod_month": 2},
{"api10": 1234567890, "prod_date": "2019-03-01", "prod_month": 3},
{"api10": 9999999999, "prod_date": "2019-01-01", "prod_month": 1},
{"api10": 9999999999, "prod_date": "2019-02-01", "prod_month": 2},
{"api10": 9999999999, "prod_date": "2019-03-01", "prod_month": 3},
{"api10": 9999999999, "prod_date": "2019-04-01", "prod_month": 4},
]
df = pd.DataFrame(data).set_index(["api10", "prod_date"])
result = df.prodstats._prod_bounds_by_well()
for api10 in list(df.index.levels[0]):
assert result.loc[api10].start_month == df.loc[api10].prod_month.min()
assert result.loc[api10].end_month == df.loc[api10].prod_month.max()
assert result.loc[api10].start_date == df.loc[api10].index[0]
assert result.loc[api10].end_date == df.loc[api10].index[-1]
@pytest.mark.parametrize(
"range,months,result_min_month,result_max_month",
[
(ProdStatRange.FIRST, 1, 1, 1),
(ProdStatRange.LAST, 1, 14, 14),
(ProdStatRange.PEAKNORM, 1, 3, 3),
(ProdStatRange.FIRST, 3, 1, 3),
(ProdStatRange.LAST, 3, 12, 14),
(ProdStatRange.PEAKNORM, 3, 3, 5),
(ProdStatRange.FIRST, 6, 1, 6),
(ProdStatRange.LAST, 6, 9, 14),
(ProdStatRange.PEAKNORM, 6, 3, 8),
(ProdStatRange.ALL, None, 1, 14),
],
)
def test_get_monthly_range(
self, prod_df, range, months, result_min_month, result_max_month
):
prod_df["prod_month"] = prod_df.groupby(level=0).cumcount() + 1
peak30 = prod_df.prodstats.peak30()
prod_df["peak_norm_month"] = prod_df.prod_month - peak30.peak30_month
df = prod_df.prodstats.monthly_by_range(range, months=months)
ranges = (
df.reset_index(level=1)
.groupby(level=0)
.prod_month.describe()[["min", "max"]]
.astype(int)
)
assert ranges["min"].min() == ranges["min"].max() == result_min_month
assert ranges["max"].min() == ranges["max"].max() == result_max_month
def test_get_monthly_range_catch_range_name_without_months(self, prod_df):
prod_df["prod_month"] = prod_df.groupby(level=0).cumcount() + 1
peak30 = prod_df.prodstats.peak30()
prod_df["peak_norm_month"] = prod_df.prod_month - peak30.peak30_month
months = None
for range in ProdStatRange.members():
if range != ProdStatRange.ALL:
with pytest.raises(ValueError):
prod_df.prodstats.monthly_by_range(range, months=months)
else:
prod_df.prodstats.monthly_by_range(range, months=months)
def test_get_monthly_range_catch_range_name_with_months(self, prod_df):
prod_df["prod_month"] = prod_df.groupby(level=0).cumcount() + 1
peak30 = prod_df.prodstats.peak30()
prod_df["peak_norm_month"] = prod_df.prod_month - peak30.peak30_month
months = 6
for range in ProdStatRange.members():
if range != ProdStatRange.ALL:
prod_df.prodstats.monthly_by_range(range, months=months)
else:
with pytest.raises(ValueError):
prod_df.prodstats.monthly_by_range(range, months=months)
def test_melt(self, prod_df):
df = prod_df[["oil", "gas"]].groupby(level=0).max()
melted = df.prodstats.melt(prodstat_names=["oil", "gas"])
        assert {*df.index} == {*melted.api10}
assert {*df.columns} == {*melted.name}
# @pytest.mark.parametrize("include_zeroes", [True, False])
def test_aggregate_range(self, prod_df):
prod_df["prod_month"] = prod_df.groupby(level=0).cumcount() + 1
result = prod_df.prodstats.aggregate_range(
range_name=ProdStatRange.FIRST,
agg_map={"oil": "sum", "gas": "sum"},
alias_map={"oil": "oil_alias", "gas": "gas_alias"},
include_zeroes=True,
months=6,
)
assert result.start_month.min() == result.start_month.max() == 1
assert result.end_month.min() == result.end_month.max() == 6
def test_aggregate_range_nonzero(self, prod_df):
prod_df = prod_df[["oil", "gas"]].copy(deep=True)
prod_df["prod_month"] = prod_df.groupby(level=0).cumcount() + 1
idx = prod_df.xs("2018-12-01", level=1, drop_level=False).index
prod_df.loc[idx, ["oil", "gas"]] = 0
result = prod_df.prodstats.aggregate_range(
range_name=ProdStatRange.FIRST,
agg_map={"oil": "sum", "gas": "sum"},
alias_map={"oil": "oil_alias", "gas": "gas_alias"},
include_zeroes=False,
months=6,
)
assert result.start_month.min() == result.start_month.max() == 1
assert result.end_month.min() == result.end_month.max() == 7
def test_aggregate_range_catch_unsorted(self, prod_df):
prod_df = prod_df.sort_values("oil")
with pytest.raises(ValueError):
prod_df.prodstats.aggregate_range(
range_name=ProdStatRange.FIRST,
agg_map={"oil": "sum", "gas": "sum"},
alias_map={"oil": "oil_alias", "gas": "gas_alias"},
include_zeroes=True,
months=6,
)
    def test_interval_calc(self, prod_df):
prod_df["prod_month"] = prod_df.groupby(level=0).cumcount() + 1
result = prod_df.prodstats.calc_prodstat(
columns=["oil", "gas"],
range_name=ProdStatRange.FIRST,
months=6,
agg_type="sum",
include_zeroes=True,
)
assert result.start_month.min() == result.start_month.max() == 1
assert result.end_month.min() == result.end_month.max() == 6
    def test_interval_calc_with_norm(self, prod_df):
prod_df["prod_month"] = prod_df.groupby(level=0).cumcount() + 1
prod_df["boe"] = prod_df.prodstats.boe()
result = prod_df.prodstats.calc_prodstat(
columns=["oil", "gas"],
range_name=ProdStatRange.FIRST,
months=6,
agg_type="sum",
include_zeroes=True,
norm_value=1000,
norm_suffix="1k",
)
assert result.start_month.min() == result.start_month.max() == 1
assert result.end_month.min() == result.end_month.max() == 6
def test_interval_calc_catch_bad_range_type(self, prod_df):
with pytest.raises(ValueError):
prod_df.prodstats.calc_prodstat(
columns=["oil", "gas"],
range_name="hello0o0oo0",
months=6,
agg_type="sum",
include_zeroes=True,
)
    def test_interval_calc_mean(self, prod_df):
range_name = ProdStatRange.FIRST
months = 6
prod_df["prod_month"] = prod_df.groupby(level=0).cumcount() + 1
actual = prod_df.prodstats.calc_prodstat(
columns=["oil", "gas"],
range_name=range_name,
months=months,
agg_type="mean",
include_zeroes=True,
).value
expected = (
prod_df.loc[:, ["oil", "gas"]]
.groupby(level=0)
.head(months)
.groupby(level=0)
.mean()
.reset_index()
.melt(id_vars=["api10"])
.set_index("api10")
).value
assert {*actual.values} == {*expected.values}
def test_peak30(self, prod_df):
prod_df["prod_month"] = prod_df.groupby(level=0).cumcount() + 1
peak30 = prod_df.prodstats.peak30()
assert peak30.columns.tolist() == [
"peak30_date",
"peak30_oil",
"peak30_gas",
"peak30_month",
]
assert peak30.iloc[0].peak30_date == pd.Timestamp("2018-11-01")
assert peak30.iloc[0].peak30_oil == 27727
assert peak30.iloc[0].peak30_gas == 26699
assert peak30.iloc[0].peak30_month == 2
# def test_norm_to_ll(self, prod_df):
# prod_df["prod_month"] = prod_df.groupby(level=0).cumcount() + 1
# df = prod_df.prodstats.norm_to_ll(1000, suffix="1k")
# assert df.index.names == ["api10", "prod_date"]
# assert df.columns.tolist() == ["oil_norm_1k"]
# assert prod_df.shape[0] == df.shape[0]
# expected = prod_df.oil.div(prod_df.perfll / 1000).groupby(level=0).sum()
# actual = df.groupby(level=0).sum()
# merged = expected.to_frame("original").join(actual)
# assert merged.original.sub(merged.oil_norm_1k).sum() == 0
# def test_norm_to_ll_with_suffix(self, prod_df):
# prod_df["prod_month"] = prod_df.groupby(level=0).cumcount() + 1
# df = prod_df.loc[:, ["oil"]].prodstats.norm_to_ll(7500, suffix=7500)
# assert df.index.names == ["api10", "prod_date"]
# assert df.columns.tolist() == ["oil_norm_1k"]
# def test_norm_to_ll_ignore_missing_prod_columns(self, prod_df):
# prod_df["prod_month"] = prod_df.groupby(level=0).cumcount() + 1
# df = prod_df.prodstats.norm_to_ll(1000, suffix="1k")
# assert df.index.names == ["api10", "prod_date"]
# assert df.columns.tolist() == ["oil_norm_1k"]
# assert prod_df.shape[0] == df.shape[0]
# def test_norm_to_ll_catch_missing_prod_month(self, prod_df):
# with pytest.raises(KeyError):
# prod_df.prodstats.norm_to_ll(1000, suffix="1k")
def test_daily_avg_by_month(self, prod_df):
in_df = prod_df.loc[:, ["oil", "gas", "days_in_month"]]
df = in_df.prodstats.daily_avg_by_month(
columns=["oil", "gas"], days_column="days_in_month"
)
for x in ["oil_avg_daily", "gas_avg_daily"]:
assert x in df.columns
assert all(in_df.oil.div(in_df.days_in_month).values == df.oil_avg_daily.values)
assert all(in_df.gas.div(in_df.days_in_month).values == df.gas_avg_daily.values)
def test_daily_avg_by_well(self, prod_df):
in_df = prod_df[["oil", "gas", "days_in_month"]].groupby(level=0).sum()
df = in_df.prodstats.daily_avg_by_month(
columns=["oil", "gas"], days_column="days_in_month"
)
for x in ["oil_avg_daily", "gas_avg_daily"]:
assert x in df.columns
assert all(in_df.oil.div(in_df.days_in_month).values == df.oil_avg_daily.values)
assert all(in_df.gas.div(in_df.days_in_month).values == df.gas_avg_daily.values)
def test_pdp(self, prod_df):
prod_df["boe"] = prod_df.prodstats.boe()
pdp = prod_df.prodstats.pdp_by_well(
range_name=ProdStatRange.LAST, months=3, dollars_per_bbl=15, factor=0.6
)
series = pdp.iloc[0]
assert series.oil_pdp_last3mo_per15bbl == 3125
assert series.boe_pdp_last3mo_per15bbl == 4527
def test_pdp_handle_range_of_nan_values(self, prod_df):
prod_df = prod_df.loc[:, ["oil", "gas", "days_in_month"]]
prod_df["boe"] = prod_df.prodstats.boe()
prod_df.loc[
prod_df.groupby(level=0).tail(12).index, ["oil", "boe", "gas"]
] = np.nan
pdp = prod_df.prodstats.pdp_by_well(
range_name=ProdStatRange.LAST, months=3, dollars_per_bbl=15, factor=0.6
)
assert pdp.shape == (0, 2)
    def test_pdp_filter_zero_prod_months(self, prod_df):
prod_df = prod_df.loc[:, ["oil", "gas", "days_in_month"]]
prod_df["boe"] = prod_df.prodstats.boe()
prod_df.loc[prod_df.groupby(level=0).tail(2).index, ["oil", "boe", "gas"]] = 0
pdp = prod_df.prodstats.pdp_by_well(
range_name=ProdStatRange.LAST, months=3, dollars_per_bbl=15, factor=0.6
)
expected = (
prod_df.prodstats.daily_avg_by_month(["oil", "boe"], "days_in_month")
.mul(15)
.mul(0.6)
.rename(columns={"oil_avg_daily": "oil_pdp", "boe_avg_daily": "boe_pdp"})
)
expected = (
expected[expected.oil_pdp > 0]
.groupby(level=0)
.tail(1)
.loc[:, ["oil_pdp", "boe_pdp"]]
.astype(int)
)
assert np.array_equal(pdp.values, expected.values)
# if __name__ == "__main__":
# from util.jsontools import load_json
# @pytest.fixture
# def prod_df(ihs_prod):
# yield ProductionWellSet(wells=ihs_prod).df()
# ihs_prod = load_json(f"tests/fixtures/ihs_prod.json")
# prod_df: pd.DataFrame = next(prod_df.__wrapped__(ihs_prod))
|
nilq/baby-python
|
python
|
import glob
import matplotlib.image as mpimg
import os.path
from davg.lanefinding.Pipeline import Pipeline
def demonstrate_lane_finding_on_test_images(data):
pipeline = Pipeline()
for idx in range(len(data)):
# Read in a test image
img = mpimg.imread(data[idx])
# Process it
left_line, right_line = pipeline.visualize_lanes_using_matplotlib(img)
# Demo run on the first two test images
test_images = glob.glob('test_images_s1_1296x972/*.jpg')
demonstrate_lane_finding_on_test_images(test_images[0:2])
|
nilq/baby-python
|
python
|
import argparse
import os
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torch.nn.functional as F
import time
from dataloader import KITTILoader as DA
import utils.logger as logger
import models.anynet
parser = argparse.ArgumentParser(description='AnyNet finetune on KITTI')
parser.add_argument('--maxdisp', type=int, default=192,
                    help='maximum disparity')
parser.add_argument('--loss_weights', type=float, nargs='+', default=[0.25, 0.5, 1., 1.])
parser.add_argument('--max_disparity', type=int, default=192)
parser.add_argument('--maxdisplist', type=int, nargs='+', default=[12, 3, 3])
parser.add_argument('--datatype', default='2015',
help='2015 or 2012')
parser.add_argument('--datapath', default=None, help='datapath')
parser.add_argument('--epochs', type=int, default=300,
help='number of epochs to train')
parser.add_argument('--train_bsize', type=int, default=6,
help='batch size for training (default: 6)')
parser.add_argument('--test_bsize', type=int, default=8,
help='batch size for testing (default: 8)')
parser.add_argument('--save_path', type=str, default='results/finetune_anynet/',
help='the path of saving checkpoints and log')
parser.add_argument('--resume', type=str, default=None,
help='resume path')
parser.add_argument('--lr', type=float, default=5e-4,
help='learning rate')
parser.add_argument('--with_spn', action='store_true', help='with spn network or not')
parser.add_argument('--print_freq', type=int, default=5, help='print frequency')
parser.add_argument('--init_channels', type=int, default=1, help='initial channels for 2d feature extractor')
parser.add_argument('--nblocks', type=int, default=2, help='number of layers in each stage')
parser.add_argument('--channels_3d', type=int, default=4, help='number of initial channels 3d feature extractor ')
parser.add_argument('--layers_3d', type=int, default=4, help='number of initial layers in 3d network')
parser.add_argument('--growth_rate', type=int, nargs='+', default=[4,1,1], help='growth rate in the 3d network')
parser.add_argument('--spn_init_channels', type=int, default=8, help='initial channels for spnet')
parser.add_argument('--start_epoch_for_spn', type=int, default=121)
parser.add_argument('--loadmodel', type=str, default='results/pretrained_anynet/checkpoint.tar',
help='pretrained model path')
parser.add_argument('--start_epoch', type=int, default=1, help='start epoch')
parser.add_argument('--gpuid', type=str, default='0', help='the id of gpu to use')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpuid
gpuid = args.gpuid
print("use gpu {}".format(gpuid))
if args.datatype == '2015':
from dataloader import KITTIloader2015 as ls
elif args.datatype == '2012':
from dataloader import KITTIloader2012 as ls
def main():
global args
log = logger.setup_logger(args.save_path + '/training.log')
train_left_img, train_right_img, train_left_disp, test_left_img, test_right_img, test_left_disp = ls.dataloader(
args.datapath,log)
TrainImgLoader = torch.utils.data.DataLoader(
DA.myImageFloder(train_left_img, train_right_img, train_left_disp, True),
batch_size=args.train_bsize, shuffle=True, num_workers=4, drop_last=False)
TestImgLoader = torch.utils.data.DataLoader(
DA.myImageFloder(test_left_img, test_right_img, test_left_disp, False),
batch_size=args.test_bsize, shuffle=False, num_workers=4, drop_last=False)
if not os.path.isdir(args.save_path):
os.makedirs(args.save_path)
for key, value in sorted(vars(args).items()):
log.info(str(key) + ': ' + str(value))
model = models.anynet.AnyNet(args)
model = nn.DataParallel(model).cuda()
optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))
log.info('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
num_pretrain_items = 0
num_model_items = 0
# if args.loadpremodel is not None:
# pretrained_dict = torch.load(args.loadpremodel)
# # start_epoch = pretrained_dict['epoch'] + 1
# model_dict = model.state_dict()
# print('pretrained dict: ' + args.loadpremodel + ' : ' + str(len(pretrained_dict['state_dict'])))
# for k, v in pretrained_dict['state_dict'].items():
# print(k, v.shape)
# print('model dict: ' + str(len(model_dict)))
# for k, v in model_dict.items():
# print(k, v.shape)
# pretrained_dict = {k: v for k, v in pretrained_dict['state_dict'].items() if k in model_dict}
# num_pretrain_items = len(pretrained_dict.items())
# num_model_items = len(model_dict.items())
# print('Number of pretrained items: {:d}'.format(num_pretrain_items))
# print('Number of model items: {:d}'.format(num_model_items))
# model_dict.update(pretrained_dict)
# model.load_state_dict(model_dict)
# # state_dict = torch.load(args.loadpremodel)
# # model.load_state_dict(state_dict['state_dict'])
if args.loadmodel is not None:
pretrained_dict = torch.load(args.loadmodel)
# start_epoch = pretrained_dict['epoch'] + 1
model_dict = model.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict['state_dict'].items() if k in model_dict}
num_pretrain_items = len(pretrained_dict.items())
num_model_items = len(model_dict.items())
print('Number of loaded items: {:d}'.format(num_pretrain_items))
print('Number of model items: {:d}'.format(num_model_items))
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
# state_dict = torch.load(args.loadmodel)
# model.load_state_dict(state_dict['state_dict'])
else:
start_epoch = 1
model_dict = model.state_dict()
num_model_items = len(model_dict.items())
print('Number of model items: {:d}'.format(num_model_items))
    if args.start_epoch != 1:
start_epoch = args.start_epoch
else:
start_epoch = 1
print(model)
print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
# if args.pretrained:
# if os.path.isfile(args.pretrained):
# checkpoint = torch.load(args.pretrained)
# model.load_state_dict(checkpoint['state_dict'])
# log.info("=> loaded pretrained model '{}'"
# .format(args.pretrained))
# else:
# log.info("=> no pretrained model found at '{}'".format(args.pretrained))
# log.info("=> Will start from scratch.")
# args.start_epoch = 0
# if args.resume:
# if os.path.isfile(args.resume):
# log.info("=> loading checkpoint '{}'".format(args.resume))
# checkpoint = torch.load(args.resume)
# model.load_state_dict(checkpoint['state_dict'])
# optimizer.load_state_dict(checkpoint['optimizer'])
# log.info("=> loaded checkpoint '{}' (epoch {})"
# .format(args.resume, checkpoint['epoch']))
# else:
# log.info("=> no checkpoint found at '{}'".format(args.resume))
# log.info("=> Will start from scratch.")
# else:
# log.info('Not Resume')
start_full_time = time.time()
for epoch in range(start_epoch, args.epochs + 1):
log.info('This is {}-th epoch'.format(epoch))
adjust_learning_rate(optimizer, epoch)
train(TrainImgLoader, model, optimizer, log, epoch)
savefilename = args.save_path + 'kitti_' + args.datatype + '_' + str(epoch) + '.tar'
torch.save({
'epoch': epoch,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, savefilename)
if epoch % 1 ==0:
test(TestImgLoader, model, log)
test(TestImgLoader, model, log)
log.info('full training time = {:.2f} Hours'.format((time.time() - start_full_time) / 3600))
def train(dataloader, model, optimizer, log, epoch=1):
stages = 3 + args.with_spn
losses = [AverageMeter() for _ in range(stages)]
length_loader = len(dataloader)
model.train()
for batch_idx, (imgL, imgR, disp_L) in enumerate(dataloader):
imgL = imgL.float().cuda()
imgR = imgR.float().cuda()
disp_L = disp_L.float().cuda()
optimizer.zero_grad()
mask = disp_L > 0
mask.detach_()
outputs = model(imgL, imgR)
if args.with_spn:
if epoch >= args.start_epoch_for_spn:
num_out = len(outputs)
else:
num_out = len(outputs) - 1
else:
num_out = len(outputs)
outputs = [torch.squeeze(output, 1) for output in outputs]
loss = [args.loss_weights[x] * F.smooth_l1_loss(outputs[x][mask], disp_L[mask], size_average=True)
for x in range(num_out)]
sum(loss).backward()
optimizer.step()
for idx in range(num_out):
losses[idx].update(loss[idx].item())
        if batch_idx % args.print_freq == 0:
info_str = ['Stage {} = {:.2f}({:.2f})'.format(x, losses[x].val, losses[x].avg) for x in range(num_out)]
info_str = '\t'.join(info_str)
log.info('Epoch{} [{}/{}] {}'.format(
epoch, batch_idx, length_loader, info_str))
info_str = '\t'.join(['Stage {} = {:.2f}'.format(x, losses[x].avg) for x in range(stages)])
log.info('Average train loss = ' + info_str)
def test(dataloader, model, log):
stages = 3 + args.with_spn
D1s = [AverageMeter() for _ in range(stages)]
length_loader = len(dataloader)
model.eval()
for batch_idx, (imgL, imgR, disp_L) in enumerate(dataloader):
imgL = imgL.float().cuda()
imgR = imgR.float().cuda()
disp_L = disp_L.float().cuda()
with torch.no_grad():
outputs = model(imgL, imgR)
for x in range(stages):
output = torch.squeeze(outputs[x], 1)
D1s[x].update(error_estimating(output, disp_L).item())
info_str = '\t'.join(['Stage {} = {:.4f}({:.4f})'.format(x, D1s[x].val, D1s[x].avg) for x in range(stages)])
log.info('[{}/{}] {}'.format(
batch_idx, length_loader, info_str))
info_str = ', '.join(['Stage {}={:.4f}'.format(x, D1s[x].avg) for x in range(stages)])
log.info('Average test 3-Pixel Error = ' + info_str)
def error_estimating(disp, ground_truth, maxdisp=192):
gt = ground_truth
mask = gt > 0
mask = mask * (gt < maxdisp)
errmap = torch.abs(disp - gt)
err3 = ((errmap[mask] > 3.) & (errmap[mask] / gt[mask] > 0.05)).sum()
return err3.float() / mask.sum().float()
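# Illustrative reading of the 3-pixel error above: a pixel is erroneous only
# when its disparity error exceeds 3 px AND 5% of ground truth, so gt=100 with
# prediction 104 (4 px, 4%) does not count, while gt=20 with prediction 25
# (5 px, 25%) does.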
def adjust_learning_rate(optimizer, epoch):
if epoch <= 200:
lr = args.lr
elif epoch <= 400:
lr = args.lr * 0.1
else:
lr = args.lr * 0.01
for param_group in optimizer.param_groups:
param_group['lr'] = lr
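# The schedule above, spelled out (assuming the default --lr of 5e-4):
#   epochs   1-200: 5e-4
#   epochs 201-400: 5e-5
#   epochs   401+ : 5e-6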
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
import os
import logging
from typing import Dict, Tuple
from .read import AppDataReader
from .fstprocessor import FSTDirectory, FSTFile
from ... import utils
_logger = logging.getLogger(__name__)
class AppExtractor:
def __init__(self, fst_entries: Tuple[Dict[str, FSTDirectory], Dict[str, FSTFile]]):
self.directories, files = fst_entries
# group files by their secondary index (= app file index),
# then sort the files in each of those groups by their offsets
self.files = {
index: sorted(group, key=lambda tup: tup[1].offset)
for index, group in
utils.misc.groupby_sorted(files.items(), key=lambda tup: tup[1].secondary_index)
}
def is_required(self, content_index: int) -> bool:
'''
Returns true if the app file with the provided index contains any
data specified in the FST (i.e. contains relevant data for unpacking)
'''
return content_index in self.files
def create_directories(self, target_path: str) -> None:
'''
        Creates all directories specified in the FST under the target path
'''
for dir_path, dir in self.directories.items():
if dir.deleted:
continue
path = self.__join_path(target_path, dir_path)
_logger.info(f'creating directory {path} (source index: {dir.secondary_index})')
os.makedirs(path, exist_ok=True)
def extract_files(self, content_index: int, reader: AppDataReader, target_path: str) -> None:
'''
Extracts files contained in the content file at the given index to the specified path
'''
for file_path, file in self.files[content_index]:
if file.deleted:
continue
path = self.__join_path(target_path, file_path)
_logger.info(f'extracting {file_path} (source index: {file.secondary_index}, offset: {file.offset}, size: {file.size})')
try:
with open(path, 'wb') as f:
for block in reader.get_data(file.offset, file.size):
f.write(block)
except Exception:
# remove (incomplete) file if exception was raised
if os.path.isfile(path):
os.unlink(path)
raise
@staticmethod
def __join_path(target_path: str, other_path: str) -> str:
path = os.path.join(target_path, other_path)
# make sure resulting path is inside target path
target_path_real = os.path.realpath(target_path)
assert os.path.commonprefix((os.path.realpath(path), target_path_real)) == target_path_real
return path
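    # Illustrative behaviour of the traversal guard above (hypothetical paths):
    #   __join_path("/out", "meta/app.xml") -> "/out/meta/app.xml"
    #   __join_path("/out", "../../etc/x")  -> AssertionError (escapes target)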
|
nilq/baby-python
|
python
|
"""
vtelem - A module for basic frame interfaces.
"""
# built-in
import math
from typing import Any, Dict, Tuple
# internal
from vtelem.classes.byte_buffer import ByteBuffer
from vtelem.classes.type_primitive import TypePrimitive, new_default
from vtelem.enums.primitive import random_integer
FRAME_OVERHEAD = new_default("count").type.value.size
def time_to_int(time: float, precision: int = 1000) -> int:
"""Convert a floating-point time value into an integer."""
frac, num = math.modf(time)
return int((int(num) * precision) + int(math.floor(frac * precision)))
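# Quick illustration, computed from the definition above:
#   time_to_int(1.5)       -> 1500  (default millisecond precision)
#   time_to_int(2.25, 100) -> 225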
class Frame:
"""A base class for frames."""
def __init__(
self,
mtu: int,
frame_id: TypePrimitive,
frame_type: TypePrimitive,
timestamp: TypePrimitive,
use_crc: bool = True,
) -> None:
"""Construct an empty frame."""
self.mtu = mtu
self.used: int = 0
self.buffer = ByteBuffer(bytearray(self.mtu))
self.id_primitive = new_default("id")
self.finalized = False
self.initialized = False
# write frame header: (application) id, type, timestamp
self.write(frame_id)
self.write(frame_type)
self.write(timestamp)
# write frame header: element count (placeholder)
self.count: Dict[str, Any] = {}
self.count["primitive"] = new_default("count")
self.count["position"] = self.buffer.get_pos()
self.count["value"] = 0
self.write(self.count["primitive"])
# reserve space for crc
self.crc = None
if use_crc:
self.crc = new_default("crc")
self.used += self.crc.size()
self.overhead = self.used
assert self.space > 0
def write(self, elem: TypePrimitive) -> None:
"""Write a primitive into the buffer."""
self.used += elem.write(self.buffer)
@property
def space(self) -> int:
"""Get the amount of space left in this frame."""
return self.mtu - self.used
def increment_count(self, amount: int = 1) -> None:
"""Increment this frame's count by some amount."""
self.count["value"] += amount
def pad(self, num_bytes: int) -> int:
"""
Attempt to add padding bytes at the end of a frame, return the actual
        amount of padding added.
"""
# only allow padding at the end of a frame
assert self.finalized
# don't allow more padding outside the mtu
pad_amt = min(num_bytes, self.mtu - self.used)
self.buffer.append(bytearray(pad_amt), pad_amt)
self.used += pad_amt
return pad_amt
def pad_to_mtu(self) -> None:
"""Attempt to pad this frame to the full mtu size."""
self.pad(self.mtu - self.used)
@property
def raw(self) -> Tuple[bytearray, int]:
"""Obtain the raw buffer, and its size, from this frame."""
assert self.finalized
return self.buffer.data, self.used
def with_size_header(
self, frame_size: TypePrimitive = None
) -> Tuple[bytes, int]:
"""
Get a buffer (and its size) for this frame, with the inter-frame
size header included.
"""
if frame_size is None:
frame_size = new_default("count")
data, size = self.raw
assert frame_size.set(size)
return frame_size.buffer() + data, size + frame_size.type.value.size
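    # Sketch of the resulting wire layout (for a finalized frame):
    #   [ size header (count primitive) | frame bytes ... ]
    # so a reader consumes the fixed-size length header first, then reads
    # exactly that many frame bytes.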
def finalize_hook(self) -> None:
"""Can be overridden by implementing classes."""
def finalize(self, write_crc: bool = True) -> int:
"""
Finalize this frame, making the underlying buffer ready for wire-level
transport.
"""
if self.finalized:
return self.used
# write the count into the frame, into its reserved position
assert self.count["primitive"].set(self.count["value"])
self.count["primitive"].write(self.buffer, self.count["position"])
# run frame-specific finalization
self.finalize_hook()
assert self.initialized
# compute and write the crc
if self.crc is not None:
if write_crc:
self.crc.set(self.buffer.crc32())
else:
self.crc.set(random_integer(self.crc.type))
self.crc.write(self.buffer)
self.finalized = True
assert self.buffer.size == self.used
return self.used
|
nilq/baby-python
|
python
|
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Microsoft Public License. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Microsoft Public License, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Microsoft Public License.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
'''
Create a Python class which derives from CLR type(s).
'''
#------------------------------------------------------------------------------
from iptest.assert_util import *
skiptest("silverlight")
add_clr_assemblies("baseclasscs", "typesamples")
from Merlin.Testing import *
from Merlin.Testing.BaseClass import *
import System
def test_simply_derive():
class C(EmptyClass): pass
class C(EmptyTypeGroup2): pass
class C(EmptyGenericClass[int]): pass
class C(IEmpty): pass
class C(IGenericEmpty[int]): pass
class C(AbstractEmptyClass): pass
class C(INotEmpty): pass
class C(AbstractNotEmptyClass): pass
#class C(EmptyDelegate): pass
class C(System.Double): pass
def test_multiple_typegroup():
class C(IInterfaceGroup1, IInterfaceGroup2): pass
class C(IInterfaceGroup1, IInterfaceGroup2, EmptyClass): pass
class C(EmptyTypeGroup2, IInterfaceGroup1, IInterfaceGroup2): pass
class C(EmptyTypeGroup2, IInterfaceGroup1[int], IInterfaceGroup2): pass
def test_negative_simply_derive():
# value type, sealed ref type
def f1():
class C(EmptyStruct): pass
def f2():
class C(EmptyEnum): pass
def f3():
class C(SealedClass): pass
def f4():
class C(System.Single): pass
AssertErrorWithMessage(TypeError, "cannot derive from Merlin.Testing.BaseClass.EmptyStruct because it is a value type", f1)
AssertErrorWithMessage(TypeError, "cannot derive from Merlin.Testing.BaseClass.EmptyEnum because it is a value type", f2)
AssertErrorWithMessage(TypeError, "cannot derive from Merlin.Testing.BaseClass.SealedClass because it is sealed", f3)
AssertErrorWithMessage(TypeError, "cannot derive from System.Single because it is a value type", f4)
# open generic
def f():
class C(EmptyGenericClass): pass
AssertErrorWithMessage(TypeError,
"C: cannot inhert from open generic instantiation IronPython.Runtime.Types.PythonType. Only closed instantiations are supported.",
f)
def f():
class C(IGenericEmpty): pass
AssertErrorWithMessage(TypeError,
"C: cannot inhert from open generic instantiation Merlin.Testing.BaseClass.IGenericEmpty`1[T]. Only closed instantiations are supported.",
f)
def f():
class C(EmptyTypeGroup1): pass
AssertErrorWithMessage(TypeError,
"cannot derive from open generic types <types 'EmptyTypeGroup1[T]', 'EmptyTypeGroup1[K, V]'>",
f)
# too many base (same or diff)
def f():
class C(EmptyClass, EmptyClass): pass
AssertErrorWithMessage(TypeError, "duplicate base class EmptyClass", f)
def f():
class C(IEmpty, EmptyClass, IEmpty): pass
AssertErrorWithMessage(TypeError, "duplicate base class IEmpty", f)
def f():
class C(EmptyClass, EmptyGenericClass[int]): pass
AssertErrorWithMessage(TypeError,
"C: can only extend one CLI or builtin type, not both Merlin.Testing.BaseClass.EmptyClass (for IronPython.Runtime.Types.PythonType) and Merlin.Testing.BaseClass.EmptyGenericClass`1[[System.Int32, mscorlib, Version=%d.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089]] (for IronPython.Runtime.Types.PythonType)" % System.Environment.Version.Major,
f)
class B:pass
b = B()
def f():
class C(object, b): pass
AssertErrorWithPartialMessage(TypeError,
"metaclass conflict instance and type",
f)
def f():
class C(EmptyGenericClass[()]): pass
AssertError(ValueError, f)
def test_system_type_cs0644():
# http://msdn2.microsoft.com/en-us/library/hxds244y(VS.80).aspx
# bug 363984
#class C(System.Delegate): pass
#class C(System.Array): pass
#class C(System.ValueType): pass
#class C(System.Enum): pass
pass
def test_mbr():
class C(System.MarshalByRefObject): pass
#class C('abc'): pass
# scenarios
# C derive from interface I, D derive from C and I (again)
# interface's base types: interfaces (implement them)
# ctor: params/param_dict
run_test(__name__)
|
nilq/baby-python
|
python
|
import source
import rssfeeds
from flask import Flask
app = Flask(__name__)
# Server test route
@app.route('/hello')
def hello_world():
return 'Hello, multiverse!'
# Server main route
@app.route('/')
def display_urls():
test_response = "\n*** START ***\n"
# Read the source file
feed_urls = source.get_source_list()
for url in feed_urls:
data = rssfeeds.get_posts_details(url)
        if data is not None:
            test_response = test_response + str(data)
        else:
            test_response = test_response + "Unable to load: " + url
test_response = test_response + str(feed_urls)
    test_response = test_response + "\n*** END ***\n"
return test_response
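# Hedged run block: assumes this module is launched directly (e.g. `python
# app.py`) rather than via `flask run`; the debug flag is illustrative.
if __name__ == '__main__':
    app.run(debug=True)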
|
nilq/baby-python
|
python
|
# Copyright 2013 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the Python DB API 2.0 (PEP 249) for Impala"""
from __future__ import absolute_import
import six
import time
import datetime
from impala._rpc.hiveserver2 import connect_to_impala as connect_to_hiveserver2
from impala._rpc.beeswax import connect_to_impala as connect_to_beeswax
from impala.dbapi.hiveserver2 import HiveServer2Connection
from impala.dbapi.beeswax import BeeswaxConnection
from impala.error import (Error, Warning, InterfaceError, DatabaseError,
InternalError, OperationalError, ProgrammingError,
IntegrityError, DataError, NotSupportedError)
from impala.util import warn_deprecate_hs2
# PEP 249 module globals
apilevel = '2.0'
threadsafety = 1 # Threads may share the module, but not connections
paramstyle = 'pyformat'
def connect(host='localhost', port=21050, protocol='hiveserver2',
database=None, timeout=45, use_ssl=False, ca_cert=None,
use_ldap=False, ldap_user=None, ldap_password=None,
use_kerberos=False, kerberos_service_name='impala'):
# PEP 249
if protocol.lower() == 'beeswax':
warn_deprecate_hs2()
service = connect_to_beeswax(
host, port, timeout, use_ssl, ca_cert, use_ldap, ldap_user,
ldap_password, use_kerberos, kerberos_service_name)
return BeeswaxConnection(service, default_db=database)
elif protocol.lower() == 'hiveserver2':
service = connect_to_hiveserver2(
host, port, timeout, use_ssl, ca_cert, use_ldap, ldap_user,
ldap_password, use_kerberos, kerberos_service_name)
return HiveServer2Connection(service, default_db=database)
else:
raise NotSupportedError(
"The specified protocol '%s' is not supported." % protocol)
class _DBAPITypeObject(object):
# Compliance with Type Objects of PEP 249.
def __init__(self, *values):
self.values = values
def __cmp__(self, other):
if other in self.values:
return 0
else:
return -1
def __eq__(self, other):
# py3 ignores __cmp__
return other in self.values
STRING = _DBAPITypeObject('STRING')
BINARY = _DBAPITypeObject('BINARY')
NUMBER = _DBAPITypeObject('BOOLEAN', 'TINYINT', 'SMALLINT', 'INT', 'BIGINT',
'FLOAT', 'DOUBLE', 'DECIMAL')
DATETIME = _DBAPITypeObject('TIMESTAMP')
ROWID = _DBAPITypeObject()
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime
def DateFromTicks(ticks):
return Date(*time.localtime(ticks)[:3])
def TimeFromTicks(ticks):
return Time(*time.localtime(ticks)[3:6])
def TimestampFromTicks(ticks):
return Timestamp(*time.localtime(ticks)[:6])
if six.PY3:
buffer = memoryview
Binary = buffer
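# --- Hedged PEP 249 usage sketch. connect() is defined above; the cursor
# methods are the standard DB API surface and are assumed to be provided by
# HiveServer2Connection.
# conn = connect(host='impala-host', port=21050)
# cur = conn.cursor()
# cur.execute('SELECT COUNT(*) FROM some_table')
# print(cur.fetchall())
# conn.close()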
|
nilq/baby-python
|
python
|
from kivy.app import App
from kivy.uix.widget import Widget
from color_util import get_normalized_color
from Chessboard import Chessboard, Color, Square
class ChessGame(Widget):
def on_touch_down(self, touch):
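        # Stub: a full implementation would map touch.pos (window coordinates)
        # to a board Square and update the Chessboard state accordingly.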
return
class ChessApp(App):
def build(self):
game = ChessGame()
return game
if __name__ == '__main__':
ChessApp().run()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Implentation of command `/logs`.
"""
from docker_utils import (
ContainerSelector,
DockerCommand
)
class Logs(DockerCommand):
"""Implementation of command `/start`.
"""
__HELP__ = """▪️ Usage: `/logs CONTAINER`:
Shows logs of a container."""
LOG_LINES_TO_FETCH: int = 25
def main(self):
container_name = self.arg(
"0",
ContainerSelector(self.docker_client),
"Choose a container:"
)
container = self.get_container(container_name)
if container:
logs_raw = container.logs(tail=Logs.LOG_LINES_TO_FETCH)
logs_lines = logs_raw.decode("UTF-8").split("\n")
logs_formatted = "\n".join(
[f'▪️ `{line}`' for line in logs_lines if line]
)
self.reply(
f'🗒 Logs for container `{container_name}` ' +
f'(last *{Logs.LOG_LINES_TO_FETCH}* lines):\n{logs_formatted}'
)
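# --- Hedged standalone sketch of the same fetch using the docker SDK
# directly; the container name is illustrative.
# import docker
# client = docker.from_env()
# raw = client.containers.get("my-container").logs(tail=25)
# print(raw.decode("UTF-8"))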
|
nilq/baby-python
|
python
|
"""Test searching volume content."""
from itertools import repeat
from random import randrange
import json
from django.test import TestCase, Client
from django.test import RequestFactory
from django.urls import reverse
from apps.users.tests.factories import UserFactory
from ...iiif.manifests.tests.factories import ManifestFactory
from ...iiif.canvases.tests.factories import CanvasFactory
from ...iiif.annotations.tests.factories import AnnotationFactory
from ..search import SearchManifestCanvas
from .factories import UserAnnotationFactory
from ..models import UserAnnotation
class TestReaduxPageDetailSearch(TestCase):
"""
Test page search.
"""
def setUp(self):
self.search_manifest_view = SearchManifestCanvas.as_view()
self.request = RequestFactory()
self.volume = ManifestFactory.create()
original_canvas = self.volume.canvas_set.first()
self.user = UserFactory.create()
self.ocr_user = UserFactory.create(username='ocr', name='OCR')
canvas_position = 1
for _ in repeat(None, randrange(5, 10)):
CanvasFactory.create(manifest=self.volume, position=canvas_position)
canvas_position += 1
self.volume.start_canvas = self.volume.canvas_set.all()[1]
self.volume.save()
        # Delete the canvas created by the ManifestFactory to ensure a clean set.
original_canvas.delete()
for _ in [1, 2]:
self.add_annotations(self.volume.canvas_set.get(position=1))
for _ in [1, 2, 3]:
self.add_annotations(self.volume.canvas_set.get(position=2))
self.client = Client()
self.url = reverse('search_pages')
def add_annotations(self, canvas):
"""Add OCR and User annotations to a canvas."""
AnnotationFactory.create(
canvas=canvas,
content='stankonia',
owner=self.ocr_user
)
UserAnnotationFactory.create(
canvas=canvas,
content='Aquemini',
owner=self.user
)
def load_results(self, response):
"""Decode the json response
:param response: search results
:type response: client response
:return: Dict of results
:rtype: dict
"""
return json.loads(response.content.decode('UTF-8-sig'))
def test_manifest_canvas_ocr_partial_search(self):
query_params = {'volume': self.volume.pid, 'type': 'partial', 'query': 'stank'}
request = self.request.get(
self.url, query_params
)
request.user = UserFactory.create()
response = self.search_manifest_view(request)
search_results = self.load_results(response)
assert len(search_results['ocr_annotations']) == 2
assert len(search_results['user_annotations']) == 0
assert search_results['search_terms'] == 'stank'.split()
assert json.loads(search_results['ocr_annotations'][0])['canvas__position'] == 1
assert json.loads(search_results['ocr_annotations'][1])['canvas__position'] == 2
assert json.loads(search_results['ocr_annotations'][0])['canvas__position__count'] == 2
assert json.loads(search_results['ocr_annotations'][1])['canvas__position__count'] == 3
def test_manifest_canvas_ocr_exact_search(self):
query_params = {'volume': self.volume.pid, 'type': 'exact', 'query': 'stankonia'}
request = self.request.get(
self.url, query_params
)
request.user = UserFactory.create()
response = self.search_manifest_view(request)
search_results = self.load_results(response)
assert len(search_results['ocr_annotations']) == 2
assert len(search_results['user_annotations']) == 0
assert json.loads(search_results['ocr_annotations'][0])['canvas__position'] == 1
assert json.loads(search_results['ocr_annotations'][1])['canvas__position'] == 2
assert json.loads(search_results['ocr_annotations'][0])['canvas__position__count'] == 2
assert json.loads(search_results['ocr_annotations'][1])['canvas__position__count'] == 3
def test_manifest_canvas_ocr_exact_search_no_results(self):
query_params = {'volume': self.volume.pid, 'type': 'exact', 'query': 'Idlewild'}
request = self.request.get(
self.url, query_params
)
request.user = UserFactory.create()
response = self.search_manifest_view(request)
search_results = self.load_results(response)
assert len(search_results['ocr_annotations']) == 0
assert len(search_results['user_annotations']) == 0
def test_manifest_canvas_user_annotation_partial_search(self):
query_params = {'volume': self.volume.pid, 'type': 'partial', 'query': 'Aqu'}
request = self.request.get(
self.url, query_params
)
request.user = self.user
response = self.search_manifest_view(request)
search_results = self.load_results(response)
assert len(search_results['ocr_annotations']) == 0
assert len(search_results['user_annotations']) == 2
assert json.loads(search_results['user_annotations'][0])['canvas__position'] == 1
assert json.loads(search_results['user_annotations'][1])['canvas__position'] == 2
def test_manifest_canvas_user_annotation_exact_search(self):
query_params = {'volume': self.volume.pid, 'type': 'exact', 'query': 'Aquemini'}
request = self.request.get(
self.url, query_params
)
request.user = self.user
response = self.search_manifest_view(request)
search_results = self.load_results(response)
assert len(search_results['ocr_annotations']) == 0
assert len(search_results['user_annotations']) == 2
assert json.loads(search_results['user_annotations'][0])['canvas__position'] == 1
assert json.loads(search_results['user_annotations'][1])['canvas__position'] == 2
def test_manifest_canvas_user_annotation_exact_search_no_results(self):
query_params = {'volume': self.volume.pid, 'type': 'exact', 'query': 'Idlewild'}
request = self.request.get(
self.url, query_params
)
request.user = self.user
response = self.search_manifest_view(request)
search_results = self.load_results(response)
assert len(search_results['ocr_annotations']) == 0
assert len(search_results['user_annotations']) == 0
|
nilq/baby-python
|
python
|
from pyliterature import Pyliterature
urls = [
'http://science.sciencemag.org/content/355/6320/49.full',
'http://www.nature.com/nature/journal/v541/n7635/full/nature20782.html',
'http://www.sciencedirect.com/science/article/pii/S1751616116301138',
'http://pubs.acs.org/doi/full/10.1021/acscatal.6b02960',
]
keyword = 'DFT'
liter = Pyliterature()
for url in urls:
print(url + '\n\n')
liter.url = url
liter.parser()
    # Second pass: clear the URL and set the keyword so parser() extracts
    # keyword-matching sentences from the text gathered above.
    liter.url = None
    liter.keyword = keyword
# print(liter.text)
liter.parser()
print('===================================================')
for keysent in liter.keysents:
print(keysent)
print('\n')
|
nilq/baby-python
|
python
|
class Simple:
def hello(self):
return 'Hello'
def world(self):
return 'world!'
def hello_world(self):
return '%s %s' % (self.hello(), self.world())
|
nilq/baby-python
|
python
|
import json
with open('./config.json') as f:
    config = json.load(f)
print(config['URL'])
for k, v in config.items():
    print(k, ":", v)
|
nilq/baby-python
|
python
|
import socket
print(socket.gethostbyaddr("8.8.8.8"))
print(socket.gethostbyname("www.google.com"))
|
nilq/baby-python
|
python
|
"""Sokoban environments."""
import random
import numpy as np
from gym_sokoban.envs import sokoban_env_fast
from alpacka.envs import base
class Sokoban(sokoban_env_fast.SokobanEnvFast, base.ModelEnv):
"""Sokoban with state clone/restore and returning a "solved" flag.
Returns observations in one-hot encoding.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Return observations as float32, so we don't have to cast them in the
# network training pipeline.
self.observation_space.dtype = np.float32
def reset(self):
return super().reset().astype(np.float32)
def step(self, action):
(observation, reward, done, info) = super().step(action)
return (observation.astype(np.float32), reward, done, info)
def clone_state(self):
return self.clone_full_state()
def restore_state(self, state):
self.restore_full_state(state)
return self.render(mode=self.mode)
class ActionNoiseSokoban(Sokoban):
"""Sokoban with randomized actions."""
def __init__(self, action_noise, *args, **kwargs):
"""Initializes ActionNoiseSokoban.
Args:
action_noise: float, how often action passed to step() should be
replaced by one sampled uniformly from action space.
args: passed to Sokoban.__init__()
kwargs: passed to Sokoban.__init__()
"""
super().__init__(*args, **kwargs)
self._action_noise = action_noise
def step(self, action):
if random.random() < self._action_noise:
action = self.action_space.sample()
return super().step(action)
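# --- Hedged usage sketch. Constructor arguments other than `action_noise`
# pass through to SokobanEnvFast and are assumed, not confirmed here.
# env = ActionNoiseSokoban(action_noise=0.1)
# observation = env.reset()
# observation, reward, done, info = env.step(env.action_space.sample())
# state = env.clone_state()               # snapshot for model-based planning
# observation = env.restore_state(state)  # rewind and re-render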
|
nilq/baby-python
|
python
|
from registration.models import Events, Registration
from rest_framework import serializers
class EventListSerializer(serializers.HyperlinkedModelSerializer):
has_users = serializers.SerializerMethodField()
class Meta:
model = Events
fields = ['title', 'text', 'date', 'has_users']
def get_has_users(self, obj):
return obj.registration_set.exists()
class EventDetailSerializer(serializers.HyperlinkedModelSerializer):
users = serializers.SerializerMethodField()
class Meta:
model = Events
fields = ['title', 'text', 'date', 'users']
def get_users(self, obj):
return [registration.user.username for registration in obj.registration_set.all()]
class RegistrationSerializer(serializers.ModelSerializer):
class Meta:
model = Registration
fields = ['user', 'event']
|
nilq/baby-python
|
python
|
from __future__ import print_function
from pipelineWrapper import PipelineWrapperBuilder
import argparse
import logging
import os
import yaml
logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
desc = """UCSC Precision Immuno pipeline"""
config = ("""patients:
{sample_name}:
tumor_dna_fastq_1 : {tumor_dna}
tumor_dna_fastq_2 : {tumor_dna2}
normal_dna_fastq_1 : {normal_dna}
tumor_type: {tumor_type}
ssec_encrypted: {ssec_encrypted}
filter_for_OxoG: {filter_for_OxoG}
normal_dna_fastq_2 : {normal_dna2}
tumor_rna_fastq_1 : {tumor_rna}
tumor_rna_fastq_2 : {tumor_rna2}
Universal_Options:
dockerhub: {dockerhub}
java_Xmx: {javaxmx}
reference_build: {reference_build}
sse_key: {sse_key}
sse_key_is_master: {sse_key_is_master}
gdc_download_token: {gdc_download_token}
storage_location: Local
output_folder: {work_mount}/output
mail_to: {mail_to}
# These options are for each specific tool. You probably don't need to change any of this!
alignment:
cutadapt:
version : {cutadapt_ver}
a : {cutadapt_a}
A : {cutadapt_A}
star:
version: {star_ver}
type : {star_type}
index : {star_index}
bwa:
version: {bwa_ver}
index : {bwa_index}
post:
samtools:
version: {samtools_alignment_ver}
picard:
version: {picard_ver}
expression_estimation:
rsem:
version: {rsem_ver}
index : {rsem_index}
mutation_calling:
indexes:
chromosomes: {chromosomes}
genome_fasta : {genome_fasta}
genome_fai : {genome_fai}
genome_dict : {genome_dict}
cosmic_vcf : {cosmic_vcf}
cosmic_idx : {cosmic_idx}
dbsnp_vcf : {dbsnp_vcf}
dbsnp_idx : {dbsnp_idx}
dbsnp_tbi : {dbsnp_tbi}
mutect:
version: {mutect_ver}
java_Xmx : {mutect_javaxmx}
muse:
version: {muse_ver}
radia:
version: {radia_ver}
cosmic_beds: {cosmic_beds}
dbsnp_beds: {dbsnp_beds}
retrogene_beds: {retrogene_beds}
pseudogene_beds: {pseudogene_beds}
gencode_beds: {gencode_beds}
somaticsniper:
version: {somaticsniper_ver}
samtools:
version: {samtools_somaticsniper_ver}
bam_readcount:
version: {bamreadcount_ver}
star_fusion:
run: {starfusion}
version: {star_fusion_ver}
fusion_inspector:
run_trinity: {run_trinity}
version: {fusioninspector_ver}
strelka:
version: {strelka_ver}
config_file: {strelka_config}
mutation_annotation:
snpeff:
version: {snpeff_ver}
index : {snpeff}
java_Xmx : {spnff_javaxmx}
mutation_translation:
transgene:
version: {transgene_ver}
gencode_peptide_fasta : {transgene_peptide_fasta}
gencode_transcript_fasta : {transgene_transcript_fasta}
gencode_annotation_gtf : {transgene_annotation_gtf}
genome_fasta : {transgene_genome}
haplotyping:
phlat:
version: {phlat_ver}
index : {phlat}
mhc_peptide_binding:
mhci:
version: {mhci_ver}
method_file : {mhci}
pred : {mhci_pred}
mhcii:
version: {mhcii_ver}
method_file : {mhcii}
pred : {mhcii_pred}
netmhciipan:
version: {netmhciipan_ver}
prediction_ranking:
rankboost:
version: {rankboost_ver}
mhci_args:
npa: {mhci_npa}
nph: {mhci_nph}
nMHC: {mhci_nMHC}
TPM: {mhci_TPM}
overlap: {mhci_overlap}
tndelta: {mhci_tndelta}
mhcii_args:
npa: {mhcii_npa}
nph: {mhcii_nph}
nMHC: {mhcii_nMHC}
TPM: {mhcii_TPM}
tndelta: {mhcii_tndelta}
reports:
mhc_pathways_file: {mhc_pathways_file}
itx_resistance_file: {itx_resistance_file}
immune_resistance_pathways_file: {immune_resistance_pathways_file}
car_t_targets_file: {car_t_targets_file}""")
def str2bool(v):
"""
    Necessary due to how argparse works; see
https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
"""
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == '__main__':
import protect
with open(os.path.join(os.path.dirname(protect.__file__), "pipeline",
"defaults.yaml")) as def_file:
defaults = yaml.load(def_file)
wrapper = PipelineWrapperBuilder('ProTECT', desc, config)
parser = wrapper.get_args()
parser.add_argument('--sample-name', type=str, required=True,
help="Name for the sample.")
parser.add_argument('--tumor-dna',
type=str, required=True,
help='Path for the tumor fastq.')
parser.add_argument('--normal-dna', type=str, required=True,
help='Path for the normal fastq.')
parser.add_argument('--tumor-rna', type=str, required=True,
help='Path for the tumor RNA fastq.')
parser.add_argument('--tumor-type', type=str, required=True,
help='Tumor type (such as STAD).')
parser.add_argument('--tumor-dna2', type=str, required=True,
help='Path for the tumor fastq pair.')
parser.add_argument('--normal-dna2', type=str, required=True,
help='Path for the normal fastq.')
parser.add_argument('--tumor-rna2', type=str, required=True,
help='Path for the tumor RNA fastq.')
parser.add_argument('--reference-build', type=str,
choices=['hg19', 'hg38'], default='hg19',
help='Reference build. Can be hg19 or hg38.')
parser.add_argument('--ssec-encrypted', type=str2bool, default='False')
parser.add_argument('--filter-for-OxoG', type=str2bool, default='False')
parser.add_argument('--cutadapt-a', type=str, default="AGATCGGAAGAG")
parser.add_argument('--cutadapt-A', type=str, default="AGATCGGAAGAG")
parser.add_argument('--cutadapt_ver', type=str,
default=defaults["alignment"]["cutadapt"]["version"],
help='Version of cutadapt.')
parser.add_argument('--star-type', type=str, choices=['star','starlong'], default='star',
help='Use starlong if your reads are > 150bp')
parser.add_argument('--star-index', type=str,
default="S3://protect-data/<reference_build>_references/"
"star_with_fusion_100bp_readlen_indexes.tar.gz",
help='Index for star.')
parser.add_argument('--star-ver', type=str,
default=defaults["alignment"]["star"]["version"],
help='Version of star.')
parser.add_argument('--bwa-index', type=str,
default="S3://protect-data/<reference_build>_references/bwa_index.tar.gz",
help='Path for bwa index.')
parser.add_argument('--bwa-ver', type=str,
default=defaults["alignment"]["bwa"]["version"],
help='Version of bwa.')
parser.add_argument('--samtools_alignment_ver', type=str,
default=defaults["alignment"]["post"]["samtools"]["version"],
help='Version of samtools for alignment.')
parser.add_argument('--picard-ver', type=str,
default=defaults["alignment"]["post"]["picard"]["version"],
help='Version of picard.')
parser.add_argument('--rsem-index', type=str,
default="S3://protect-data/<reference_build>_references/rsem_index.tar.gz",
help='Path for rsem index.')
parser.add_argument('--rsem-ver', type=str,
default=defaults["expression_estimation"]["rsem"]["version"],
help='Version of rsem.')
parser.add_argument('--mutect-ver', type=str,
default=defaults["mutation_calling"]["mutect"]["version"],
help='Version of mutect.')
parser.add_argument('--mutect-javaxmx', type=str, default='2G')
parser.add_argument('--muse-ver', type=str,
default=defaults["mutation_calling"]["muse"]["version"],
help='Version of muse.')
parser.add_argument('--radia-ver', type=str,
default=defaults["mutation_calling"]["radia"]["version"],
help='Version of radia.')
parser.add_argument('--cosmic-beds', type=str,
default="S3://protect-data/<reference_build>_references/"
"radia_cosmic.tar.gz",
help='Cosmic bed file for use by Radia.')
parser.add_argument('--dbsnp-beds', type=str,
default="S3://protect-data/<reference_build>_references/"
"radia_dbsnp.tar.gz",
help='dbsnp bed file for use by Radia.')
parser.add_argument('--retrogene-beds', type=str,
default="S3://protect-data/<reference_build>_references/"
"radia_retrogenes.tar.gz",
help='Retrogene bed file for use by Radia.')
parser.add_argument('--pseudogene-beds', type=str,
default="S3://protect-data/<reference_build>_references/"
"radia_pseudogenes.tar.gz",
                        help='Pseudogene bed file for use by Radia.')
parser.add_argument('--gencode-beds', type=str,
default="S3://protect-data/<reference_build>_references/"
"radia_gencode.tar.gz",
help='Gencode bed file for use by Radia.')
parser.add_argument('--somaticsniper-ver', type=str,
default=defaults["mutation_calling"]["somaticsniper"]["version"],
help='Version of somatic sniper.')
parser.add_argument('--samtools_somaticsniper-ver', type=str,
default=defaults["mutation_calling"]["somaticsniper"]["samtools"]["version"],
help='Version of samtools for somatic sniper')
parser.add_argument('--bamreadcount-ver', type=str,
default=defaults["mutation_calling"]["somaticsniper"]["bam_readcount"]["version"],
help='Version of bam_readcount.')
parser.add_argument('--strelka-ver', type=str,
default=defaults["mutation_calling"]["strelka"]["version"],
help='Version of strelka.')
parser.add_argument('--strelka-config', type=str,
default="S3://protect-data/<reference_build>_references/"
"strelka_bwa_WXS_config.ini.tar.gz",
help='Path to config for strelka.')
parser.add_argument('--starfusion', type=str2bool,
default=defaults["mutation_calling"]["star_fusion"]["run"],
help='Set to false to skip fusion calling.')
parser.add_argument('--star-fusion-ver', type=str,
default=defaults["mutation_calling"]["star_fusion"]["version"],
help='Version of star fusion.')
parser.add_argument('--run-trinity', type=str2bool,
default=defaults["mutation_calling"]["fusion_inspector"]["run_trinity"],
help='Set to false to skip de novo transcript assembly.')
parser.add_argument('--fusioninspector-ver', type=str,
default=defaults["mutation_calling"]["fusion_inspector"]["version"])
parser.add_argument('--snpeff-ver', type=str,
default=defaults["mutation_annotation"]["snpeff"]["version"],
help='Version of snpeff')
parser.add_argument('--snpeff', type=str,
default="S3://protect-data/<reference_build>_references/snpeff_index.tar.gz",
help='Path to indexes for snpeff.')
parser.add_argument('--spnff-javaxmx', type=str, default='20G')
parser.add_argument('--transgene-peptide-fasta', type=str,
default="S3://protect-data/<reference_build>_references/"
"gencode.<reference_gencode>.pc_translations_NOPARY.fa.tar.gz",
help='Path to gencode peptide fasta for transgene.')
parser.add_argument('--transgene-transcript-fasta', type=str,
default="S3://protect-data/<reference_build>_references/"
"gencode.<reference_gencode>.pc_transcripts.fa.tar.gz",
help='Path to gencode transcript fasta.')
parser.add_argument('--transgene-annotation-gtf', type=str,
default="S3://protect-data/<reference_build>_references/"
"gencode.<reference_gencode>.annotation_NOPARY.gtf.tar.gz",
help='Path to gencode annotation gtf.')
parser.add_argument('--transgene-genome', type=str,
default="S3://protect-data/<reference_build>_references/"
"<reference_build>.fa.tar.gz",
help='Path to genome fasta.')
parser.add_argument('--transgene-ver', type=str,
default=defaults["mutation_translation"]["transgene"]["version"],
help='Version of transgene.')
parser.add_argument('--phlat-ver', type=str,
default=defaults["haplotyping"]["phlat"]["version"],
help='Version of phlat.')
parser.add_argument('--phlat', type=str,
default="S3://protect-data/<reference_build>_references/phlat_index.tar.gz",
help='Path to config for phlat.')
parser.add_argument('--mhci-ver', type=str,
default=defaults["mhc_peptide_binding"]["mhci"]["version"],
help='Version of mhci.')
parser.add_argument('--mhci', type=str,
default="S3://protect-data/<reference_build>_references"
"/mhci_restrictions.json.tar.gz",
help='Path to config for mhci.')
parser.add_argument('--mhci-pred', type=str, default='IEDB_recommended')
parser.add_argument('--mhcii-pred', type=str, default='IEDB_recommended')
parser.add_argument('--mhci-npa', type=str,
default=defaults['prediction_ranking']['rankboost']['mhci_args']['npa'])
parser.add_argument('--mhci-nph', type=str,
default=defaults['prediction_ranking']['rankboost']['mhci_args']['nph'])
parser.add_argument('--mhci-nMHC', type=str,
default=defaults['prediction_ranking']['rankboost']['mhci_args']['nMHC'])
parser.add_argument('--mhci-TPM', type=str,
default=defaults['prediction_ranking']['rankboost']['mhci_args']['TPM'])
parser.add_argument('--mhci-overlap', type=str,
default=defaults['prediction_ranking']['rankboost']['mhci_args']['overlap'])
parser.add_argument('--mhci-tndelta', type=str,
default=defaults['prediction_ranking']['rankboost']['mhci_args']['tndelta'])
parser.add_argument('--mhcii-ver', type=str,
default=defaults["mhc_peptide_binding"]["mhcii"]["version"],
help='Version of mhcii.')
parser.add_argument('--mhcii', type=str,
default="S3://protect-data/<reference_build>_references/"
"mhcii_restrictions.json.tar.gz",
help='Path to config for mhcii.')
parser.add_argument('--mhcii-npa', type=str,
default=defaults['prediction_ranking']['rankboost']['mhcii_args']['npa'])
parser.add_argument('--mhcii-nph', type=str,
default=defaults['prediction_ranking']['rankboost']['mhcii_args']['nph'])
parser.add_argument('--mhcii-nMHC', type=str,
default=defaults['prediction_ranking']['rankboost']['mhcii_args']['nMHC'])
parser.add_argument('--mhcii-TPM', type=str,
default=defaults['prediction_ranking']['rankboost']['mhcii_args']['TPM'])
parser.add_argument('--mhcii-tndelta', type=str,
default=defaults['prediction_ranking']['rankboost']['mhcii_args']['tndelta'])
parser.add_argument('--netmhciipan-ver', type=str,
default=defaults["mhc_peptide_binding"]["netmhciipan"]["version"],
                        help='Version of netmhciipan.')
parser.add_argument('--rankboost-ver', type=str,
default=defaults["prediction_ranking"]["rankboost"]["version"],
help='Version of rankboost.')
parser.add_argument('--chromosomes', type=str, nargs=2, default="")
parser.add_argument('--genome-fasta', type=str,
default="S3://protect-data/<reference_build>_references/"
"<reference_build>.fa.tar.gz",
help='Genome fasta to be used by the mutation callers.')
parser.add_argument('--genome-fai', type=str,
default="S3://protect-data/<reference_build>_references/"
"<reference_build>.fa.fai.tar.gz",
help='Corresponding fai file for the genome fasta.')
parser.add_argument('--genome-dict', type=str,
default="S3://protect-data/<reference_build>_references/"
"<reference_build>.dict.tar.gz",
help='Corresponding dict file for the genome fasta.')
parser.add_argument('--cosmic-vcf', type=str,
default="S3://protect-data/<reference_build>_references/"
"CosmicCodingMuts.vcf.tar.gz",
help='vcf for cosmic coding.')
parser.add_argument('--cosmic-idx', type=str,
default="S3://protect-data/<reference_build>_references/"
"CosmicCodingMuts.vcf.idx.tar.gz",
help='Corresponding idx for the cosmic coding vcf.')
parser.add_argument('--dbsnp-vcf', type=str,
default="S3://protect-data/<reference_build>_references/"
"dbsnp_coding.vcf.gz",
help='vcf for dbsnp.')
parser.add_argument('--dbsnp-idx', type=str,
default="S3://protect-data/<reference_build>_references/"
"dbsnp_coding.vcf.idx.tar.gz",
help='Corresponding idx for the dbsnp vcf.')
parser.add_argument('--dbsnp-tbi', type=str,
default="S3://protect-data/<reference_build>_references/"
"dbsnp_coding.vcf.gz.tbi",
help='Tabix index for dbsnp.gz.')
parser.add_argument('--mhc-pathways-file', type=str,
default="S3://cgl-pipeline-inputs/protect/ci_references/"
"mhc_pathways.tsv.tar.gz",
                        help='TSV file containing the various genes in the MHC pathway '
                             'and their mean TPM expressions across samples in a background set.')
parser.add_argument('--itx-resistance-file', type=str,
default="S3://cgl-pipeline-inputs/protect/ci_references/"
"itx_resistance.tsv.tar.gz",
help='')
parser.add_argument('--immune-resistance-pathways-file', type=str,
default="S3://cgl-pipeline-inputs/protect/ci_references/"
"immune_resistance_pathways.json.tar.gz",
help='')
parser.add_argument('--car-t-targets-file', type=str,
default="S3://cgl-pipeline-inputs/protect/ci_references/"
"car_t_targets.tsv.tar.gzz",
help='')
parser.add_argument('--dockerhub', type=str, default='aarjunrao')
parser.add_argument('--javaxmx', default='20G', type=str)
parser.add_argument('--sse-key', type=str, default='',
help='Path to the desired SSE-key, if any.')
parser.add_argument('--sse-key-is-master', type=str2bool, default='False',
help='Indicates if the passed sse-key is the master key.')
parser.add_argument('--gdc-download-token', type=str,
help='A download token used to download files from the GDC')
parser.add_argument('--mail-to', type=str,
help='Address to send an email to upon successful completion.')
parser.add_argument('--work-mount', required=True,
help='Mount where intermediate files should be written. This directory '
'should be mirror mounted into the container.')
args = parser.parse_args()
args.chromosomes = ', '.join(args.chromosomes)
for key in args.__dict__:
try:
args.__dict__[key] = args.__dict__[key].replace('<reference_build>',
args.reference_build)
except AttributeError:
pass
try:
args.__dict__[key] = args.__dict__[key].replace('<reference_gencode>',
'v19' if args.reference_build == 'hg19'
else 'v25')
except AttributeError:
pass
command = []
wrapper.run(args, command)
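# --- Hedged invocation sketch; the script name and file paths are
# illustrative, not taken from this repository.
# python protect_wrapper.py \
#     --sample-name TEST \
#     --tumor-dna tumor_R1.fq.gz --tumor-dna2 tumor_R2.fq.gz \
#     --normal-dna normal_R1.fq.gz --normal-dna2 normal_R2.fq.gz \
#     --tumor-rna rna_R1.fq.gz --tumor-rna2 rna_R2.fq.gz \
#     --tumor-type STAD --work-mount /mnt/work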
|
nilq/baby-python
|
python
|