text string | size int64 | token_count int64 |
|---|---|---|
#!/usr/bin/env python3
""" api access for google sheets (and friends)
Usage:
googapis auth (sheets|docs|drive)... [options] [--drive-scope=<SCOPE>...]
Examples:
googapis auth sheets
Options:
--store-file=<PATH>... write to a specific store file
-n --readonly set the readonly scope
--drive-scope=<SCOPE>... add drive scopes (overrides readonly)
values: appdata
file
metadata
metadata.readonly
photos.readonly
readonly
scripts
-d --debug
"""
import sys
from pathlib import Path
from pyontutils.utils import log
from pyontutils.clifun import Dispatcher, Options as BaseOptions
from pyontutils.sheets import _get_oauth_service
log = log.getChild('googapis')
class Options(BaseOptions):
    """Parsed CLI options; validates --drive-scope values before dispatch."""

    # The drive scope suffixes googapis knows how to request.
    drive_scopes = (
        'appdata',
        'file',
        'metadata',
        'metadata.readonly',
        'photos.readonly',
        'readonly',
        'scripts',)

    def __new__(cls, args, defaults):
        # Reject unknown drive scopes up front with a single error message.
        bads = [scope for scope in args['--drive-scope']
                if scope not in cls.drive_scopes]
        if bads:
            log.error(f'Invalid scopes! {bads}')
            sys.exit(1)
        return super().__new__(cls, args, defaults)

    @property
    def store_file(self):
        # NOTE(review): --store-file is declared repeatable (`...`) in the
        # usage string, so docopt may hand back a list here — confirm intent.
        return Path(self._args['--store-file']).resolve()
class Main(Dispatcher):
    """Dispatches the `auth` subcommand for the requested Google APIs."""

    @property
    def _scopes(self):
        """Yield the OAuth scope URLs implied by the parsed options."""
        base = 'https://www.googleapis.com/auth/'
        suffix = '.readonly' if self.options.readonly else ''
        if self.options.sheets:
            yield base + 'spreadsheets' + suffix
        if self.options.docs:
            # Bug fix: the Docs API scope is auth/documents — the original
            # yielded the misspelled (and invalid) 'doccuments'.
            yield base + 'documents' + suffix
        if self.options.drive:
            suffixes = []
            suffixes += ['.' + s for s in self.options.drive_scope]
            if suffix and not suffixes:
                # --readonly only applies when no explicit drive scopes given.
                suffixes.append(suffix)
            if not suffixes:
                # No modifier at all: plain auth/drive.
                suffixes = '',
            for suffix in suffixes:
                yield base + 'drive' + suffix

    def auth(self):
        """Run the OAuth flow for the selected scopes and log the result."""
        newline = '\n'
        scopes = list(self._scopes)
        if self.options.debug:
            log.debug(f'requesting for scopes:\n{newline.join(scopes)}')
        service = _get_oauth_service(readonly=self.options.readonly, SCOPES=scopes,
                                     store_file=self.options.store_file)
        # FIXME decouple this ...
        log.info(f'Auth finished successfully for scopes:\n{newline.join(scopes)}')
def main():
    """CLI entry point: parse argv with docopt, validate, and dispatch."""
    from docopt import docopt, parse_defaults
    args = docopt(__doc__, version='googapis 0.0.0')
    defaults = {o.name: o.value if o.argcount else None
                for o in parse_defaults(__doc__)}
    options = Options(args, defaults)
    main = Main(options)
    if main.options.debug:
        log.setLevel('DEBUG')
        print(main.options)
    main()


if __name__ == '__main__':
    main()
| 3,133 | 905 |
class Solution:
    def findPermutation(self, s: str) -> List[int]:
        """Return the lexicographically smallest permutation of 1..len(s)+1
        that matches the 'I'/'D' pattern: each maximal run of 'D's is handled
        by reversing the corresponding ascending slice in place."""
        result = list(range(1, len(s) + 2))
        n = len(s)
        pos = 0
        while pos < n:
            if s[pos] == 'D':
                # Extend to the end of this maximal 'D' run.
                run_end = pos
                while run_end < n and s[run_end] == 'D':
                    run_end += 1
                # A run covering s[pos..run_end-1] flips result[pos..run_end].
                result[pos:run_end + 1] = reversed(result[pos:run_end + 1])
                pos = run_end
            else:
                pos += 1
        return result
| 517 | 214 |
from django.contrib import admin
# from models import PasswordReset
#
# class PasswordResetAdmin(admin.ModelAdmin):
# list_display = ["user", "temp_key", "timestamp", "reset"]
#
# admin.site.register(PasswordReset, PasswordResetAdmin)
| 242 | 70 |
"""
WayScript Errors
"""
class MissingCredentialsError(Exception):
    """Raised when a workspace integration lacks the credentials it needs."""
| 165 | 44 |
import numpy as np
from itertools import product
from tensorflow.keras.models import load_model
from sklearn.feature_selection import SelectKBest, f_regression
from joblib import load
import random as rnd
class Gridex:
    """GridEx-style rule extraction: partition the input space into
    hyper-cubes, label each with the model's mean prediction, and merge
    adjacent low-variance cubes.

    Bug fix: two `if ~found:` checks used bitwise NOT on a bool
    (`~True == -2`, which is truthy), so they fired unconditionally;
    they are now `if not found:`.
    """

    def __init__(self, target, name, ext, feat, steps, th, adap = None):
        print("GridEx -", name, "data set")
        self.name = name
        self.ext = ext
        self.feat = feat
        self.target = target
        self.steps = steps
        self.threshold = th
        # Trained regressor plus the train/test inputs it was fit on.
        self.model = load_model("models/{}".format(name))
        self.fake = load("datasets/train/x/{}.{}.joblib".format(name, ext))
        self.Xtrain = np.array(self.fake)
        self.Xtest = np.array(load("datasets/test/x/{}.{}.joblib".format(name, ext)))
        self.__adaptiveSplits(adap)
        self.__createSurrounding()
        self.__iterate()

    def __count(self, c, samples, mean = False):
        """Count samples falling inside cube `c`; with mean=True also return
        the model's mean/std prediction over those samples."""
        cond = np.ones((len(samples),), dtype = bool)
        for i, f in enumerate(self.feat):
            [a, b] = c[f]
            col = samples[:, i]
            cond &= ((a <= col) & (col <= b))
        n = len(np.nonzero(cond)[0])
        if mean:
            if n > 0:
                pred = self.model.predict(samples[cond])
                return n, samples[cond].tolist(), pred.mean(), pred.std()
            else:
                return n, samples[cond].tolist(), 0, 0
        else:
            return n, samples[cond].tolist()

    def __predict(self, samples):
        """Predict each sample from the first hyper-cube that contains it;
        samples outside every cube map to NaN."""
        ret = []
        for s in samples:
            for hc in self.hyperCubes:
                found = True
                c = self.hyperCubes[hc]
                for i, f in enumerate(self.feat):
                    [a, b] = c[f]
                    v = s[i]
                    found &= (a <= v <= b)
                    if not found:  # fixed: was `if ~found:` (always truthy)
                        break
                if found:
                    ret.append(c[self.target])
                    break
            # NOTE(review): if self.hyperCubes is empty, `found` is unbound
            # here — confirm that never happens in practice.
            if not found:  # fixed: was `if ~found:` (always truthy)
                ret.append(np.nan)
        return ret

    def __createSurrounding(self):
        """Build the surrounding cube covering all train/test samples."""
        self.minmax = { "std" : 2 * self.threshold, self.target : 0 } # surrounding cube
        for i, c in enumerate(self.feat):
            mi = min(self.Xtrain[:, i].min(), self.Xtest[:, i].min())
            ma = max(self.Xtrain[:, i].max(), self.Xtest[:, i].max())
            eps = 1e-5  # widen slightly so boundary samples are included
            self.minmax[c] = [mi - eps, ma + eps]
        # Total volume of the surrounding cube (for coverage reporting).
        self.V = 1.
        for f in self.feat:
            [a, b] = self.minmax[f]
            self.V *= (b - a)

    def __iterate(self):
        """Iteratively split high-variance cubes and merge stable ones."""
        prev = { 0 : self.minmax }
        tot = 0
        for step in self.steps:
            self.hyperCubes = {}
            for c in prev:
                self.split = {}
                if self.__count(prev[c], self.Xtrain)[0] == 0:
                    continue  # empty cube: drop it
                if prev[c]["std"] < self.threshold:
                    # Already stable: keep as-is.
                    self.hyperCubes[len(self.hyperCubes)] = prev[c]
                    continue
                ranges = {}
                for (f, imp) in zip(self.feat, self.scores):
                    r = []
                    [a, b] = prev[c][f]
                    if self.adap is not None:
                        # NOTE: deliberately shadows the loop variable `step`
                        # with the per-feature adaptive split count.
                        step = self.adap[f]
                    s = (b - a) / step
                    for i in range(step):
                        r.append([a + s * i, a + s * (i + 1)])
                    ranges[f] = r
                prod = list(product(*ranges.values()))
                tot += len(prod)
                for (pn, p) in enumerate(prod):
                    print("{:.2f}%".format(pn / len(prod) * 100), end = "\r")
                    cube = { self.target : 0 }
                    for i, f in enumerate(self.feat):
                        cube[f] = p[i]
                    n, s, m, std = self.__count(cube, self.Xtrain, True)
                    # NOTE(review): __produceFake samples from the surrounding
                    # cube (self.minmax), not from `cube` — confirm intended.
                    self.__produceFake(cube, n)
                    nn, s, m, std = self.__count(cube, np.array(self.fake), True)
                    if n > 0:
                        cube[self.target] = m
                        cube["std"] = std
                        cube["n"] = n
                        if std > self.threshold:
                            # Still too varied: re-split in the next pass.
                            self.hyperCubes[len(self.hyperCubes)] = cube
                        else:
                            # Stable: candidate for merging.
                            self.split[len(self.split)] = cube
                co = 0
                to = len(self.split)
                # Caches for adjacency/merge results across merge rounds.
                self.oldAdj = {}
                self.oldMer = {}
                self.last = [i for i in self.split]
                while(self.__merge()):
                    co += 1
                    print("merged", co, "of", to, " " * 20, end = "\r")
                for res in self.split:
                    n, s = self.__count(self.split[res], self.Xtrain, False)
                    self.hyperCubes[len(self.hyperCubes)] = self.split[res]
            print("Useful hyper-cubes:", len(self.hyperCubes), "of", tot)
            self.__checkV()
            prev = self.hyperCubes.copy()
            self.metrics()
            print()

    def __merge(self):
        """Merge the best pair of adjacent stable cubes; True if one merged."""
        ret = False
        checked = []
        self.temp = []
        for i in self.split:
            checked.append(i)
            for j in self.split:
                if j not in checked:
                    if (i in self.last) or (j in self.last):
                        adj = self.__adjacent(self.split[i], self.split[j])
                    else:
                        adj = self.oldAdj[(i, j)]  # reuse cached adjacency
                    if adj is not None:
                        self.temp.append((i, j, adj))
                    self.oldAdj[(i, j)] = adj
        merged = []
        for (i, j, adj) in self.temp:
            if (i in self.last) or (j in self.last):
                t = self.__tempCube(i, j, adj)
                self.oldMer[(i, j)] = t
            else:
                t = self.oldMer[(i, j)]
            if t is not None:
                merged.append(t)
        if(len(merged) > 0):
            # Pick the candidate with the lowest merged std.
            std, c1, c2, mi = min(merged)
            del self.split[c1]
            del self.split[c2]
            self.last = [c1, c2]
            self.split[c1] = mi
            ret = True
        return ret

    def __tempCube(self, i, j, f):
        """Tentatively merge cubes i and j along feature f; None if unstable."""
        c1 = self.split[i]
        c2 = self.split[j]
        cube = {}
        for k in self.feat:
            if k != f:
                cube[k] = c1[k]
            else:
                [a1, b1] = c1[f]
                [a2, b2] = c2[f]
                cube[f] = [min(a1, a2), max(b1, b2)]
        n, s, m, std = self.__count(cube, np.array(self.fake), True)
        cube[self.target] = m
        cube["std"] = std
        cube["n"] = n
        if std < self.threshold:
            return (std, i, j, cube)
        else:
            return None

    def __adjacent(self, c1, c2):
        """Return the single feature along which c1 and c2 share a face,
        or None when they differ in more than one feature / do not touch."""
        adj = None
        for f in self.feat:
            if c1[f] == c2[f]:
                continue
            if adj is not None:
                return None  # differ in a second feature
            [a1, b1] = c1[f]
            [a2, b2] = c2[f]
            if (b1 == a2) or (b2 == a1):
                adj = f
            else:
                return None
        return adj

    def __produceFake(self, cube, n):
        """Top samples up to 15 by drawing uniform points (see NOTE in
        __iterate: drawn from the surrounding cube, not `cube`)."""
        for i in range(n, 15):
            sample = []
            for f in self.feat:
                [a, b] = self.minmax[f]
                sample.append(rnd.uniform(a, b))
            self.fake.append(sample)

    def __adaptiveSplits(self, adap):
        """Score features (f_regression vs model output) and derive the
        per-feature split count from the `adap` (threshold, steps) pairs."""
        fs = SelectKBest(score_func = f_regression, k = "all")
        fit = fs.fit(self.Xtrain, self.model.predict(self.Xtrain).flatten())
        self.scores = np.array(fit.scores_) / max(fit.scores_)
        #print(self.scores)
        self.adap = {}
        if adap is not None:
            for (f, imp) in zip(self.feat, self.scores):
                step = 1
                for (l, s) in adap:
                    if imp > l:
                        step = s
                    else:
                        break
                self.adap[f] = step
        else:
            self.adap = None
        #print(self.adap)

    def __volume(self, hc):
        """Volume of one hyper-cube."""
        v = 1.
        for f in self.feat:
            [a, b] = hc[f]
            v *= (b - a)
        return v

    def __checkV(self):
        """Report how much of the surrounding cube the rules cover."""
        tot = 0.
        self.vols = []
        for c in self.hyperCubes:
            hc = self.hyperCubes[c]
            v = self.__volume(hc)
            self.vols.append(v / self.V)
            tot += v
        print("Covered {:.2f}% of the surrounding cube".format(tot / self.V * 100))

    def metrics(self, p = True):
        """Evaluate rule predictions against the data and the ANN.

        Returns (per-cube train sample counts, relative cube volumes).
        """
        ITER = np.array(self.__predict(self.Xtest))
        TRUE = load("datasets/test/y/{}.{}.joblib".format(self.name, self.ext)).values
        ANN = self.model.predict(self.Xtest).flatten()
        nan = np.count_nonzero(np.isnan(ITER))
        if nan > 0:
            if p:
                print(nan, "outliers of", len(self.Xtest), "test samples ({:.2f}%)".format(nan / len(self.Xtest) * 100))
            # Drop samples not covered by any cube (here `~` IS correct:
            # numpy element-wise invert of a boolean array).
            idx = np.argwhere(~np.isnan(ITER))
            ITER = ITER[idx]
            TRUE = TRUE[idx]
            ANN = ANN[idx]
        if p:
            print("MAE wrt data: {:.2f}, wrt ANN: {:.2f}, ANN MAE: {:.2f}".format(self.__mae(ITER, TRUE), self.__mae(ITER, ANN), self.__mae(ANN, TRUE)))
            print("R2 wrt data: {:.2f}, wrt ANN: {:.2f}, ANN MAE: {:.2f}".format(self.__r2(ITER, TRUE), self.__r2(ITER, ANN), self.__r2(ANN, TRUE)))
            print()
        n = []
        for h in self.hyperCubes:
            # The original passed self.feat as the `mean` flag, forcing a
            # needless model.predict per cube; n is identical without it.
            n.append(self.__count(self.hyperCubes[h], self.Xtrain)[0])
        return (n, self.vols)

    def __r2(self, pred, true):
        """Coefficient of determination of pred vs true."""
        u = ((true - pred)**2).sum()
        v = ((true - true.mean())**2).sum()
        r2 = 1 - u / v
        return r2

    def __mae(self, pred, true):
        """Mean absolute error of pred vs true."""
        return abs(pred - true).mean()
import xmltodict
from browsers import load_firefox
GOOGLE_SITEMAP_URL = "https://careers.google.com/jobs/sitemap"
def get_xml():
    """Fetch the Google careers sitemap and parse it into a dict.

    Bug fix: the browser is now quit in a finally block, so a failed fetch
    or parse no longer leaks a running Firefox instance.
    """
    browser = load_firefox()
    try:
        browser.get(GOOGLE_SITEMAP_URL)
        jobs_data = xmltodict.parse(browser.page_source)
    finally:
        browser.quit()
    return jobs_data
def get_jobs():
    """Return the list of job <url> entries from the parsed sitemap."""
    sitemap = get_xml()
    return sitemap['urlset']['url']
def get_internships(jobs):
    """Filter job entries whose URL marks them as internships."""
    return [
        job for job in jobs
        if '-intern-' in job['loc'] or '-intern/' in job['loc']
    ]
| 630 | 217 |
# Testing Upload image feature
import tempfile
import os
from PIL import Image
# ----------------
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework import test
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPES_URL = reverse('recipe:recipe-list') # generated by viewsets
# /api/recipe/recipes
# /api/recipe/recipes/1/
def detail_url(recipe_id):
    """Return the detail URL for one recipe (/api/recipe/recipes/<id>/)."""
    return reverse('recipe:recipe-detail', args=(recipe_id,))
# helper function for creating sample recipes, tag, ingredient
def sample_ingredient(user, name='Cinnamon'):
    """Create and return a sample ingredient owned by `user`."""
    return Ingredient.objects.create(name=name, user=user)
def sample_tag(user, name='MAIN COURSE'):
    """Create and return a sample tag owned by `user`."""
    return Tag.objects.create(name=name, user=user)
def sample_recipe(user, **params):
    """Create and return a sample recipe; `params` override the defaults."""
    fields = {
        'title': 'sample recipe',
        'time_minutes': 10,
        'price': 5.00,
        **params,  # caller overrides win over the defaults above
    }
    return Recipe.objects.create(user=user, **fields)
def image_upload_url(recipe_id):
    """Return the image-upload URL for one recipe."""
    return reverse('recipe:recipe-upload-image', args=(recipe_id,))
class PublicRecipeApiTests(TestCase):
    """Verify the recipe endpoints reject unauthenticated access."""

    def setUp(self):
        self.client = APIClient()

    def test_auth_required(self):
        """An anonymous GET on the list endpoint must return 401."""
        response = self.client.get(RECIPES_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTest(TestCase):
    """ Tests Authenticated Api Access """

    def setUp(self):
        # Every request in this class runs as a freshly created, forcibly
        # authenticated user.
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            'test@eniac.com',
            'testpass',
        )
        self.client.force_authenticate(self.user)

    def test_retrieve_recipes(self):
        """ test retrieving a list of recipes """
        sample_recipe(user=self.user)
        sample_recipe(user=self.user)
        res = self.client.get(RECIPES_URL)
        # Expected payload: every recipe, newest first.
        recipes = Recipe.objects.all().order_by('-id')
        serializer = RecipeSerializer(recipes, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_recipes_limited_to_user(self):
        """ test retrieving recipes for user """
        # A second user's recipe must NOT appear in self.user's listing.
        user2 = get_user_model().objects.create_user(
            'test2@eniac.com',
            'testpass2',
        )
        sample_recipe(user=user2)
        sample_recipe(user=self.user)
        res = self.client.get(RECIPES_URL)
        recipes = Recipe.objects.filter(
            user=self.user
        )
        serializer = RecipeSerializer(recipes, many=True)  # many=True: it's a list
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data, serializer.data)

    def test_view_recipe_detail(self):
        """ tests viewing a recipe detail """
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        recipe.ingredients.add(sample_ingredient(user=self.user))
        url = detail_url(recipe.id)
        res = self.client.get(url)
        serializer = RecipeDetailSerializer(recipe)  # a single object, no many=True
        self.assertEqual(res.data, serializer.data)

    def test_create_basic_recipe(self):
        """ test creating recipe"""
        payload = {
            'title': 'chocolate cheesecake',
            'time_minutes':30,
            'price':5.00,
        }
        res = self.client.post(RECIPES_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        # Each posted field must round-trip onto the created model instance.
        for key in payload.keys():
            self.assertEqual(payload[key], getattr(recipe, key))
        # getattr is a built-in python function

    def test_create_recipe_with_tags(self):
        """ test creating a recipe with tags """
        tag1 = sample_tag(user=self.user, name='Vegen')
        tag2 = sample_tag(user=self.user, name='Dessert')
        payload = {
            'title': 'avocado lime cheese cake',
            'tags': [tag1.id, tag2.id],
            'time_minutes': 60,
            'price': 20.00
        }
        res = self.client.post(RECIPES_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        tags = recipe.tags.all()  # returns all tags as a queryset
        self.assertEqual(tags.count(), 2)
        self.assertIn(tag1, tags)
        self.assertIn(tag2, tags)
        # use assertIn for checking lists or checking querysets

    def test_create_recipe_with_ingredients(self):
        # Same shape as the tags test, but for ingredient assignment.
        ingredient1 = sample_ingredient(user=self.user, name="Prawns")
        ingredient2 = sample_ingredient(user=self.user, name="Jinja")
        payload = {
            'title': "thai prawn red curry",
            'ingredients': [ingredient1.id, ingredient2.id],
            'time_minutes': 20,
            'price': 7.00
        }
        res = self.client.post(RECIPES_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        ingredients = recipe.ingredients.all()
        self.assertEqual(ingredients.count(), 2)
        self.assertIn(ingredient1, ingredients)
        self.assertIn(ingredient2, ingredients)
class RecipeImageUploadTests(TestCase):
    # Image-upload and filter tests for an authenticated user with one
    # pre-created recipe.

    def setUp(self):
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            'test@eniac.com',
            'testpass',
        )
        self.client.force_authenticate(self.user)
        self.recipe = sample_recipe(user=self.user)

    def tearDown(self):  # triggered after each test completes
        # Remove the uploaded file so test images don't accumulate on disk.
        self.recipe.image.delete()

    def test_upload_image_to_recipe(self):
        """ test uploading image to recipe """
        url = image_upload_url(self.recipe.id)
        with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
            # Build a tiny real JPEG, then rewind so the client can read it.
            img = Image.new('RGB', (10,10))
            img.save(ntf, format='JPEG')
            ntf.seek(0)
            res = self.client.post(url, {'image': ntf}, format='multipart')
            # multipart posts form data instead of json
        self.recipe.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertIn('image', res.data)
        self.assertTrue(os.path.exists(self.recipe.image.path))

    def test_upload_image_bad_request(self):
        """ test uploading an invalid image """
        url = image_upload_url(self.recipe.id)
        res = self.client.post(url, {'image': 'Not Image'}, format='multipart')
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_filter_recipes_by_tags(self):
        """ Tests Returning recipes with specific tags """
        recipe1 = sample_recipe(user=self.user, title='Thai vegetable curry')
        recipe2 = sample_recipe(user=self.user, title='aubergine with tahini')
        tag1 = sample_tag(user=self.user, name='vegan')
        tag2 = sample_tag(user=self.user, name='vegeterian')
        recipe1.tags.add(tag1)
        recipe2.tags.add(tag2)
        recipe3 = sample_recipe(user=self.user, title='Fish and Chips')
        # NOTE(review): the ids are joined with ', ' (comma + space) —
        # confirm the view strips whitespace when splitting the filter.
        res = self.client.get(
            RECIPES_URL,
            {'tags': f'{tag1.id}, {tag2.id}'}
        )
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)

    def test_filter_recipes_by_ingredients(self):
        """ Tests returning recipes with specific ingredients """
        recipe1 = sample_recipe(user=self.user, title='posh bin on toasts')
        recipe2 = sample_recipe(user=self.user, title='Chicken cacciatore')
        ingredient1 = sample_ingredient(user=self.user, name='Feta Cheese')
        ingredient2 = sample_ingredient(user=self.user, name='Chicken')
        recipe1.ingredients.add(ingredient1)
        recipe2.ingredients.add(ingredient2)
        recipe3 = sample_recipe(user=self.user, title='Steak and mushroom')
        res = self.client.get(
            RECIPES_URL,
            {'ingredients': f'{ingredient1.id}, {ingredient2.id}'}
        )
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)
| 9,162 | 2,984 |
import requests
from lxml import html
import os
import sys
sys.path.append("..")
header = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36"}
def adafruitScrape(link):
    """Scrape an Adafruit product page.

    Returns [name, price, url]; price is the dollar amount without '$'.
    (The dict form of `info` was dead code — immediately overwritten by the
    list — and has been removed.)
    """
    link = link.strip()
    response = requests.get(link, headers=header)
    root = html.fromstring(response.content)
    name = root.xpath('//h1[@class="products_name"]/text()')[0].strip()
    price = root.xpath('//div[@class="product-price"]/span/text()')[0].strip("$")
    return [name, price, link]
def amazonScrape(link):
    """Scrape an Amazon product page.

    Returns [name, price, url]; price joins the integer and fractional parts
    found in the page markup. (The dead `info` dict assignment was removed.)
    """
    link = link.strip()
    response = requests.get(link, headers=header)
    root = html.fromstring(response.content)
    name = root.xpath('//span[@id="productTitle"]/text()')[0].strip()
    leftDecPrice = root.xpath('//span[@class="price-large"]/text()')[0].strip()
    rightDecPrice = root.xpath('//span[@class="a-size-small price-info-superscript"]/text()')[1].strip()
    price = "{}.{}".format(leftDecPrice, rightDecPrice)
    return [name, price, link]
def scrapePartData(srcFile):
    """Read product links from srcFile and replace each supported link with
    its scraped [name, price, url] record; unrecognized lines pass through."""
    with open(srcFile, "r") as handle:
        entries = handle.readlines()
    for idx, link in enumerate(entries):
        if "https://www.amazon.com/" in link:
            entries[idx] = amazonScrape(link)
        elif "https://www.adafruit.com/" in link:
            entries[idx] = adafruitScrape(link)
    return entries
def defaultFile():
    """Return the path of the first entry in the local 'data' directory."""
    entries = os.listdir("data")
    return ["data/" + entry for entry in entries][0]
import pdb
import subprocess
from functools import partial, wraps
from math import prod
from pathlib import Path
from pprint import pprint
from string import (
ascii_lowercase,
digits as ascii_digits,
)
from typing import Any, Callable, List, Iterable, Optional, Union
from toolz import ( # type: ignore
compose_left,
concat,
curry,
do,
excepts,
keyfilter,
pluck,
pipe,
unique,
)
IterableS = Iterable[str]
hexc = ["a", "b", "c", "d", "e", "f"] + list(ascii_digits)
def toolz_pick(keep: IterableS, d: dict) -> dict:
    """Keep only the keys of `d` listed in `keep` (toolz.keyfilter form)."""
    return keyfilter(lambda key: key in keep, d)
def toolz_omit(remove: IterableS, d: dict) -> dict:
    """Drop the keys of `d` listed in `remove` (toolz.keyfilter form)."""
    return keyfilter(lambda key: key not in remove, d)
def pick(keep: IterableS, d: dict) -> dict:
    """Return a copy of `d` restricted to the keys in `keep`."""
    return {key: value for key, value in d.items() if key in keep}
def omit(remove: IterableS, d: dict) -> dict:
    """Return a copy of `d` without the keys in `remove`."""
    return {key: value for key, value in d.items() if key not in remove}
def add_debug(debug_f: Callable, orig_f: Callable) -> Callable:
    """
    Wrap `orig_f` so each result is handed to `debug_f` before being
    returned unchanged.

    add_debug(print, str.upper)("abc") prints "ABC" and returns "ABC".
    """
    return compose_left(orig_f, partial(do, debug_f))
def add_debug_list(debug_f: Callable, funcs: List[Callable]) -> List[Callable]:
    """Wrap every function in `funcs` with add_debug(debug_f, ...)."""
    return [add_debug(debug_f, func) for func in funcs]
def run_process(
    command: Union[list, str], options: Optional[dict] = None
) -> subprocess.CompletedProcess:
    """Run `command` with check/text/capture_output on by default; entries in
    `options` override those defaults."""
    merged = {"check": True, "text": True, "capture_output": True}
    if options:
        merged.update(options)
    # pylint: disable=subprocess-run-check
    return subprocess.run(command, **merged)  # type: ignore
def until_stable(func: Callable) -> Callable:
    """
    Repeatedly apply `func` to its own output until the result stops
    changing, then return that fixed point.

    Bug fix: the recursive step now forwards **kwds, so keyword arguments
    are honored on every iteration instead of only the first. Also calls
    `func` once per level instead of twice.
    """
    def inner(arg: Any, **kwds: Any) -> Any:
        result = func(arg, **kwds)
        if result == arg:
            return arg
        return inner(result, **kwds)
    return inner
def oxford(lst: List[str]) -> str:
    """
    Format a list as an English list phrase with an Oxford comma.

    ["a"] -> "a"; ["a", "b"] -> "a and b";
    ["a", "b", "c"] -> "a, b, and c"; ["a", "b", "c", "d"] -> "a, b, c, and d".
    """
    if len(lst) > 2:
        *head, tail = lst
        return f'{", ".join(head)}, and {tail}'
    return " and ".join(lst)
def excepts_wrap(err: Any, err_func: Callable) -> Callable:
    """
    Decorator form of toolz.excepts.

    @excepts_wrap(ValueError, lambda _: None)
    def parse(value): ...

    is equivalent to wrapping `parse` with
    excepts(ValueError, parse, lambda _: None): exceptions of type `err`
    are routed to `err_func` instead of propagating.
    """
    def decorator(fn: Callable) -> Callable:
        return excepts(err, fn, err_func)
    return decorator
# List-returning variants of the lazy builtins.
lfilter = compose_left(filter, list)  # lambda f, l: [*filter(f, l)]
lmap = compose_left(map, list)  # lambda f, l: [*map(f, l)]
lpluck = compose_left(pluck, list)  # lambda k, l: [*pluck(f, l)]
c_map = curry(map)
c_lmap = curry(lmap)
# Character-class predicates, built by binding the allowed alphabet.
is_char_az = partial(lambda y, x: x in y, ascii_lowercase)
is_char_hex = partial(lambda y, x: x in y, hexc)
is_char_az09 = partial(lambda y, x: x in y, ascii_lowercase + ascii_digits)
# filter_str(pred, s) keeps only the characters of s satisfying pred.
filter_str = partial(lambda f, s: "".join(filter(f, s)))
filter_az = partial(filter_str, is_char_az)
filter_az09 = partial(filter_str, is_char_az09)
filter_hex = partial(filter_str, is_char_hex)
add_pprint = partial(add_debug, pprint)
add_pprinting = partial(lmap, add_pprint)
lcompact = partial(lfilter, None)  # drop falsy elements from a list
def group_to_unique(group):
    """Return the distinct characters across a group's lines, in first-seen
    order."""
    return list(unique("".join(group)))
def group_to_unan(group):
    """Count characters answered by every member of the group.

    A character counts when its occurrences equal the number of non-empty
    lines. (A second, unreachable implementation that followed the original
    return statement has been removed.)
    """
    group = lcompact(group)
    total = list(concat(group))
    return sum(1 for ch in unique(total) if total.count(ch) == len(group))
def unanimous(group):
    """Unimplemented stub; returns None for any input."""
def process(text):
    """Split `text` into blank-line-separated groups and tally answers.

    Returns the sum over groups of the number of distinct characters.
    (The original dropped into pdb.set_trace() and returned None; that
    debugger call was development residue and has been removed.)
    """
    groups = lcompact(_.split("\n") for _ in text.split("\n\n"))
    ugr = lmap(group_to_unique, groups)
    count = sum(len(_) for _ in ugr)
    # Per-group unanimous counts; computed for inspection but not returned,
    # keeping the existing call sites unchanged.
    unangr = lmap(group_to_unan, groups)
    return count
if __name__ == "__main__":
    # test = Path("test-input-00.txt").read_text().strip()
    # test_answer = whatever
    # assert process(test, params) == test_answer
    # Read the puzzle input and run the day's processing.
    raw = Path("input-06.txt").read_text()
    raw = raw.strip() # comment this out if trailing stuff is important!
    result = process(raw)
| 5,288 | 1,843 |
from abc import ABC, abstractmethod
class Abstract(ABC):
    """Abstract base with a class-level PRICE and default method bodies that
    concrete subclasses can delegate to via super()."""

    PRICE = 0  # default price; concrete classes typically override this

    @abstractmethod
    def __init__(self, name, age):
        # Shared attribute storage; subclasses must call super().__init__.
        self.name = name
        self.age = age

    @abstractmethod
    def show_name_capitalize(self):
        # Default behavior: title-case the stored name.
        return self.name.title()

    @abstractmethod
    def show_price(self):
        # Resolve PRICE on the concrete class, not on Abstract.
        return self.__class__.PRICE
import npyscreen
import os
import getpass
import subprocess
class mainform(npyscreen.ActionForm):
    # Boot-parameter selection form shown at startup.

    def create(self):
        # Paths used to locate the OS scripts relative to this launcher.
        self.OSPath = "/SYS64 3.7/"
        self.full_path = os.getcwd()
        self.CurrentPath = ""
        self.dir_path = os.path.dirname(os.path.realpath(__file__))
        opt_values = ["jdos", "jdos.fboot", "jdos.admin", "jdos.exp"]
        welcome_box = self.add(npyscreen.BoxTitle,
                               max_height=2,
                               editable=False,
                               value="Welcome. Select BOOT PARAMETER")
        self.answer = self.add(npyscreen.TitleSelectOne,
                               max_height=4,
                               name="Selections:",
                               values=opt_values,
                               scroll_exit=True)

    def on_ok(self):
        # Launch the script matching the selected boot parameter.
        # NOTE(review): self.answer.value is empty when nothing is selected,
        # so [0] would raise IndexError — confirm a selection is guaranteed.
        if self.answer.value[0] == 0:
            subprocess.call(
                ["python3", self.dir_path + self.OSPath + "bootthingy.py"])
            subprocess.call(
                ["python3", self.dir_path + self.OSPath + "jdosos.py"])
        elif self.answer.value[0] == 1:
            # Fast boot: skip the boot animation script.
            subprocess.call(
                ["python3", self.full_path + self.OSPath + "jdosos.py"])
        elif self.answer.value[0] == 2:
            user = getpass.getpass("username: ")
            password = getpass.getpass("password: ")
            bootcheck = 0
            # NOTE(review): credentials are stored and compared in plain text
            # in userpass.txt — flagging for a hashed scheme.
            with open('userpass.txt', 'r') as file:
                for line in file:
                    line = line.strip('\n')
                    login = line.split(',')
                    if login[0] == user and login[1] == password:
                        subprocess.call([
                            "python3",
                            self.full_path + self.OSPath + "jdososadmin.py"
                        ])
                        bootcheck = 1
            if bootcheck == 0:
                print("Incorrect user or password.")
        elif self.answer.value[0] == 3:
            subprocess.call([
                "python3",
                self.full_path + self.OSPath + "jdosexperimentail.py"
            ])
        # Returning from the launched script ends the application.
        self.parentApp.setNextForm(None)

    def on_cancel(self):
        # Exit without booting anything.
        self.parentApp.setNextForm(None)
#Application starts here
class App(npyscreen.NPSAppManaged):
    """Top-level npyscreen application; registers the boot-menu form."""

    def onStart(self):
        # 'MAIN' is the form npyscreen displays first.
        self.addForm('MAIN', mainform, name="name_here")


if __name__ == "__main__":
    app = App().run()
#!/usr/bin/python
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Get all Version information from Xcode."""
import os.path
from autopkglib import Processor
try:
# python 2
from urlparse import urlsplit
except ImportError:
from urllib.parse import urlsplit
__all__ = ["XcodeVersionEmitter"]
class XcodeVersionEmitter(Processor):
    """Output a version number based on the URL. Skipped by default."""

    description = __doc__
    input_variables = {
        "dont_skip": {
            "required": False,
            "default": False,
            "description": ("If this evaluates as truthy, do not skip this step."),
        },
        "url": {"required": True, "description": ("URL to parse the version from.")},
        "output_filepath": {
            "required": True,
            "description": ("Path to which xcode version tag is emitted."),
        },
    }
    output_variables = {
        "derived_filename": {"description": "The derived filename to emit."}
    }
    __doc__ = description

    def main(self):
        """Main."""
        # Opt-in processor: unless dont_skip is truthy, do nothing.
        if not self.env["dont_skip"]:
            self.output("dont_skip is false, so skipping this Processor.")
            return
        url = self.env["url"]
        url_split_object = urlsplit(url)
        # Example URLs this must handle:
        # "https://download.developer.apple.com/Developer_Tools/Xcode_10.2.1/Xcode_10.2.1.xip" # noqa
        # "https://developer.apple.com//services-account/download?path=/Developer_Tools/Xcode_11_Beta_2/Xcode_11_Beta_2.xip" # noqa
        # Basename of the URL path without its extension, lowercased
        # (e.g. "xcode_10.2.1").
        filename = os.path.splitext(os.path.basename(url_split_object.path))[0].lower()
        self.output("Derived filename: {}".format(filename))
        self.env["derived_filename"] = filename
        # Expand $VARS in the target path, then persist the tag for later steps.
        destination = os.path.expandvars(self.env["output_filepath"])
        with open(destination, "w") as f:
            f.write(filename)
        self.output(
            "Derived filename ({}) written to disk at {}".format(
                filename, destination
            )
        )
if __name__ == "__main__":
    # Allow running the processor standalone, outside an autopkg recipe.
    PROCESSOR = XcodeVersionEmitter()
    PROCESSOR.execute_shell()
| 2,665 | 795 |
# -*- coding: utf-8 -*-
"""
Created on Thu May 28 14:31:02 2020
@author: Lucas
"""
import random

# Session tallies.
acertos = 0
erros = 0
perguntas = 0
print("Programa educativo matematico")
print("""Digite a opção desejada
[1] para Soma
[2] para subtração
[3] para divisão
[4] para multiplicação""")
opção = int(input("Digite sua opção: "))
perguntas2 = int(input("Quantas perguntas quer fazer? "))
# Bug fix applied in every branch below: the pass mark used to be
# `acertos > acertos*0.6`, which is true whenever acertos > 0; it now
# compares against 60% of the number of questions asked.
if opção == 1:
    while perguntas < perguntas2:
        aleatórios = random.randint(1, 100)
        aleatório2 = random.randint(1, 100)
        soma = int(input(f"Quanto vale {aleatórios} + {aleatório2} ? = "))
        if soma == (aleatórios + aleatório2):
            acertos += 1
            print("Acertou")
        else:
            print("Errou,a resposta é", (aleatórios + aleatório2))
            erros += 1
        perguntas += 1
    print(f"Voce acertou {acertos} e errou {erros} questões")
    if acertos > perguntas2 * 0.6:
        print("Parabéns, voce está indo muito bem, continue assim! XD")
    else:
        print("Precisa estudar mais,heim >.<")
elif opção == 2:
    while perguntas < perguntas2:
        aleatórios = random.randint(1, 100)
        aleatório2 = random.randint(1, 100)
        subtração = int(input(f"Quanto vale {aleatórios} - {aleatório2} ? = "))
        if subtração == (aleatórios - aleatório2):
            acertos += 1
            print("Acertou")
        else:
            print("Errou feio errou rude,a resposta é", (aleatórios - aleatório2))
            erros += 1
        perguntas += 1
    print(f"Voce acertou {acertos} e errou {erros} questões")
    if acertos > perguntas2 * 0.6:
        print("Parabéns, voce está indo muito bem, continue assim! XD")
    else:
        print("Precisa estudar mais,heim >.<")
elif opção == 3:
    print("Use apenas duas casas decimais")
    while perguntas < perguntas2:
        aleatórios = random.randint(1, 100)
        aleatório2 = random.randint(1, 100)
        divisão = float(input(f"Quanto vale {aleatórios} : {aleatório2} ? = "))
        # Answers are compared rounded to two decimal places.
        if divisão == round((aleatórios / aleatório2), 2):
            acertos += 1
            print("Acertou")
        else:
            print(f"Errou ,a resposta é {(aleatórios / aleatório2):.2f}")
            erros += 1
        perguntas += 1
    print(f"Voce acertou {acertos} e errou {erros} questões")
    if acertos > perguntas2 * 0.6:
        print("Parabéns, voce está indo muito bem, continue assim! XD")
    else:
        print("Precisa estudar mais,heim >.<")
elif opção == 4:
    while perguntas < perguntas2:
        aleatórios = random.randint(1, 100)
        aleatório2 = random.randint(1, 100)
        multiplicação = int(input(f"Quanto vale {aleatórios} * {aleatório2} ? = "))
        if multiplicação == (aleatórios * aleatório2):
            acertos += 1
            print("Acertou")
        else:
            print("Errou ,a resposta é", (aleatórios * aleatório2))
            erros += 1
        perguntas += 1
    print(f"Voce acertou {acertos} e errou {erros} questões")
    if acertos > perguntas2 * 0.6:
        print("Parabéns, voce está indo muito bem, continue assim! XD")
    else:
        print("Precisa estudar mais,heim >.<")
else:
    print("Oxe,essa opção não existe,ta loucona?")
| 3,209 | 1,194 |
# taken largely from https://github.com/ianvonseggern1/note-prediction
from pydub import AudioSegment
import pydub.scipy_effects
import numpy as np
import scipy
import matplotlib.pyplot as plt
from solo_generation_esac import *
from utils import frequency_spectrum, \
calculate_distance, \
classify_note_attempt_1, \
classify_note_attempt_2, \
classify_note_attempt_3
def main(file, note_arr=None, plot_starts=False, plot_fft_indices=[]):
    """Predict the notes of an audio file and, when note_arr is given,
    score the prediction against it.

    Returns the normalized score (higher is better) when actual notes are
    supplied, otherwise None.
    """
    # NOTE(review): mutable default for plot_fft_indices — it is only passed
    # through here, but consider plot_fft_indices=None.
    actual_notes = []
    if note_arr:
        actual_notes = note_arr
    song = AudioSegment.from_file(file)
    #song = song.high_pass_filter(80, order=4)
    # Detect note onsets, then classify the note at each onset.
    starts = predict_note_starts(song, plot_starts)
    predicted_notes = predict_notes(song, starts, plot_fft_indices)
    print("")
    if actual_notes:
        print("Actual Notes")
        print(actual_notes)
    print("Predicted Notes")
    print(predicted_notes)
    if actual_notes:
        # Edit distance between predicted and actual note sequences,
        # normalized by the number of actual notes.
        lev_distance = calculate_distance(predicted_notes, actual_notes)
        score = abs(len(actual_notes) - lev_distance)/len(actual_notes)
        print("Levenshtein distance: {}/{}".format(lev_distance, len(actual_notes)))
        return score
# Very simple implementation, just requires a minimum volume and looks for left edges by
# comparing with the prior sample, also requires a minimum distance between starts
# Future improvements could include smoothing and/or comparing multiple samples
#
# song: pydub.AudioSegment
# plot: bool, whether to show a plot of start times
# actual_starts: []float, time into song of each actual note start (seconds)
#
# Returns predicted starts in ms
def predict_note_starts(song, plot):
    """Predict note start times from left edges in the volume curve.

    Args:
        song: pydub.AudioSegment to analyse.
        plot: bool, if True show the volume curve with predicted starts marked.

    Returns:
        list[int]: predicted note start times in milliseconds.
    """
    # Size of segments to break song into for volume calculations
    SEGMENT_MS = 50
    # Minimum volume necessary to be considered a note
    VOLUME_THRESHOLD = -27.8
    # The increase from one sample to the next required to be considered a note
    EDGE_THRESHOLD = 0.09
    # Throw out any additional notes found in this window
    MIN_MS_BETWEEN = 100

    # dBFS is decibels relative to the maximum possible loudness
    volume = [segment.dBFS for segment in song[::SEGMENT_MS]]

    predicted_starts = []
    for i in range(1, len(volume)):
        if volume[i] > VOLUME_THRESHOLD and volume[i] - volume[i - 1] > EDGE_THRESHOLD:
            ms = i * SEGMENT_MS
            # Ignore any too close together
            if len(predicted_starts) == 0 or ms - predicted_starts[-1] >= MIN_MS_BETWEEN:
                predicted_starts.append(ms)

    # Plot the volume over time (sec)
    if plot:
        x_axis = np.arange(len(volume)) * (SEGMENT_MS / 1000)
        plt.plot(x_axis, volume)
        # Add vertical lines for predicted note starts
        for ms in predicted_starts:
            plt.axvline(x=(ms / 1000), color="g", linewidth=0.5, linestyle=":")
        plt.show()

    return predicted_starts
def predict_notes(song, starts, plot_fft_indices):
    """Classify the note played at each predicted start time.

    Returns a list with one predicted note name (or "U" for unknown)
    per entry in ``starts``.
    """
    predictions = []
    last = len(starts) - 1

    for idx, start_ms in enumerate(starts):
        window_start = start_ms + 50
        window_end = start_ms + 200
        # Never sample past the beginning of the next note.
        if idx < last:
            window_end = min(starts[idx + 1], window_end)

        clip = song[window_start:window_end]
        freqs, freq_magnitudes = frequency_spectrum(clip)

        note = classify_note_attempt_2(freqs, freq_magnitudes)
        predictions.append(note if note else "U")

        # General info about this note.
        print("")
        print("Note: {}".format(idx))
        print("Predicted start: {}".format(start_ms))
        span = window_end - window_start
        print("Sampled from {} to {} ({} ms)".format(window_start, window_end, span))
        print("Frequency sample period: {}hz".format(freqs[1]))

        # Peak info.
        peaks, props = scipy.signal.find_peaks(freq_magnitudes, height=0.015)
        print("Peaks of more than 1.5 percent of total frequency contribution:")
        for k, peak_idx in enumerate(peaks):
            print("{:.1f}hz with magnitude {:.3f}".format(
                freqs[peak_idx], props["peak_heights"][k]))

        if idx in plot_fft_indices:
            plt.plot(freqs, freq_magnitudes, "b")
            plt.xlabel("Freq (Hz)")
            plt.ylabel("|X(freq)|")
            plt.show()

    return predictions
if __name__ == "__main__":
    # Example run: expects "untitled.wav" next to this script; the note list
    # is the ground truth used for the Levenshtein score.
    main("untitled.wav", note_arr=["C", "D", "E", "F", "G", "A"], plot_starts=True)
| 4,763 | 1,543 |
# Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Dict
import gpflow
import numpy as np
import pytest
import tensorflow as tf
from gpflow.models import GPMC
from gpflux.models import DeepGP
from tests.util.models.gpflux.models import two_layer_dgp_model
from tests.util.models.models import fnc_3x_plus_10
from trieste.models import TrainableProbabilisticModel
from trieste.models.gpflux import DeepGaussianProcess, GPfluxModelConfig
def test_gpflux_model_config_raises_not_supported_model_type() -> None:
    """The config must reject model types it does not support (here a GPMC)."""
    x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
    y = fnc_3x_plus_10(x)
    unsupported = GPMC((x, y), gpflow.kernels.Matern32(), gpflow.likelihoods.Gaussian())
    with pytest.raises(NotImplementedError):
        GPfluxModelConfig(model=unsupported)
def test_gpflux_model_config_has_correct_supported_models() -> None:
    """The config should map DeepGP models to the DeepGaussianProcess wrapper."""
    x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
    config = GPfluxModelConfig(model=two_layer_dgp_model(x))
    expected: Dict[
        Any, Callable[[Any, tf.optimizers.Optimizer], TrainableProbabilisticModel]
    ] = {DeepGP: DeepGaussianProcess}
    assert config.supported_models() == expected
def test_gpflux_model_config_has_correct_default_optimizer() -> None:
    """Without an explicit optimizer the config should fall back to Adam."""
    x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
    config = GPfluxModelConfig(model=two_layer_dgp_model(x))
    assert isinstance(config.optimizer, tf.optimizers.Adam)
def test_gpflux_model_config_allows_changing_default_optimizer() -> None:
    """An optimizer passed in the spec should replace the Adam default."""
    x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
    config = GPfluxModelConfig(
        model=two_layer_dgp_model(x),
        optimizer=tf.optimizers.RMSprop(),
    )
    assert isinstance(config.optimizer, tf.optimizers.RMSprop)
| 2,720 | 929 |
# Generated by Django 2.1.2 on 2018-11-23 23:42
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the contact_list and
    # profiles tables.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_id', models.CharField(max_length=255, unique=True)),
                # NOTE(review): unique=True on an ArrayField's base_field is not
                # enforced by PostgreSQL — confirm whether uniqueness of friend
                # ids is intended and, if so, enforce it in application code.
                ('friend_id', django.contrib.postgres.fields.ArrayField(base_field=models.BigIntegerField(unique=True), null=True, size=None)),
                ('date_added', models.DateTimeField()),
                # auto_now: refreshed on every save().
                ('date_modified', models.DateTimeField(auto_now=True)),
            ],
            options={
                'db_table': 'contact_list',
            },
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.CharField(max_length=255, unique=True)),
                ('name', models.TextField(blank=True, null=True)),
                ('profile_img', models.BinaryField(blank=True, null=True)),
                ('profile_description', models.TextField(blank=True, null=True)),
                ('profile_img_str', models.TextField(null=True)),
            ],
            options={
                'db_table': 'profiles',
            },
        ),
    ]
| 1,558 | 429 |
from compas_cloud import Proxy
import time

# Demo script: walks through the Proxy lifecycle (start, restart, health
# check, shutdown), pausing between steps so the output can be followed.

print("\n starting a new Proxy and by default starts a server in background")
proxy = Proxy(background=True)
time.sleep(3)

print("\n restarting the background server and open a new one in a prompt console")
proxy.background = False
proxy.restart()
time.sleep(3)

print("\n check if the proxy is healthily connected to server")
print(proxy.check())
time.sleep(3)

# Fixed typos in the original message ("shut the the", "quite").
print("\n shut down the server and quit the program")
proxy.shutdown()
time.sleep(3)
#-----------------SUPERMARKET MANAGEMENT SYSTEM--------------------

def _read_int(prompt, error_message):
    """Keep prompting until the user enters a whole number."""
    while True:
        try:
            return int(input(prompt))
        except ValueError:
            print(error_message)


def _read_item_form(item):
    """Fill *item* in place with name, quantity and price read from stdin."""
    item['name'] = input('Item name : ')
    item['quantity'] = _read_int('Item quantity : ', 'Quantity should only be in digits')
    item['price'] = _read_int('Price $ : ', 'Price should only be in digits')


# In-memory inventory: one dict per item with name/quantity/price keys.
items = []

while True:
    print('------------------Welcome to the supermarket------------------')
    print('1. View items\n2. Add items for sale\n3. Purchase items\n4. Search items \n5. Edit items\n6. Exit')
    # ROBUSTNESS FIX: the original crashed with ValueError on non-numeric input.
    choice = _read_int('Enter the number of your choice : ', 'You entered an invalid option')
    if choice == 1:
        print('------------------View Items------------------')
        print('The number of items in the inventory are : %d ' % len(items))
        if len(items) != 0:
            print('Here are all the items available in the supermarket.')
            for item in items:
                for key, value in item.items():
                    print("%s : %s " % (key, value))
    elif choice == 2:
        print('------------------Add items------------------')
        print('To add an item fill in the form')
        item = {}
        _read_item_form(item)
        print('Item has been successfully added.')
        items.append(item)
    elif choice == 3:
        print('------------------purchase items------------------')
        print(items)
        purchase_item = input('which item do you want to purchase? Enter name : ')
        purchase_quantity = int(input('Enter the quantity wanted : '))
        for item in items:
            if purchase_item.lower() == item['name'].lower():
                if item['quantity'] != 0:
                    if purchase_quantity <= item['quantity']:
                        print('Pay %d at checkout counter.' % (item['price'] * purchase_quantity))
                        item['quantity'] -= purchase_quantity
                    else:
                        print("Quantity required is not available")
                else:
                    print('item out of stock.')
                break
        else:
            # BUG FIX: the original printed nothing when the item was absent.
            print('item not found.')
    elif choice == 4:
        print('------------------search items------------------')
        find_item = input('Enter the items name to search in inventory : ')
        # BUG FIX: the original printed 'item not found.' once per non-matching
        # item; use for/else so it prints only when no item matches at all.
        for item in items:
            if item['name'].lower() == find_item.lower():
                print('The item named ' + find_item + ' is displayed below with its details')
                print(item)
                break
        else:
            print('item not found.')
    elif choice == 5:
        print('------------------edit items------------------')
        item_name = input('Enter the name of the item that you want to edit : ')
        # BUG FIX: same misplaced-else problem as the search branch.
        for item in items:
            if item_name.lower() == item['name'].lower():
                print('Here are the current details of ' + item_name)
                print(item)
                _read_item_form(item)
                print('Item has been successfully updated.')
                print(item)
                break
        else:
            print('Item not found')
    elif choice == 6:
        print('------------------exited------------------')
        break
    else:
        print('You entered an invalid option')
| 3,871 | 915 |
"""Contains the links that can be downloaded."""
from hashlib import sha1
import logging
import os
from pathlib import Path
import re
import warnings
from bs4 import BeautifulSoup
from requests import Response
import unidecode
from vcm.core.exceptions import AlgorithmFailureError, MoodleError, ResponseError
from vcm.core.modules import Modules
from vcm.core.networking import Connection
from vcm.core.results import Results
from vcm.core.utils import Patterns, save_crash_context, secure_filename
from vcm.settings import settings
from .alias import Alias
from .filecache import REAL_FILE_CACHE
class _Notify:
NOTIFY = False
@property
def notify(self):
return self.NOTIFY
class BaseLink(_Notify):
    """Base class for Links."""

    def __init__(self, name, section, url, icon_url, subject, parent=None):
        """
        Args:
            name (str): name of the url.
            url (str): URL of the url.
            icon_url (str or None): URL of the icon.
            subject (vcm.subject.Subject): subject of the url.
            parent (BaseLink): object that created self.
        """
        self.name = name.strip()
        self.section = section
        self.url = url
        self.icon_url = icon_url
        self.subject = subject
        self.connection = Connection()
        self.parent = parent
        # Populated lazily during make_request()/download():
        self.response: Response = None
        self.soup: BeautifulSoup = None
        self.filepath: Path = None
        self.redirect_url = None
        self.response_name = None
        self.subfolders = []

        self.logger = logging.getLogger(__name__)
        self.logger.debug(
            "Created %s(name=%r, url=%r, subject=%r)",
            self.__class__.__name__,
            self.name,
            self.url,
            self.subject.name,
        )

    @property
    def content_disposition(self):
        """ASCII-folded Content-Disposition header of the response.

        Raises RuntimeError if the request has not been made yet; a missing
        header raises KeyError.
        """
        if self.response is None:
            raise RuntimeError("Response not made yet")

        return unidecode.unidecode(self.response.headers["Content-Disposition"])

    def append_subfolder(self, dirname):
        """Appends a sanitized directory name to the subfolder list."""
        dirname = secure_filename(dirname)
        return self.subfolders.append(dirname)

    def insert_subfolder(self, index, dirname):
        """Inserts a sanitized directory name at *index* in the subfolder list."""
        dirname = secure_filename(dirname)
        return self.subfolders.insert(index, dirname)

    def create_subfolder(self):
        """Creates the subfolder, if it is configured."""
        self.create_subject_folder()

        if not self.filepath:
            self.autoset_filepath()

        folder: Path = self.filepath.parent
        if not folder.exists():
            os.makedirs(folder.as_posix(), exist_ok=True)
            self.logger.debug("Created subfolder %r", folder.as_posix())
        else:
            self.logger.debug("Subfolder already exists %r", folder.as_posix())

    @staticmethod
    def _process_filename(filepath: str):
        """Quits some characters from the filename that can not be in a filepath.

        Args:
            filepath (str): filepath to process.

        Returns:
            str: filepath processed.
        """
        filepath = filepath.replace(">", " mayor que ")
        filepath = filepath.replace("<", " menor que ")
        return filepath

    @staticmethod
    def _filename_to_ext(filename):
        """Returns the extension given a filename."""
        return Path(filename).suffix[1:]

    def _get_ext_from_response(self):
        """Returns the extension of the filename of the response, got from the
        Content-Disposition HTTP header.

        Returns:
            str: the extension.
        """
        if self.response_name is not None:
            return self._filename_to_ext(self.response_name)

        try:
            # unidecode.unidecode is used to remove accents.
            self.response_name = Patterns.FILENAME.search(
                self.content_disposition
            ).group(1)
            extension = self._filename_to_ext(self.response_name)
            if extension:
                return extension
        except KeyError:
            # Raised by content_disposition when the header is absent.
            # NOTE(review): a header that does not match Patterns.FILENAME
            # would raise AttributeError here, which is not caught — confirm
            # whether that can happen in practice.
            pass

        # Fall back to the last component of the URL.
        self.response_name = Path(self.url).name
        extension = self._filename_to_ext(self.response_name)
        if extension:
            return extension

        # Last resort: derive the extension from the MIME type.
        return self.content_type.split("/")[-1]

    def create_subject_folder(self):
        """Creates the subject's principal folder."""
        return self.subject.create_folder()

    def make_request(self):
        """Makes the request for the Link."""
        self.logger.debug("Making request")

        self.response = self.connection.get(self.redirect_url or self.url)
        self.logger.debug(
            "Response obtained [%d | %s]", self.response.status_code, self.content_type
        )

        if 500 <= self.response.status_code <= 599:
            raise MoodleError(f"Moodle server replied with {self.response.status_code}")

        if self.response.status_code == 408:
            # NOTE(review): unbounded recursion if the server keeps replying 408.
            self.logger.warning("Received response with code 408, retrying")
            return self.make_request()

        if not self.response.ok:
            raise ResponseError(f"Got HTTP {self.response.status_code}")

    def close_connection(self):
        """Deprecated: responses are not streamed, so there is nothing to close."""
        warnings.warn(
            "Since streams are not used, this method should not be called",
            DeprecationWarning,
        )
        self.logger.debug("Closing connection")
        self.response.close()

    def process_request_bs4(self):
        """Parses the response with BeautifulSoup with the html parser."""
        self.logger.debug("Parsing response (bs4)")
        self.soup = BeautifulSoup(self.response.text, "html.parser")
        self.logger.debug("Response parsed (bs4)")

    def autoset_filepath(self):
        """Determines the filepath of the Link."""
        if self.filepath is not None:
            self.logger.debug("Filepath is setted, skipping (%s)", self.filepath)
            return

        if self.response is None:
            raise RuntimeError("Request not launched")

        filename = secure_filename(
            self._process_filename(self.name) + "." + self._get_ext_from_response()
        )
        self.logger.debug("Initial filename: %s", filename)

        temp_filepath = self.subject.folder
        if self.subfolders:
            # NOTE(review): Path.joinpath returns a new path and does not
            # mutate; this result is discarded, so the subfolders never end
            # up in the final filepath — confirm whether this is a bug.
            temp_filepath.joinpath(*self.subfolders)

        if self.section:
            temp_filepath /= self.section.name

        temp_filepath /= filename

        try:
            # Only some subclasses (e.g. Folder) define .id.
            folder_id = self.id
        except AttributeError:
            folder_id = None

        # Alias maps the URL hash to the stored filepath.
        self.filepath = Path(
            Alias.id_to_alias(
                sha1(self.url.encode()).hexdigest(), temp_filepath.as_posix(), folder_id
            )
        )

        self.logger.debug("Set filepath: %r", self.filepath.as_posix())

    def download(self):
        """Wrapper for self.do_download()."""
        try:
            self.do_download()
        finally:
            # Drop references so large responses can be garbage collected.
            self.response = None
            self.soup = None

    def do_download(self):
        """Abstract method to download the Link. Must be overridden by subclasses."""
        self.logger.debug("Called do_download() but it was not implemented")
        raise NotImplementedError

    def get_header_length(self):
        """Returns the response length, preferring the Content-Length header."""
        try:
            return int(self.response.headers["Content-Length"])
        except KeyError:
            return len(self.response.content)

    @property
    def content_type(self):
        """The response's Content-Type header, or None if absent."""
        if "Content-Type" in self.response.headers:
            return self.response.headers["Content-Type"]

        return None

    def save_response_content(self):
        """Saves the response content to the disk."""
        if self.filepath is None:
            self.autoset_filepath()

        if Modules.current() == Modules.notify:
            # The notify module only reports changes; it never writes files.
            return

        self.create_subfolder()

        self.logger.debug(
            "filepath in REAL_FILE_CACHE: %s", self.filepath in REAL_FILE_CACHE
        )
        if self.filepath in REAL_FILE_CACHE:
            if REAL_FILE_CACHE[self.filepath] == self.get_header_length():
                # Same size as the cached copy: assume unchanged and skip the write.
                self.logger.debug(
                    "File found in cache: Same content (%d)", len(self.response.content)
                )
                return

            self.logger.debug(
                "File found in cache: Different content (%d --> %d)",
                REAL_FILE_CACHE[self.filepath],
                len(self.response.content),
            )
            Results.print_updated(self.filepath)
        else:
            self.logger.debug(
                "File added to cache: %s [%d]",
                self.filepath,
                len(self.response.content),
            )
            REAL_FILE_CACHE[self.filepath] = len(self.response.content)
            Results.print_new(self.filepath)

        try:
            with self.filepath.open("wb") as file_handler:
                file_handler.write(self.response.content)
            self.logger.debug("File downloaded and saved: %s", self.filepath)
        except PermissionError:
            # Best-effort: log and carry on with the remaining downloads.
            self.logger.warning(
                "File couldn't be downloaded due to permission error: %s",
                self.filepath.name,
            )
            self.logger.warning(
                "Permission error %s -- %s", self.subject.name, self.filepath.name
            )

    @staticmethod
    def ensure_origin(url: str) -> bool:
        """Returns True if the origin is the virtual campus."""
        return "uva.es" in url
class Resource(BaseLink):
    """Representation of a resource (a downloadable file)."""

    NOTIFY = True

    # Ordered (Content-Type fragment, resource type) pairs. This replaces the
    # original 17-branch if-chain; the order is preserved because specific
    # fragments (the office MIME types) must match before generic ones
    # ('excel', 'msword').
    _CONTENT_TYPES = (
        ("application/pdf", "pdf"),
        ("officedocument.wordprocessingml.document", "word"),
        ("officedocument.spreadsheetml.sheet", "excel"),
        ("excel", "excel"),
        ("officedocument.presentationml.slideshow", "power-point"),
        ("presentationml.presentation", "power-point"),
        ("powerpoint", "power-point"),
        ("msword", "word"),
        ("application/zip", "zip"),
        ("application/g-zip", "gzip"),
        ("application/x-7z-compressed", "7zip"),
        ("x-rar-compressed", "rar"),
        ("text/plain", "plain"),
        ("application/json", "json"),
        ("application/octet-stream", "octect-stream"),
        ("image/jpeg", "jpeg"),
        ("image/png", "png"),
        ("video/mp4", "mp4"),
        ("video/x-ms-wm", "avi"),
    )

    def __init__(self, name, section, url, icon_url, subject, parent=None):
        super().__init__(name, section, url, icon_url, subject, parent)
        self.resource_type = "unknown"

    def set_resource_type(self, new):
        """Sets a new resource type.

        Args:
            new (str): new resource type.
        """
        self.logger.debug("Set resource type: %r", new)
        self.resource_type = new

        # HTML responses need parsing before they can be inspected further.
        if self.resource_type == "html":
            self.process_request_bs4()

    def do_download(self):
        """Downloads the resource, dispatching on the response Content-Type."""
        self.logger.debug("Downloading resource %r", self.name)

        url = self.redirect_url or self.url
        if not self.ensure_origin(url):
            self.logger.warning(
                "Permision denied: URL is outside of campusvirtual.uva.es"
            )
            return

        self.make_request()

        if self.response.status_code == 404:
            self.logger.error("state code of 404 in url %r [%r]", self.url, self.name)
            return None

        # Ordered table scan; behavior (including check order) matches the
        # original if-chain exactly.
        for fragment, resource_type in self._CONTENT_TYPES:
            if fragment in self.content_type:
                self.set_resource_type(resource_type)
                return self.save_response_content()

        if "text/html" in self.content_type:
            # HTML pages wrap the real resource; hand off to an Html link.
            self.set_resource_type("html")
            self.logger.debug(
                "Created forum discussion from forum list: %r, %s",
                self.name,
                self.url,
            )
            self.subject.add_link(
                Html(
                    self.name, self.section, self.url, self.icon_url, self.subject, self
                )
            )
            return

        if self.response.status_code % 300 < 100:
            # Redirect-like status: follow the Location header and retry.
            self.url = self.response.headers["Location"]
            self.logger.warning("Redirecting to %r", self.url)
            return self.download()

        self.logger.error(
            "Content not identified: %r (code=%s, header=%r)",
            self.url,
            self.response.status_code,
            self.response.headers,
        )
        return None
class Folder(BaseLink):
    """Representation of a folder."""

    NOTIFY = True

    def __init__(self, name, section, url, icon_url, subject, id_, parent=None):
        super().__init__(name, section, url, icon_url, subject, parent)
        # Folder downloads are requested by id rather than by plain URL.
        self.id = id_

    def make_request(self):
        """Makes the request for the Link, POSTing the folder id and sesskey."""
        self.logger.debug("Making request")

        payload = {"id": self.id, "sesskey": self.connection.sesskey}
        self.response = self.connection.post(self.url, data=payload)

        self.logger.debug("Response obtained [%d]", self.response.status_code)

    def do_download(self):
        """Downloads the folder and saves the returned content."""
        self.logger.debug("Downloading folder %r", self.name)
        self.make_request()
        self.save_response_content()
class BaseForum(BaseLink):
    """Representation of a Forum link."""

    # Folder name under which forum resources are stored.
    BASE_DIR = "foros"

    def do_download(self):
        """Downloads the resources found in the forum hierarchy."""
        # Subclasses (ForumList, ForumDiscussion) implement the actual walk.
        raise NotImplementedError
class ForumList(BaseForum):
    """Forum index page: spawns a ForumDiscussion link per thread found."""

    def do_download(self):
        self.logger.debug("Downloading forum list %r", self.name)
        self.make_request()
        self.process_request_bs4()

        # Each 'topic starter' cell holds one thread title and link.
        for topic in self.soup.findAll("td", {"class": "topic starter"}):
            discussion = ForumDiscussion(
                topic.text,
                self.section,
                topic.a["href"],
                self.icon_url,
                self.subject,
                self,
            )
            self.logger.debug(
                "Created forum discussion from forum list: %r, %s",
                discussion.name,
                discussion.url,
            )
            self.subject.add_link(discussion)
class ForumDiscussion(BaseForum):
    """A forum thread: collects its file attachments and attached images."""

    # NOTIFY = True

    def do_download(self):
        self.logger.debug("Downloading forum discussion %r", self.name)
        self.make_request()
        self.process_request_bs4()

        attachment_divs = self.soup.findAll("div", {"class": "attachments"})
        image_divs = self.soup.findAll("div", {"class": "attachedimages"})

        # File attachments: each div should contain an <a> with an <img> icon.
        for attachment in attachment_divs:
            try:
                resource = Resource(
                    Path(attachment.text).stem,
                    self.section,
                    attachment.a["href"],
                    attachment.a.img["src"],
                    self.subject,
                    self,
                )
                resource.subfolders = self.subfolders
                self.logger.debug(
                    "Created resource from forum: %r, %s", resource.name, resource.url
                )
                self.subject.add_link(resource)
            except TypeError:
                # Malformed attachment markup (missing <a>/<img>): skip it.
                pass

        # Inline images: prefer the link target, fall back to the img source.
        for container in image_divs:
            for img_tag in container.findAll("img"):
                try:
                    img_url = img_tag["href"]
                except KeyError:
                    img_url = img_tag["src"]

                image_link = Image(
                    Path(img_url).stem, self.section, img_url, None, self.subject, self
                )
                image_link.subfolders = self.subfolders
                self.logger.debug(
                    "Created resource (image) from forum: %r, %s",
                    image_link.name,
                    image_link.url,
                )
                self.subject.add_link(image_link)
class Delivery(BaseLink):
    """Representation of a delivery link."""

    NOTIFY = True

    def do_download(self):
        """Downloads the resources found in the delivery."""
        self.logger.debug("Downloading delivery %r", self.name)
        self.make_request()
        self.process_request_bs4()

        links = []
        for container in self.soup.findAll("a", {"target": "_blank"}):
            url = container["href"]
            # Campus-hosted files have their own icon next to the link;
            # external ones reuse the delivery's icon. (Removed the original
            # unused `valid` flag.)
            if self.ensure_origin(url):
                icon_url = container.parent.img["src"]
            else:
                icon_url = self.icon_url

            resource = Resource(
                Path(container.text).stem,
                self.section,
                url,
                icon_url,
                self.subject,
                self,
            )
            resource.subfolders = self.subfolders

            self.logger.debug(
                "Created resource from delivery: %r, %s", resource.name, resource.url
            )
            links.append(resource)

        # Disambiguate duplicated names by appending a per-name counter.
        names = [link.name for link in links]
        dupes = {x for x in names if names.count(x) > 1}
        dupes_counters = {x: 1 for x in dupes}

        for link in links:
            if link.name in dupes:
                original_name = link.name
                link.name += "_" + str(dupes_counters[original_name])
                dupes_counters[original_name] += 1
                self.logger.debug("Changed name %r -> %r", original_name, link.name)

        for link in links:
            self.subject.add_link(link)
class BaseUndownloableLink(BaseLink):
    """Represents a link which can not be downloaded."""

    def do_download(self):
        """Doesn't do anything, because this is an unparseable link."""
        class_name = type(self).__name__.lower()
        self.logger.debug("Downloading %s %r", class_name, self.name)
        self.logger.info("%s links are unparseable.", class_name.title())
class Chat(BaseUndownloableLink):
    """Representation of a chat link."""

    NOTIFY = True


class Page(BaseUndownloableLink):
    """Representation of a page link."""

    NOTIFY = True


class Url(BaseUndownloableLink):
    """Representation of an url link."""

    NOTIFY = True


class Kalvidres(BaseUndownloableLink):
    """Representation of a kalvidres link.

    A Kalvidres is some kind of video, but it can't be downloaded yet due to
    lack of research (I+D).
    """

    NOTIFY = True


class Quiz(BaseUndownloableLink):
    """Representation of a quiz link."""

    NOTIFY = True


class BlackBoard(BaseUndownloableLink):
    """Representation of a blackboard link.

    A blackboard is a link to a VoIP chat.
    """

    NOTIFY = True
class Html(BaseLink):
    """An HTML page that wraps a real resource; extracts and queues it."""

    def do_download(self):
        """Downloads the resources found in a html web page."""
        self.logger.debug("Downloading html %r", self.name)
        self.make_request()
        self.process_request_bs4()
        self.logger.debug("Parsing HTML (%r)", self.url)

        try:
            name = self.soup.find("div", {"role": "main"}).h2.text
        except AttributeError:
            # Check if it is a weird page
            if self.soup.find("applet"):
                self.logger.debug("Identified as weird page without content, skipping")
                return
            raise

        return self.try_algorithms(name)

    def try_algorithms(self, name):
        """Try each check_algorithm_* (in name order) until one finds a resource.

        Raises:
            AlgorithmFailureError: if no algorithm could extract a resource.
        """
        algorithms = [x for x in dir(self) if x.startswith("check_algorithm")]
        algorithms = [getattr(self, x) for x in algorithms]
        algorithms.sort(key=lambda x: x.__name__)

        for algorithm in algorithms:
            resource = algorithm(name)
            if resource:
                break
        else:
            # No algorithm produced a resource.
            return self.handle_algorithm_failure()

        self.logger.debug(
            "Created resource from HTML: %r, %s", resource.name, resource.url
        )
        self.subject.add_link(resource)
        return

    def check_algorithm_1(self, name):
        """Resource embedded as <object id="resourceobject">."""
        resource = self.soup.find("object", {"id": "resourceobject"})
        # BUG FIX: the original used `assert` for control flow, which is
        # stripped under -O and would then crash on None.
        if resource is None:
            return None
        return Resource(
            name, self.section, resource["data"], self.icon_url, self.subject, self
        )

    def check_algorithm_2(self, name):
        """Resource embedded as <iframe id="resourceobject">."""
        resource = self.soup.find("iframe", {"id": "resourceobject"})
        if resource is None:
            return None
        return Resource(
            name, self.section, resource["src"], self.icon_url, self.subject, self
        )

    def check_algorithm_3(self, name):
        """Resource linked from an anchor inside div.resourceworkaround."""
        container = self.soup.find("div", {"class": "resourceworkaround"})
        try:
            return Resource(
                name,
                self.section,
                container.a["href"],
                self.icon_url,
                self.subject,
                self,
            )
        except (AttributeError, TypeError):
            # AttributeError: no such div. TypeError (fix): div without an <a>,
            # which crashed the original instead of falling through.
            return None

    def check_algorithm_4(self, name):
        """Resource shown as an image inside div.resourcecontent.resourceimg."""
        resource = self.soup.find("div", class_="resourcecontent resourceimg")
        if resource is None:
            return None
        return Resource(
            name,
            self.section,
            resource.img["src"],
            self.icon_url,
            self.subject,
            self,
        )

    def handle_algorithm_failure(self):
        """Persist the offending response for debugging, then raise."""
        self.logger.error("HTML ALGORITHM FAILURE")
        save_crash_context(
            self.response,
            "html-algorithm-failure",
            "html algorithm failure",
        )
        raise AlgorithmFailureError
class Image(BaseLink):
    """Representation of an image link."""

    def do_download(self):
        """Downloads the image, identifying its type from the Content-Type."""
        self.make_request()

        match = re.search(r"image/(\w+)", self.content_type)
        if not match:
            # FIX: the original raised a bare RuntimeError with no message.
            raise RuntimeError(
                f"Content-Type {self.content_type!r} is not an image type"
            )

        image_type = match.group(1)
        self.logger.debug("Identified image as %r", image_type)

        # Synthesize an icon URL from the detected image type.
        self.icon_url = "https://campusvirtual.uva.es/invalid/f/" + image_type
        return self.save_response_content()
| 23,993 | 6,676 |
def main():
    """Build the detector + tracker sprokit pipeline and return it.

    Returns:
        pipedef.Pipeline: the fully wired pipeline, ready to write or draw.
    """
    import pipedef
    pipe = pipedef.Pipeline()

    # ============================== GLOBAL PROPERTIES =================================
    # global pipeline config
    pipe.config = {
        '_pipeline:_edge': {'capacity': 5},
    }

    # ============================== INPUT FRAME LIST ==================================
    # FIX: renamed the local from `input` to avoid shadowing the builtin.
    frame_input = pipe.add_process(name='input', type='frame_list_input', config={
        'image_reader:type' : 'vxl',
        'image_list_file' : 'input_list.txt',
        'frame_time' : 0.03333,
    })
    frame_input.iports.define()
    frame_input.oports.define('image', 'timestamp', 'image_file_name')

    # ================================== DETECTOR ======================================
    detector = pipe.add_process(name='detector', type='image_object_detector', config={
        'detector:type': 'darknet',
        # Network config
        ':detector:darknet:net_config' : '../detector_pipelines/models/model2.cfg',
        ':detector:darknet:weight_file' : '../detector_pipelines/models/model2.weights',
        ':detector:darknet:class_names' : '../detector_pipelines/models/scallop_and_fish.lbl',
        # Detector parameters
        ':detector:darknet:thresh' : 0.001,
        ':detector:darknet:hier_thresh' : 0.001,
        ':detector:darknet:gpu_index' : 0,
        # Image scaling parameters
        ':detector:darknet:resize_option': 'maintain_ar',
        ':detector:darknet:resize_ni': 544,
        ':detector:darknet:resize_nj': 544,
        ':detector:darknet:scale': 1.0,
    })
    detector.iports.define('image')
    detector.oports.define('detected_object_set')

    detector_writer = pipe.add_process(name='detector_writer', type='detected_object_output', config={
        # Type of file to output
        ':file_name': 'output/individual_detections.kw18',
        ':writer:type': 'kw18',
        # Write out FSO classifications alongside tracks
        ':writer:kw18:write_tot': True,
        ':writer:kw18:tot_field1_ids': 'fish',
        ':writer:kw18:tot_field2_ids': 'scallop',
    })
    detector_writer.iports.define('detected_object_set', 'image_file_name')
    detector_writer.oports.define()

    frame_input.oports.connect({
        'image': detector.iports['image'],
        'image_file_name': detector_writer.iports['image_file_name'],
    })
    detector.oports.connect({
        'detected_object_set': detector_writer.iports['detected_object_set'],
    })

    # Note these other alternative ways of creating edges
    # frame_input.oports['image'].connect(detector.iports['image'])
    # frame_input.oports['image'].connect(detector_writer.iports['image_file_name'])
    # detector.oports['detected_object_set'].connect(detector_writer.iports['detected_object_set'])
    # frame_input.oports['image'].connect(detector.iports['image'])  # closer to syntax of a .pipe file
    # frame_input.oports.connect({'image': detector.iports['image']})  # closer to syntax of a .pipe file
    # detector.iports.connect(**frame_input.oports)  # can use if input and output ports share names
    # detector.iports.connect({'image': frame_input.oports['image']})  # closer to the syntax of a function call

    # ================================ CORE TRACKER ===================================
    detection_descriptor = pipe.add_process(name='detection_descriptor', type='compute_track_descriptors')
    detection_descriptor.config = {
        ':inject_to_detections' : True,
        ':computer:type' : 'burnout',
        ':computer:burnout:config_file' : 'detection_descriptors.conf',
    }
    detection_descriptor.iports.define('image', 'timestamp', 'detected_object_set')
    detection_descriptor.oports.define('detected_object_set')

    tracker = pipe.add_process(name='tracker', type='compute_association_matrix')
    tracker.config = '''
    :matrix_generator:type from_features
    :matrix_generator:from_features:max_distance 40

    block matrix_generator:from_features:filter
    :type class_probablity_filter
    :class_probablity_filter:threshold 0.001
    :class_probablity_filter:keep_all_classes false
    :class_probablity_filter:keep_classes fish;scallop
    endblock
    '''
    tracker.iports.define('image', 'timestamp', 'detected_object_set', 'object_track_set')
    tracker.oports.define('matrix_d', 'object_track_set', 'detected_object_set')

    track_associator = pipe.add_process(name='track_associator', type='associate_detections_to_tracks')
    track_associator.config = '''
    :track_associator:type threshold
    :track_associator:threshold:threshold 100.0
    :track_associator:threshold:higher_is_better false
    '''
    track_associator.iports.define('image', 'timestamp', 'matrix_d', 'object_track_set', 'detected_object_set')
    track_associator.oports.define('object_track_set', 'unused_detections')

    track_initializer = pipe.add_process(name='track_initializer', type='initialize_object_tracks')
    track_initializer.config = '''
    :track_initializer:type threshold

    block track_initializer:threshold:filter
    :type class_probablity_filter
    :class_probablity_filter:threshold 0.001
    :class_probablity_filter:keep_all_classes false
    :class_probablity_filter:keep_classes fish;scallop
    endblock
    '''
    track_initializer.iports.define('image', 'timestamp', 'object_track_set', 'detected_object_set')
    track_initializer.oports.define('object_track_set')

    # To use the star notation the input ports and output ports must have the
    # same name. Currently you must also define the ports. Eventually we might
    # read them from sprokit.

    # Connect inputs to detection descriptor
    detection_descriptor.iports.connect(**frame_input.oports, **detector.oports)

    # Connect inputs to tracker
    tracker.iports.connect(**frame_input.oports, **detection_descriptor.oports, **track_initializer.oports)

    # Connect inputs to track_associator
    track_associator.iports.connect(**frame_input.oports, **tracker.oports)

    # Connect inputs to track_initializer
    track_initializer.iports.connect(
        detected_object_set=track_associator.oports['unused_detections'],
        **frame_input.oports, **track_associator.oports)

    # ================================= INDEX DATA ====================================
    track_writer = pipe.add_process(name='track_writer', type='write_object_track')
    track_writer.iports.define('object_track_set')
    track_writer.config = '''
    :file_name output_tracks.kw18
    :writer:type kw18
    '''

    # Connect inputs to track writer
    track_writer.iports.connect(**track_initializer.oports)

    return pipe
if __name__ == '__main__':
    r"""
    CommandLine:
        source ~/code/VIAME/build/install/setup_viame.sh
        cd /home/joncrall/code/VIAME/examples/tracking_pipelines
        ~/code/VIAME/build/install/bin/pipe_to_dot -p simple_tracker.pipe -o g.dot
        dot -Tpng g.dot > g.png
        python ~/code/VIAME/examples/tracking_pipelines/define_simple_tracker.py
    """
    # Build the pipeline, serialize it, render the graph and open the image.
    pipe = main()
    pipe.write('auto_simple_tracker.pipe')
    pipe.draw_graph('pipeline.png')
    import ubelt as ub
    ub.startfile('pipeline.png')
| 7,472 | 2,332 |
from flask import Flask, request, render_template
import pandas as pd
import joblib
# Declare a Flask app
app = Flask(__name__)  # module-level WSGI application; routes below attach to it
def model_predict(i):
    """Map the classifier's numeric label to a human-readable name.

    Label 1 is reported as "Normal"; every other value as "Abnormal".
    """
    return "Normal" if i == 1 else "Abnormal"
@app.route('/', methods=['GET', 'POST'])
# Main function here
def main():
    """Render the input form and, on POST, run the spine classifier.

    GET  -> renders the page with an empty prediction.
    POST -> reads the twelve biomechanical measurements from the form,
            feeds them to the pickled gradient-boosting model, and shows
            the mapped label.

    Bug fix: the original passed raw form strings (or None for missing
    fields) straight into the model; values are now converted to float
    via werkzeug's ``type=float`` and invalid submissions are reported
    instead of crashing with a 500.
    """
    prediction = ""
    if request.method == "POST":
        # Unpickle classifier.
        # NOTE(review): loaded on every request; consider caching at module
        # scope if this endpoint becomes hot.
        gbc = joblib.load("gbc.pkl")

        # Form field names, in the column order the model was trained on
        # (Col1..Col12 below).
        fields = [
            "Pelvic_incidence", "Pelvic_tilt", "Lumbar_Lordosis_Angle",
            "Sacral_slope", "Pelvic_radius", "Degree_spondylolisthesis",
            "Pelvic_slope", "Direct_tilt", "Thoracic_slope",
            "Cervical_tilt", "Sacrum_angle", "Scoliosis_slope",
        ]
        # type=float converts each raw form string to a number; a missing
        # or non-numeric field comes back as None.
        values = [request.form.get(name, type=float) for name in fields]
        if None in values:
            prediction = "Invalid input"
        else:
            # Put inputs into a one-row dataframe with the training columns.
            X = pd.DataFrame(
                [values], columns=[f"Col{i}" for i in range(1, 13)]
            )
            # Get prediction
            prediction = model_predict(gbc.predict(X)[0])
    return render_template("website.html", output=prediction)
# Running the app
if __name__ == '__main__':
    # debug=True enables the auto-reloader and in-browser tracebacks.
    # NOTE(review): disable debug mode in any production deployment.
    app.run(debug = True)
| 1,918 | 665 |
from django.urls import path, include
from . import views
urlpatterns = [
    # Public pages.
    path('', views.index, name='index'),
    path('faq', views.faq, name='faq'),
    # Account pages for the logged-in user ("me") and by explicit id.
    path('account/me/password/', views.change_password, name='user-password'),
    path('account/me/', views.view_user, name='user'),
    path('account/<int:user_id>/', views.view_user, name='user-by-id'),
    path('account/me/edit/', views.edit_user, name='user-edit'),
    # Listing of all users.
    path('accounts/', views.all_users, name='user-list'),
    # Custom password-reset confirmation view. Declared BEFORE the auth
    # include below so this pattern matches first for the same URL.
    path('accounts/reset/<uidb64>/<token>/', views.MyPasswordResetConfirmView.as_view(), name='password_reset_confirm'),
    # Built-in auth URLs (login, logout, password reset, ...).
    path('accounts/', include('django.contrib.auth.urls')),
    # AJAX / REST endpoints.
    path('ajax/door', views.door, name='ajax-door'),
    path('ajax/projector', views.projector, name='ajax-projector'),
    path('rest/card_id', views.is_student_card_id_in_db, name='card-id'),
]
| 864 | 296 |
import unittest
from dcp.leetcode.str.find_and_replace import findReplaceString
class Test_FindAndReplace(unittest.TestCase):
    """Tests for findReplaceString (LeetCode 833: Find And Replace in String)."""

    def test_case1(self):
        # None of the sources match the input at their given indexes
        # ("jf" at 1, "md" at 4, "tjgb" at 6), so the string is unchanged.
        str_in = "jjievdtjfb"
        indexes, sources, targets = [4, 6, 1], ["md", "tjgb", "jf"], ["foe", "oov", "e"]
        actual = findReplaceString(str_in, indexes, sources, targets)
        expected = "jjievdtjfb"
        # Bug fix: the original line had extraction junk appended
        # ("assert actual == expected | 424 | 142 |"), which is a syntax
        # error; restored to a plain equality assertion.
        self.assertEqual(actual, expected)
import json
from abc import ABCMeta, abstractmethod
import six
from moto.sts.models import ACCOUNT_ID
@six.add_metaclass(ABCMeta)
class TestConfig:
    """Provides the interface to use for creating test configurations.
    This class will provide the interface for what information will be
    needed for the SageMaker CloudFormation tests. Ultimately, this will
    improve the readability of the tests in `test_sagemaker_cloudformation.py`
    because it will reduce the amount of information we pass through the
    `pytest.mark.parametrize` decorator.
    """
    @property
    @abstractmethod
    def resource_name(self):
        """Logical ID used for the resource in generated CF templates."""
        pass

    @property
    @abstractmethod
    def describe_function_name(self):
        """Name of the boto3 describe_* call for this resource type."""
        pass

    @property
    @abstractmethod
    def name_parameter(self):
        """Key of the resource-name field in the describe_* response."""
        pass

    @property
    @abstractmethod
    def arn_parameter(self):
        """Key of the resource-ARN field in the describe_* response."""
        pass

    @abstractmethod
    def get_cloudformation_template(self, include_outputs=True, **kwargs):
        """Return a CloudFormation template for this resource as a JSON string."""
        pass
class NotebookInstanceTestConfig(TestConfig):
    """Test configuration for SageMaker Notebook Instances."""

    @property
    def resource_name(self):
        return "TestNotebook"

    @property
    def describe_function_name(self):
        return "describe_notebook_instance"

    @property
    def name_parameter(self):
        return "NotebookInstanceName"

    @property
    def arn_parameter(self):
        return "NotebookInstanceArn"

    def get_cloudformation_template(self, include_outputs=True, **kwargs):
        """Build a CloudFormation template (JSON string) for a notebook instance.

        Recognized kwargs: ``instance_type`` and ``role_arn``; both fall back
        to fixed test defaults.  ``include_outputs`` adds Arn/Name outputs.
        """
        properties = {
            "InstanceType": kwargs.get("instance_type", "ml.c4.xlarge"),
            "RoleArn": kwargs.get(
                "role_arn", "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID)
            ),
        }
        template = {
            "AWSTemplateFormatVersion": "2010-09-09",
            "Resources": {
                self.resource_name: {
                    "Type": "AWS::SageMaker::NotebookInstance",
                    "Properties": properties,
                }
            },
        }
        if include_outputs:
            template["Outputs"] = {
                "Arn": {"Value": {"Ref": self.resource_name}},
                "Name": {
                    "Value": {
                        "Fn::GetAtt": [self.resource_name, "NotebookInstanceName"]
                    }
                },
            }
        return json.dumps(template)
class NotebookInstanceLifecycleConfigTestConfig(TestConfig):
    """Test configuration for SageMaker Notebook Instance Lifecycle Configs."""

    @property
    def resource_name(self):
        return "TestNotebookLifecycleConfig"

    @property
    def describe_function_name(self):
        return "describe_notebook_instance_lifecycle_config"

    @property
    def name_parameter(self):
        return "NotebookInstanceLifecycleConfigName"

    @property
    def arn_parameter(self):
        return "NotebookInstanceLifecycleConfigArn"

    def get_cloudformation_template(self, include_outputs=True, **kwargs):
        """Build a CloudFormation template (JSON string) for a lifecycle config.

        Optional kwargs ``on_create`` / ``on_start`` become the OnCreate /
        OnStart script contents; ``include_outputs`` adds Arn/Name outputs.
        """
        resource = {
            "Type": "AWS::SageMaker::NotebookInstanceLifecycleConfig",
            "Properties": {},
        }
        # Only attach the hook scripts that were actually supplied.
        for prop, content in (
            ("OnCreate", kwargs.get("on_create")),
            ("OnStart", kwargs.get("on_start")),
        ):
            if content is not None:
                resource["Properties"][prop] = [{"Content": content}]

        template = {
            "AWSTemplateFormatVersion": "2010-09-09",
            "Resources": {self.resource_name: resource},
        }
        if include_outputs:
            template["Outputs"] = {
                "Arn": {"Value": {"Ref": self.resource_name}},
                "Name": {
                    "Value": {
                        "Fn::GetAtt": [
                            self.resource_name,
                            "NotebookInstanceLifecycleConfigName",
                        ]
                    }
                },
            }
        return json.dumps(template)
class ModelTestConfig(TestConfig):
    """Test configuration for SageMaker Models."""

    @property
    def resource_name(self):
        return "TestModel"

    @property
    def describe_function_name(self):
        return "describe_model"

    @property
    def name_parameter(self):
        return "ModelName"

    @property
    def arn_parameter(self):
        return "ModelArn"

    def get_cloudformation_template(self, include_outputs=True, **kwargs):
        """Build a CloudFormation template (JSON string) for a SageMaker model.

        Recognized kwargs: ``execution_role_arn`` and ``image``; both fall
        back to fixed test defaults.  ``include_outputs`` adds Arn/Name
        outputs.
        """
        role = kwargs.get(
            "execution_role_arn", "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID)
        )
        container_image = kwargs.get(
            "image", "404615174143.dkr.ecr.us-east-2.amazonaws.com/linear-learner:1"
        )
        template = {
            "AWSTemplateFormatVersion": "2010-09-09",
            "Resources": {
                self.resource_name: {
                    "Type": "AWS::SageMaker::Model",
                    "Properties": {
                        "ExecutionRoleArn": role,
                        "PrimaryContainer": {"Image": container_image},
                    },
                }
            },
        }
        if include_outputs:
            template["Outputs"] = {
                "Arn": {"Value": {"Ref": self.resource_name}},
                "Name": {"Value": {"Fn::GetAtt": [self.resource_name, "ModelName"]}},
            }
        return json.dumps(template)
| 5,606 | 1,574 |
#!/usr/bin/env python
# coding: utf-8
# # Virtual environments
# #### Attribution
#
# The conda virtual environment section of this guide
# was originally published at http://geohackweek.github.io/ under a CC-BY license
# and has been updated to reflect recent changes in conda,
# as well as modified slightly to fit the MDS lecture format.
#
# ## Topic learning goals
#
# By the end of the topic you will be able to:
#
# 1. Explain what a virtual environment is and why it can be useful for reproducible data analyses
# 2. Discuss the advantages and limitations of virtual environment tools (e.g., `conda` and `renv`) in the context of reproducible data analyses
# 3. Use, create and share virtual environments (for example, with conda and `renv`)
# ## Virtual environments
#
# Virtual environments let you have multiple versions of programming languages,
# packages, and other programs on the same computer, while keeping them isolated
# so they do not create conflicts with each other.
# In practice virtual environments are used in one or multiple projects.
# And you might have several virtual environments stored on your laptop,
# so that you can have a different collection of versions of programming languages
# and their packages for each project, as needed.
#
# Most virtual environment tools have a sharing functionality which aids in making
# data science projects reproducible,
# as not only is there a record of the computational environment,
# but that computational environment can be shared to other computers -
# facilitating the reproduction of results from data and code.
# This facilitation comes from the fact that programming languages
# and their packages are not static - they change!
# There are new features added, bugs are fixed, etc,
# and this can impact how your code runs!
# Therefore, for a data science project to be reproducible across time,
# you need the computational environment in addition to the data and the code.
#
# There are several other major benefits of using environments:
#
# - If two of your projects on your computer rely on different versions of the same package,
# you can install these in different environments.
# - If you want to play around with a new package,
# you don't have to change the packages versions you use for your data analysis project
# and risk messing something up
# (package version often get upgraded when we install other new packages that share dependencies).
# - When you develop your own packages,
# it is essential to use environments,
# since you want to make sure you know exactly which packages yours depend on,
# so that it runs on other systems than your own.
#
#
# There are **MANY** virtual environment tools out there,
# even if we just focus on R and Python.
# When we do that we can generate this list:
#
# #### R virtual environment tools
# - `packrat`
# - `renv`
# - `conda`
#
# #### Python virtual environment tools
# - `venv`
# - `virtualenv`
# - `conda`
# - `mamba`
# - `poetry`
# - `pipenv`
# - ... there may be more that I have missed.
#
#
# In this course, we will learn about `conda` and `renv`.
# `conda` is nice because it can work with both R and Python.
# Although a downside of `conda` is that it is not as widely adopted in the R community
# as Python is, and therefore there are less R packages available from it,
# and less recent versions of those R packages than available directly from the R package index (CRAN).
# It is true, that you can create a `conda` package for any R package that exists on CRAN,
# however this takes time and effort and is sometimes non-trivial.
#
# Given that, we will also learn about `renv` - a new virtual environment tool in R
# that is gaining widespread adoption.
# It works directly with the packages on CRAN,
# and therefore allows users to create R virtual environments with
# the most up-to-date packages, and all R packages on CRAN, with less work compared to `conda`.
#
# > Note on terminology: Technically what we are discussing here in this topic are referred to as virtual environments.
# > However, in practice we often drop the "virtual" when discussing this and refer to these as simply "environments".
# > That may happen in these lecture notes, as well as in the classroom.
# ## Conda
#
# [**conda**](http://conda.pydata.org/docs/) is an **open source `package` and `environment` management system for any programming language**;
# though it is most popular in the Python community.
# `conda` was originally developed by [Anaconda Inc.](https://www.anaconda.com/products/individual)
# and bundled with their Anaconda distribution of Python.
# However, `conda`'s widespread popularity
# and utility led to its decoupling into its own package.
#
# It is now available for installation via:
# - Anaconda Python distribution
# - Miniconda Python distribution (this is what we recommended most of you install for this course)
# - Miniforge (this is what we recommended folks with Mac ARM machines install for this course)
#
# Conda builds of R and Python packages, are in fact R and Python packages and built from
# R and Python source code, but they are packaged up and built differently,
# and with a different tool chain.
# How to create `conda` packages from R and Python source code
# is beyond the scope of this course.
# However, we direct keen learners of this topic to the documentation on how to do this:
# - [Conda-build documentation](https://docs.conda.io/projects/conda-build/en/latest/)
#
# What we will focus on learning is how to use `conda`
# to create virtual environments,
# record the components of the virtual environment
# and share the virtual environment with collaborators
# in a way that they can recreate it on their computer.
#
# ### Managing Conda
#
# Let's first start by checking if conda is installed (it should be if we followed the recommended course computer setup instructions) by running:
#
# ```
# conda --version
# ```
#
# To see which conda commands are available,
# type `conda --help`.
# To see the full documentation for any command of these commands,
# type the command followed by `--help`.
# For example,
# to learn about the conda update command:
#
# ```
# conda update --help
# ```
#
# Let's update our conda to the latest version.
# Note that you might already have the latest version since we downloaded it recently.
#
# ```
# conda update conda
# ```
#
# You will see some information about what there is to update
# and be asked if you want to confirm.
# The default choice is indicated with `[]`,
# and you can press <kbd>Enter</kbd> to accept it.
# It would look similar to this:
#
# ```
# Using Anaconda Cloud api site https://api.anaconda.org
# Fetching package metadata: ....
# .Solving package specifications: .........
#
# Package plan for installation in environment //anaconda:
#
# The following packages will be downloaded:
#
# package | build
# ---------------------------|-----------------
# conda-env-2.6.0 | 0 601 B
# ruamel_yaml-0.11.14 | py27_0 184 KB
# conda-4.2.12 | py27_0 376 KB
# ------------------------------------------------------------
# Total: 560 KB
#
# The following NEW packages will be INSTALLED:
#
# ruamel_yaml: 0.11.14-py27_0
#
# The following packages will be UPDATED:
#
# conda: 4.0.7-py27_0 --> 4.2.12-py27_0
# conda-env: 2.4.5-py27_0 --> 2.6.0-0
# python: 2.7.11-0 --> 2.7.12-1
# sqlite: 3.9.2-0 --> 3.13.0-0
#
# Proceed ([y]/n)? y
#
# Fetching packages ...
# conda-env-2.6. 100% |################################| Time: 0:00:00 360.78 kB/s
# ruamel_yaml-0. 100% |################################| Time: 0:00:00 5.53 MB/s
# conda-4.2.12-p 100% |################################| Time: 0:00:00 5.84 MB/s
# Extracting packages ...
# [ COMPLETE ]|###################################################| 100%
# Unlinking packages ...
# [ COMPLETE ]|###################################################| 100%
# Linking packages ...
# [ COMPLETE ]|###################################################| 100%
# ```
#
# In this case,
# conda itself needed to be updated,
# and along with this update some dependencies also needed to be updated.
# There is also a NEW package that was INSTALLED in order to update conda.
# You don't need to worry about remembering to update conda,
# it will let you know if it is out of date when you are installing new packages.
# ### Managing `conda` environments
#
# #### What is a conda environment and why is it so useful?
#
# Using `conda`, you can create an isolated R or Python virtual environment for your project.
# The default environment is the `base` environment,
# which contains only the essential packages from Miniconda
# (and anything else you have installed in it since installing Miniconda).
# You can see that your shell's prompt string is prefaced with `(base)`
# when you are inside this environment:
#
# ```{bash}
# (base) Helps-MacBook-Pro:~ tiffany$
# ```
#
# In the computer setup guide,
# we asked you to follow instructions so that this environment
# will be activated by default every time you open your terminal.
#
# To create another environment on your computer,
# that is isolated from the `(base)` environment
# you can either do this through:
#
# 1. Manual specifications of packages.
# 2. An environment file in YAML format (`environment.yml`).
#
# We will now discuss both, as they are both relevant workflows for data science.
# When do you use one versus the other?
# I typically use the manual specifications of packages when I am creating
# a new data science project.
# From that I generate an environment file in YAML format
# that I can share with collaborators (or anyone else who wants to reproduce my work).
# Thus, I use an environment file in YAML format when I join a project as a collaborator
# and I need to use the same environment that has been previously used for that project,
# or when I want to reproduce someone else's work.
# ### Creating environment by manually specifying packages
#
# We can create the `test_env` conda environment by typing `conda create -n <name-of-env>`.
# However,
# it is often useful to specify more than just the name of the environment,
# e.g. the channel from which to install packages, the Python version,
# and a list of packages to install into the new env.
# In the example below,
# I am creating the `test_env` environment
# that uses python 3.7 and a list of libraries: `jupyterlab` and `pandas`.
#
# ```
# conda create -n test_env -c conda-forge python=3.7 jupyterlab pandas=1.0.2
# ```
#
# conda will solve any dependencies between the packages like before
# and create a new environment with those packages.
# Usually,
# we don't need to specify the channel,
# but in this case I want to get the very latest version of these packages,
# and they are made available in `conda-forge`
# before they reach the default conda channel.
#
# To activate this new environment,
# you can type `conda activate test_env`
# (and `conda deactivate` for deactivating).
# Since you will do this often,
# we created an alias shortcut `ca`
# that you can use to activate environments.
# To know the current environment that you're in you can look at the prefix
# of the prompt string in your shell which now changed to (`test_env`).
# And to see all your environments,
# you can type `conda env list`.
# ### Seeing what packages are available in an environment
#
# We will now check packages that are available to us.
# The command below will list all the packages in an environment, in this case `test_env`.
# The list will include versions of each package, the specific build,
# and the channel that the package was downloaded from.
# `conda list` is also useful to ensure that you have installed the packages that you desire.
#
# ```
# conda list
# ```
#
# ```
# # packages in environment at //miniconda/envs/test_env:
# #
# Using Anaconda Cloud api site https://api.anaconda.org
# blas 1.1 openblas conda-forge
# ca-certificates 2016.9.26 0 conda-forge
# certifi 2016.9.26 py27_0 conda-forge
# cycler 0.10.0 py27_0 conda-forge
# freetype 2.6.3 1 conda-forge
# functools32 3.2.3.2 py27_1 conda-forge
# libgfortran 3.0.0 0 conda-forge
# ```
# ### Installing conda package
#
# Under the name column of the result in the terminal or the package column in the Anaconda Cloud listing,
# shows the necessary information to install the package.
# e.g. conda-forge/rasterio.
# The first word list the channel that this package is from and the second part shows the name of the package.
#
# To install the latest version available within the channel, do not specify in the install command. We will install version 0.35 of `rasterio` from conda-forge into `test_env` in this example. Conda will also automatically install the dependencies for this package.
#
# ```
# conda install -c conda-forge rasterio=0.35
# ```
#
# If you have a few trusted channels that you prefer to use, you can pre-configure these so that everytime you are creating an environment, you won't need to explicitly declare the channel.
#
# ```
# conda config --add channels conda-forge
# ```
# ### Removing a conda package
#
# We decided that rasterio is not needed in this tutorial, so we will remove it from `test_env`.
# Note that this will remove the main package rasterio and its dependencies (unless a dependency was installed explicitly at an earlier point in time or is required be another package).
#
# ```
# conda remove -n test_env rasterio
# ```
#
# ```
# Using Anaconda Cloud api site https://api.anaconda.org
# Fetching package metadata .........
# Solving package specifications: ..........
#
# Package plan for package removal in environment //anaconda/envs/test_env:
#
# The following packages will be REMOVED:
#
# rasterio: 0.35.1-np111py27_1 conda-forge
#
# Proceed ([y]/n)? y
#
# Unlinking packages ...
# [ COMPLETE ]|#######################################################################################################| 100%
# ```
# ### Sharing Environments with others
#
# To share an environment, you can export your conda environment to an environment file,
# which will list each package and its version
# in the format `package=version=build`.
#
# Exporting your environment to a file called `environment.yaml`
# (it could be called anything,
# but this is the conventional name
# and using it makes it easy for others
# to recognize that this is a conda env file,
# the extension can be either `.yaml` or `.yml`):
#
# ```
# conda env export --from-history -f environment.yml
# ```
#
# Remember that `.yaml` files are plain text,
# so you can use a text editor such as VS Code to open them.
# If you do,
# you will realize that this environment file has A LOT more packages
# than `jupyterlab` and `pandas`.
# This is because the default behavior is to also list the dependencies
# that were installed together with these packages,
# e.g. `numpy`.
# This is good in the sense that it gives an exact copy of *everything*
# in your environment.
#
# We use the `--from-history` flag/option above as
# some dependencies might differ between operating systems,
# so this file *might* not work with someone from a different OS.
# The `--from-history` flag,
# looks at the history of the packages you explicitly told `conda` to install
# and only list those in the export.
# The required dependencies will then be handled in an OS-specific manner during the installation,
# which guarantees that they will work across OSes.
# This `environment.yaml` file would be much shorter and look something like this:
#
# ```yaml
# name: test_env
# channels:
# - conda-forge
# - defaults
# dependencies:
# - conda
# - python=3.7
# - pandas==1.0.2
# - jupyterlab
# ```
#
# Importantly,
# this will not include the package version
# unless you included it when you installed
# with the `package==version` syntax.
# For an environment to be reproducible,
# you **NEED** to add the version string manually.
# ### Creating environment from an environment file
#
# Now, let's install `environment.yml` environment file above so that we can create a conda environment called `test_env`.
#
# ```
# $ conda env create --file environment.yml
# ```
#
# #### Exercise
#
# Create an environment on your laptop with an older version of Python!
#
# 1. Clone [this GitHub repository](https://github.com/ttimbers/conda_env_practice/blob/main/README.md).
#
# 2. Try to run some antiquated (Python 2 style) Python code, such as `python -c "print 'Back from the Future'"`. This should fail.
#
# 3. In the terminal, navigate to the root of the repository and run: `conda env create --file environment.yml`
#
# 4. Activate the environment by typing `conda activate oldie_but_a_goodie`
#
# 5. Try again to run some antiquated (Python 2 style) Python code, such as `python -c "print 'Back from the Future'"`. If you activated your environment correctly, this should succeed!
#
# ### Copying an environment
#
# We can make an exact copy of an environment to an environment with a different name.
# This maybe useful for any testing versus live environments or different Python 3.7 versions for the same packages.
# In this example, `test_env` is cloned to create `live_env`.
#
# ```
# conda create --name live_env --clone test_env
# ```
# ### Listing all environments on your laptop
#
# You may have created an environment
# and forgotten what you named it,
# or you want to do some cleanup
# and delete old environments (next topic),
# and so you want to see which exist on your computer
# and remove the ones you are no longer using.
# To do this we will use the `info` command
# along with the `--envs` flag/option.
#
#
# ```{bash}
# conda info --envs
# ```
#
# > **Note:** Listing all the `conda` environments on your laptop with this command
# > also shows you where `conda` stores these environments.
# > Typically `conda` environments are stored in `/Users/<USERNAME>/opt/miniconda3/envs`.
# > This means that `conda` environments are typically in your terminal's path,
# > resulting in the environments being accessible from any directory on your computer,
# > regardless of where they were created.
# > However, despite this flexibility,
# > commonly one environment is created per project,
# > and the `environment.yml` file that is used for sharing the `conda` environment
# > is stored in the project root.
# ### Deleting an environment
#
# Since we are only testing out our environment,
# we will delete `live_env` to remove some clutter.
# *Make sure that you are not currently using `live_env`.*
#
# ```
# conda env remove -n live_env
# ```
# ### Making environments work well with JupyterLab
#
# In brief,
# you need to install a kernel in the new conda environment
# in any new environment your create (`ipykernel` for Python
# and the `r-irkernel` package for R),
# and the `nb_conda_kernels` package needs to be installed
# in the environment where JupyterLab is installed.
#
# By default,
# JupyterLab only sees the conda environment where it is installed.
# Since it is quite annoying to install JupyterLab and its extensions separately in each environment,
# there is a package called `nb_conda_kernels` that makes it possible
# to have a single installation of JupyterLab access kernels in other conda environments.
# This package needs to be installed in the conda environment
# where JupyterLab is installed.
# For the computer setup for this course, we did that in the `base` environment,
# so that is where you would need to install `nb_conda_kernels` to make this work.
#
#
# *More details are available in the [nb_conda_kernels README](https://github.com/Anaconda-Platform/nb_conda_kernels#installation)).*
# Remember that when you forget a specific command
# you can type in the help command we have created `mds-help`
# in your terminal to see a list of all commands we use in MDS.
# ## `renv`
#
# In R,
# environments can also be managed by `renv`,
# which works with similar principles as `conda`,
# and other virtual environment managers,
# but the commands are different.
# Detailed documentation for `renv`,
# can found at the [package website](https://rstudio.github.io/renv/index.html).
#
# `renv` differs from `conda` in the way that it adds package dependencies.
# Briefly, when you prompt `renv` to create (or update) a file to record the project dependencies (done via `renv`'s `snapshot()` function),
# it recursively crawls the files in the project
# looking for calls to `library()` or `require()`.
#
# The key file `renv` creates for recording and sharing environments is called `renv.lock`
# in the project's root directory.
# Other files are created in the project's root directory when you use `renv`
# but `renv.lock` is the file that documents which programming languages and packages
# (including versions) are used in the project.
# It is recommended that when sharing an `renv` environment that you version control `renv.lock`, `.Rprofile` and `renv/activate.R` to facilitate collaboration.
# When you setup an `renv` environment with `renv::init()` it creates a `renv/.gitignore` file
# so that files that `renv` creates and uses locally but are not helpful to share, are not shared.
#
# `renv` environments work best in the context of RStudio projects - and so it is recommended that you create an RStudio project that corresponds to the root of your data science project repository. If this is not done - `renv` will crawl files outside of the project, looking for dependencies.
#
# Below we create a table with the general virtual environment commands for `renv` as well as the equivalent `conda` command for comparison:
#
# | Description | `renv` command |`conda` command |
# |------------------------------|--------------------|-----------------------------------|
# | Create a new environment without an environment file | `renv::init()` | `conda create -n <ENV_NAME> ...` |
# | Activate a new environment | `renv::activate()` | `conda activate <ENV_NAME>` |
# | Export environment to a file | `renv::snapshot()` | `conda env export --from-history -f environment.yml` | |
# | Create a new environment from an environment file | `renv::restore()` | `conda env create --file environment.yml` |
| 22,904 | 6,332 |
import base64
import datetime, pytz
from re import X
import io
from matplotlib import pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from scipy import spatial
from skyfield.api import load, Star
from skyfield.projections import build_stereographic_projection
from ..astro.angdist import chord_length
from ..astro.transform import get_cartesian
from ..dso.models import DSO
from ..plotting.map import *
from ..solar_system.plot import r2d, d2r
from ..utils.format import to_hm, to_dm
from .finder import plot_dso
def plate_list():
    """Build the numbered atlas-plate centres as ``{plate_no: (ra, dec)}``.

    Each declination band gets a fixed number of plates, and the RA step
    per band grows toward the poles so plate spacing stays roughly even
    on the sky.  The poles (+/-90) are single plates centred at RA 0.
    Plate numbers start at 1 and increase from +90 down to -90.
    """
    bands = [90, 75, 60, 45, 30, 15, 0, -15, -30, -45, -60, -75, -90]
    counts = [1, 12, 16, 20, 24, 32, 48, 32, 24, 20, 16, 12, 1]
    steps = [0., 2.0, 1.5, 1.2, 1.0, 0.75, 0.5, 0.75, 1.0, 1.2, 1.5, 2.0, 0.]
    plates = {}
    number = 1
    for dec, count, step in zip(bands, counts, steps):
        if abs(dec) == 90:
            # Single polar plate; count/step are unused here.
            plates[number] = (0, dec)
            number += 1
            continue
        for k in range(count):
            plates[number] = (k * step, dec)
            number += 1
    return plates
def get_fn(ra, dec, shapes=False):
    """Build the atlas-plate image filename for a plate centre.

    The name encodes RA as HHMM, the hemisphere as N/S, and |Dec| as DDMM,
    e.g. ``(12.5, -45.25)`` -> ``"1230S4515.png"``.

    Parameters
    ----------
    ra : float
        Right ascension of the plate centre, in hours.
    dec : float
        Declination of the plate centre, in degrees.
    shapes : bool
        When True, append an ``X`` marker before the extension to
        distinguish the shapes variant of the plate.  Bug fix: the
        original computed this marker but never used it, so the shapes
        and non-shapes variants produced identical filenames.

    Returns
    -------
    str
        Filename of the form ``HHMM[N|S]DDMM[X].png``.
    """
    rah = int(ra)
    # The tiny epsilon guards against float round-off just under a minute.
    ram = int(60.*(ra - rah) + 0.00005)
    decs = 'N' if dec >= 0.0 else 'S'
    d = abs(dec)
    dd = int(d)
    dm = int(60.*(d - dd) + 0.00005)
    x = 'X' if shapes else ''
    return f"{rah:02d}{ram:02d}{decs}{dd:02d}{dm:02d}{x}.png"
def get_dsos_on_plate(ra, dec, fov=20):
    """Return the DSOs whose positions fall on the plate centred at (ra, dec).

    Builds a k-d tree over the Cartesian coordinates of every DSO and
    queries a ball around the plate centre.  The search radius is the
    chord length of the field of view scaled by a large fudge factor,
    so this deliberately over-selects; callers are expected to clip to
    the actual plate extents when plotting.

    Parameters
    ----------
    ra, dec : float
        Plate centre (RA in hours, Dec in degrees -- assumed from the
        ``ra_dec=True`` conversion; confirm against get_cartesian).
    fov : float
        Field of view of the plate, in degrees.

    Returns
    -------
    list
        DSO model instances near the plate centre.
    """
    fudge = 120  # widen the match radius well beyond the nominal FOV chord
    dsos = DSO.objects.all()
    radius = chord_length(fov, degrees=True) * fudge
    coords = [other.get_xyz for other in dsos]
    center = get_cartesian(ra, dec, ra_dec=True)
    tree = spatial.KDTree(coords)
    # query_ball_point is given a single point wrapped in a list, so the
    # result is a one-element list of index lists; take the first.
    # (Bug fix: the original indexed it as neighbor_list[[0][0]], a
    # confusing no-op spelling of [0].)
    neighbor_list = tree.query_ball_point([center], radius)
    return [dsos[idx] for idx in neighbor_list[0]]
def create_atlas_plot(
        center_ra, center_dec,
        reversed=False, mag_limit=9.5,
        fov=20, save_file=True,
        mag_offset=0, shapes=False,
        label_size='x-small',
        label_weight='normal'
):
    """Render one star-atlas plate centered on (center_ra, center_dec).

    :param center_ra: plate center right ascension, in hours
    :param center_dec: plate center declination, in degrees
    :param reversed: use matplotlib 'dark_background' style when True.
        NOTE(review): shadows the builtin ``reversed``; renaming would
        break keyword callers, so it is left as-is.
    :param mag_limit: faintest stellar magnitude passed to map_hipparcos
    :param fov: field of view of the plate, in degrees
    :param save_file: write a PNG under media/atlas_images/ when True,
        otherwise return the image as a base64 data URI
    :param mag_offset: passed through to map_hipparcos
    :param shapes: plot each DSO individually via plot_dso with labels,
        instead of the default map_dsos rendering
    :param label_size: DSO label font size (map_dsos path only)
    :param label_weight: DSO label font weight (map_dsos path only)
    :return: ``(filename, dsos_on_plate)`` when save_file is True,
        else ``(base64_png_string, dsos_on_plate)``
    """
    ts = load.timescale()
    # Datetime is arbitrary
    t = ts.from_datetime(datetime.datetime(2022, 1, 1, 0, 0).replace(tzinfo=pytz.utc))  # Arbitrary time
    eph = load('de421.bsp')
    earth = eph['earth']
    # Treat the plate center as the projection's tangent point.
    zenith = earth.at(t).observe(Star(ra_hours=center_ra, dec_degrees=center_dec))
    ra = to_hm(center_ra)  # String value
    dec = to_dm(center_dec)  # string value
    # Start up a Matplotlib plot
    style = 'dark_background' if reversed else 'default'
    plt.style.use(style)
    fig, ax = plt.subplots(figsize=[9, 9])
    # center
    projection = build_stereographic_projection(zenith)
    # Convert the FOV to the stereographic plot's radial limit.
    # NOTE(review): ``np`` is not imported by name in this module --
    # presumably it arrives via ``from ..plotting.map import *``; confirm.
    angle = np.pi - fov / 360. * np.pi
    limit = np.sin(angle) / (1.0 - np.cos(angle))
    # NOW PLOT THINGS!
    # 1. stars and constellation lines
    ax, stars = map_hipparcos(ax, earth, t, mag_limit, projection, reversed=reversed, mag_offset=mag_offset)
    ax = map_constellation_lines(ax, stars, reversed=reversed)
    ax = map_bright_stars(ax, earth, t, projection, points=False, annotations=True, reversed=reversed)
    if shapes:
        # Largest objects first so smaller DSOs are drawn on top.
        other_dso_records = DSO.objects.order_by('-major_axis_size')
        other_dsos = {'x': [], 'y': [], 'label': [], 'marker': []}
        for other in other_dso_records:
            x, y = projection(earth.at(t).observe(other.skyfield_object))
            if abs(x) > limit or abs(y) > limit:
                continue  # not on the plot
            other_dsos['x'].append(x)
            other_dsos['y'].append(y)
            other_dsos['label'].append(other.shown_name)
            other_dsos['marker'].append(other.object_type.marker_type)
            ax = plot_dso(ax, x, y, other, alpha=0.6)
        xxx = np.array(other_dsos['x'])
        yyy = np.array(other_dsos['y'])
        # Annotate each plotted DSO slightly offset from its marker.
        for x, y, z in zip(xxx, yyy, other_dsos['label']):
            plt.annotate(
                z, (x, y),
                textcoords='offset points',
                xytext=(5, 5),
                ha='left'
            )
    else:
        ax, _ = map_dsos(ax, earth, t, projection,
            center=(center_ra, center_dec),
            reversed=reversed,
            label_size=label_size,
            label_weight=label_weight,
            product='atlas'
        )
    # Set the display
    ax.set_xlim(-limit, limit)
    ax.set_ylim(-limit, limit)
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    # Secondary axes display the scale in degrees (radian<->degree maps).
    secax = ax.secondary_xaxis('bottom', functions=(r2d, d2r))
    secax.set_xlabel('Degrees')
    secay = ax.secondary_yaxis('left', functions=(r2d, d2r))
    # NOTE(review): no set_ylabel() on secay -- confirm whether a
    # 'Degrees' label was intended here as well.
    title = f"Chart: RA {ra} DEC {dec}"
    ax.set_title(title)
    on_plate = get_dsos_on_plate(center_ra, center_dec, fov=fov)
    if save_file:
        fn = get_fn(center_ra, center_dec, shapes=shapes)
        fig.savefig('media/atlas_images/{}'.format(fn), bbox_inches='tight')
        plt.cla()
        plt.close(fig)
        return fn, on_plate
    # No file requested: hand the rendered PNG back as a base64 data URI.
    plt.tight_layout(pad=2.0)
    # Convert to a PNG image
    pngImage = io.BytesIO()
    FigureCanvas(fig).print_png(pngImage)
    pngImageB64String = 'data:image/png;base64,'
    pngImageB64String += base64.b64encode(pngImage.getvalue()).decode('utf8')
    # close things
    plt.cla()
    plt.close(fig)
    return pngImageB64String, on_plate
"""
258 atlas plates
1: 0, +90 N polar plot
2 - 13: 2.0h, +75
14 - 29: 1.5h, +60
30 - 49: 1.2h, +45
50 - 73: 1.0h, +30
74 - 105: 0.75h, +15
106 - 153: 0.5h, 0
154 - 185: 0.75h, -15
186 - 209: 1.0h, -30
210 - 229: 1.2h, -45
230 - 245: 1.5h, -60
246 - 257: 2.0h, -75
258: 0, -90 S polar plot
""" | 5,748 | 2,323 |
# 5.2 Write a program that repeatedly prompts a user for integer numbers until the user enters 'done'. Once 'done' is entered, print out the largest and smallest of the numbers. If the user enters anything other than a valid number catch it with a try/except and put out an appropriate message and ignore the number. Enter 7, 2, bob, 10, and 4 and match the output below.
# Track the extremes directly; None marks "no valid number seen yet" so
# the script no longer dies with a NameError if 'done' is entered first.
largest = None
smallest = None
while True:
    num = input("Enter a number: ")
    if num == "done":
        break
    try:
        value = int(num)
    except ValueError:
        # Catch only the parse failure -- the original bare except would
        # also swallow KeyboardInterrupt and genuine bugs.
        print("Invalid input")
        continue
    if largest is None or value > largest:
        largest = value
    if smallest is None or value < smallest:
        smallest = value
print("Maximum is", largest)
print("Minimum is", smallest)
| 884 | 234 |
from pipeline.pipeline import pipeline | 38 | 8 |
# - *- coding: utf- 8 - *-
from aiogram import Router
from tgbot.routers.user.user_menu import router_user_menu
# Wire up the user-side handlers
def setup_user_handlers(user_router: Router) -> None:
    """Register every user-facing sub-router on the supplied router."""
    user_router.include_router(router_user_menu)
| 244 | 91 |
# -*- coding: utf-8 -*-
from .grammar import SchemaGrammar
class SQLiteSchemaGrammar(SchemaGrammar):
    """Schema grammar that compiles blueprint commands into SQLite SQL.

    SQLite collapses most column types into a handful of storage classes,
    so the ``_type_*`` methods embed the original orator type inside a SQL
    comment; the schema dumper later parses those comments to recover the
    origin type.
    """

    _modifiers = ['unsigned', 'nullable', 'default', 'increment']

    _serials = ['big_integer', 'integer']

    def compile_rename_column(self, blueprint, command, connection):
        """
        Compile a rename column command.

        :param blueprint: The blueprint
        :type blueprint: Blueprint

        :param command: The command
        :type command: Fluent

        :param connection: The connection
        :type connection: orator.connections.Connection

        :rtype: list
        """
        sql = []

        # If foreign keys are on, we disable them around the rename so
        # SQLite does not validate references mid-operation.
        foreign_keys = self._connection.select('PRAGMA foreign_keys')
        if foreign_keys:
            foreign_keys = bool(foreign_keys[0])

        if foreign_keys:
            sql.append('PRAGMA foreign_keys = OFF')

        sql += super().compile_rename_column(
            blueprint, command, connection)

        if foreign_keys:
            sql.append('PRAGMA foreign_keys = ON')

        return sql

    def compile_change(self, blueprint, command, connection):
        """
        Compile a change column command into a series of SQL statement.

        :param blueprint: The blueprint
        :type blueprint: orator.schema.Blueprint

        :param command: The command
        :type command: Fluent

        :param connection: The connection
        :type connection: orator.connections.Connection

        :rtype: list
        """
        sql = []

        # If foreign keys are on, we disable them
        foreign_keys = self._connection.select('PRAGMA foreign_keys')
        if foreign_keys:
            foreign_keys = bool(foreign_keys[0])

        if foreign_keys:
            sql.append('PRAGMA foreign_keys = OFF')

        # Zero-argument super() for consistency with compile_rename_column.
        sql += super().compile_change(blueprint, command, connection)

        if foreign_keys:
            sql.append('PRAGMA foreign_keys = ON')

        return sql

    def compile_table_exists(self):
        """
        Compile the query to determine if a table exists

        :rtype: str
        """
        result = ("SELECT * FROM sqlite_master WHERE type = 'table' "
                  "AND name = %(marker)s" % {'marker': self.get_marker()})
        return result

    def compile_column_exists(self, table):
        """
        Compile the query to determine the list of columns.
        """
        # Qualified "db.table" names are stored with double underscores,
        # since dots are not valid in SQLite table names.
        return 'PRAGMA table_info(%s)' % table.replace('.', '__')

    def compile_create(self, blueprint, command, _):
        """
        Compile a create table command.
        """
        columns = ', '.join(self._get_columns(blueprint))

        sql = 'CREATE TABLE %s (%s' % (self.wrap_table(blueprint), columns)

        sql += self._add_foreign_keys(blueprint)

        sql += self._add_primary_keys(blueprint)

        return sql + ')'

    def _add_foreign_keys(self, blueprint):
        """Render the inline FOREIGN KEY clauses for CREATE TABLE."""
        sql = ''

        foreigns = self._get_commands_by_name(blueprint, 'foreign')

        for foreign in foreigns:
            sql += self._get_foreign_key(foreign)

            if foreign.get('on_delete'):
                sql += ' ON DELETE %s' % foreign.on_delete

            if foreign.get('on_update'):
                # BUG FIX: previously interpolated foreign.on_delete here,
                # so ON UPDATE clauses silently used the ON DELETE action.
                sql += ' ON UPDATE %s' % foreign.on_update

        return sql

    def _get_foreign_key(self, foreign):
        """Render one ``, FOREIGN KEY(...) REFERENCES ...`` fragment."""
        on = self.wrap_table(foreign.on)

        columns = self.columnize(foreign.columns)

        references = foreign.references
        if not isinstance(references, list):
            references = [references]

        on_columns = self.columnize(references)

        return ', FOREIGN KEY(%s) REFERENCES %s(%s)' % (
            columns, on, on_columns)

    def _add_primary_keys(self, blueprint):
        """Render the inline PRIMARY KEY clause, or '' if none declared."""
        primary = self._get_command_by_name(blueprint, 'primary')

        if primary:
            columns = self.columnize(primary.columns)

            return ', PRIMARY KEY (%s)' % columns

        return ''

    def compile_add(self, blueprint, command, _):
        """Compile ADD COLUMN commands -- one ALTER TABLE per column."""
        table = self.wrap_table(blueprint)

        columns = self.prefix_list('ADD COLUMN', self._get_columns(blueprint))

        statements = []

        for column in columns:
            statements.append('ALTER TABLE %s %s' % (table, column))

        return statements

    def compile_unique(self, blueprint, command, _):
        """Compile a unique index creation command."""
        columns = self.columnize(command.columns)

        table = self.wrap_table(blueprint)

        return 'CREATE UNIQUE INDEX %s ON %s (%s)' % (
            command.index, table, columns)

    def compile_index(self, blueprint, command, _):
        """Compile a plain index creation command."""
        columns = self.columnize(command.columns)

        table = self.wrap_table(blueprint)

        return 'CREATE INDEX %s ON %s (%s)' % (command.index, table, columns)

    def compile_foreign(self, blueprint, command, _):
        """No-op: SQLite cannot add foreign keys after table creation."""
        pass

    def compile_drop(self, blueprint, command, _):
        """Compile a drop table command."""
        return 'DROP TABLE %s' % self.wrap_table(blueprint)

    def compile_drop_if_exists(self, blueprint, command, _):
        """Compile a drop table (if exists) command."""
        return 'DROP TABLE IF EXISTS %s' % self.wrap_table(blueprint)

    def compile_drop_column(self, blueprint, command, connection):
        """Compile drop column commands via the dbal table-diff machinery."""
        schema = connection.get_schema_manager()

        table_diff = self._get_table_diff(blueprint, schema)

        for name in command.columns:
            column = connection.get_column(blueprint.get_table(), name)

            table_diff.removed_columns[name] = column

        return schema.get_database_platform().get_alter_table_sql(table_diff)

    def compile_drop_unique(self, blueprint, command, _):
        """Compile a drop unique index command."""
        return 'DROP INDEX %s' % command.index

    def compile_drop_index(self, blueprint, command, _):
        """Compile a drop index command."""
        return 'DROP INDEX %s' % command.index

    def compile_rename(self, blueprint, command, _):
        """Compile a rename table command."""
        from_ = self.wrap_table(blueprint)

        return 'ALTER TABLE %s RENAME TO %s' % (
            from_, self.wrap_table(command.to))

    # Why keep comments inside the SQLite type strings?  Multiple orator
    # types map onto the same SQLite storage class; without the embedded
    # comment the schema dumper could not recover the origin type.
    def _type_char(self, column):
        return 'VARCHAR /*char(%%s,%s)*/' % column.length

    def _type_string(self, column):
        return 'VARCHAR /*string(%%s,%s)*/' % column.length

    def _type_text(self, column):
        return 'TEXT /*text(%s)*/'

    def _type_medium_text(self, column):
        return 'TEXT /*medium_text(%s)*/'

    def _type_long_text(self, column):
        return 'TEXT /*long_text(%s)*/'

    def _type_integer(self, column):
        return 'INTEGER /*integer(%s)*/'

    def _type_big_integer(self, column):
        return 'INTEGER /*big_integer(%s)*/'

    def _type_medium_integer(self, column):
        return 'INTEGER /*medium_integer(%s)*/'

    def _type_tiny_integer(self, column):
        return 'TINYINT /*tiny_integer(%s)*/'

    def _type_small_integer(self, column):
        return 'INTEGER /*small_integer(%s)*/'

    def _type_float(self, column):
        return 'FLOAT /*float(%s)*/'

    def _type_double(self, column):
        if column.total and column.places:
            return 'FLOAT /*double(%%s,%s,%s)*/' % (
                column.total, column.places)

        return 'FLOAT /*double(%s)*/'

    def _type_decimal(self, column):
        return 'NUMERIC /*DECIMAL(%%s,%s,%s)*/' % (
            column.total, column.places)

    def _type_boolean(self, column):
        return 'TINYINT /*boolean(%s)*/'

    def _type_enum(self, column):
        return 'VARCHAR /*enum(%%s,%s)*/' % column.allowed

    def _type_json(self, column):
        return 'TEXT /*json(%s)*/'

    def _type_date(self, column):
        return 'DATE /*date(%s)*/'

    def _type_datetime(self, column):
        return 'DATETIME /*datetime(%s)*/'

    def _type_time(self, column):
        return 'TIME /*time(%s)*/'

    def _type_timestamp(self, column):
        if column.use_current:
            return 'DATETIME /*timestamp(%s)*/ DEFAULT CURRENT_TIMESTAMP'

        return 'DATETIME /*timestamp(%s)*/'

    def _type_binary(self, column):
        return 'BLOB /*binary*/'

    def _modify_nullable(self, blueprint, column):
        if column.get('nullable'):
            return ' NULL'

        return ' NOT NULL'

    def _modify_unsigned(self, blueprint, column):
        # SQLite doesn't have unsigned,
        # but the schema dumper needs this info.
        if column.get('unsigned', False):
            return ' /*unsigned*/'

        return ''

    def _modify_default(self, blueprint, column):
        if column.get('default') is not None:
            return ' DEFAULT %s' % self._get_default_value(column.default)

        return ''

    def _modify_increment(self, blueprint, column):
        if column.type in self._serials and column.auto_increment:
            return ' PRIMARY KEY AUTOINCREMENT'

        return ''

    def _get_dbal_column_type(self, type_):
        """
        Get the dbal column type.

        :param type_: The fluent type
        :type type_: str

        :rtype: str
        """
        type_ = type_.lower()

        if type_ == 'enum':
            return 'string'

        return super()._get_dbal_column_type(type_)

    def _list_tables(self):
        """Return SQL listing the names of all user tables."""
        # NOTE: double quotes around "table" rely on SQLite's legacy
        # fallback of treating unresolvable double-quoted identifiers as
        # string literals; single quotes would be the standard spelling.
        sql = """\
        SELECT name AS table_name
        FROM sqlite_master
        WHERE type="table"
        """

        return sql

    def _list_columns(self, table):
        """Return SQL describing the columns of *table*."""
        sql = """\
        PRAGMA table_info('{}');
        """.format(table)

        return sql

    def _plain_sql(self, column):
        """Return SQL fetching the original CREATE TABLE statement."""
        sql = """\
        SELECT sql
        FROM sqlite_master
        WHERE type = 'table'
        AND name = '{}'
        """.format(column)

        return sql

    def _list_indexes(self, table):
        """Return SQL listing the indexes of *table*."""
        sql = """\
        PRAGMA index_list('{}')
        """.format(table)

        return sql

    def _show_index(self, index):
        """Return SQL describing the columns of *index*."""
        sql = """\
        PRAGMA index_info('{}')
        """.format(index)

        return sql

    def _list_foreign_keys(self, table):
        """Return SQL listing the foreign keys of *table*."""
        sql = """\
        PRAGMA foreign_key_list('{}')
        """.format(table)

        return sql
| 10,161 | 3,073 |
#
# PySNMP MIB module ASCEND-MIBVDSL-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ASCEND-MIBVDSL-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:28:53 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): pysmi-generated module -- ``mibBuilder`` is injected by the
# pysnmp MIB loader at execution time, not defined in this file.
configuration, = mibBuilder.importSymbols("ASCEND-MIB", "configuration")
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Integer32, Bits, TimeTicks, IpAddress, ModuleIdentity, Gauge32, iso, MibIdentifier, Counter32, NotificationType, Counter64, Unsigned32, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "Bits", "TimeTicks", "IpAddress", "ModuleIdentity", "Gauge32", "iso", "MibIdentifier", "Counter32", "NotificationType", "Counter64", "Unsigned32", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# NOTE(review): shadows the DisplayString imported from SNMPv2-TC above --
# a common pysmi generation artifact; left untouched.
class DisplayString(OctetString):
    pass

# mibvdslLineStatus profile table and its index columns.
mibvdslLineStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 529, 23, 161))
mibvdslLineStatusTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 161, 1), )
if mibBuilder.loadTexts: mibvdslLineStatusTable.setStatus('mandatory')
if mibBuilder.loadTexts: mibvdslLineStatusTable.setDescription('A list of mibvdslLineStatus profile entries.')
mibvdslLineStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1), ).setIndexNames((0, "ASCEND-MIBVDSL-MIB", "vdslLineStatus-Shelf-o"), (0, "ASCEND-MIBVDSL-MIB", "vdslLineStatus-Slot-o"), (0, "ASCEND-MIBVDSL-MIB", "vdslLineStatus-Item-o"))
if mibBuilder.loadTexts: mibvdslLineStatusEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mibvdslLineStatusEntry.setDescription('A mibvdslLineStatus entry containing objects that maps to the parameters of mibvdslLineStatus profile.')
vdslLineStatus_Shelf_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 1), Integer32()).setLabel("vdslLineStatus-Shelf-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_Shelf_o.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_Shelf_o.setDescription('')
vdslLineStatus_Slot_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 2), Integer32()).setLabel("vdslLineStatus-Slot-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_Slot_o.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_Slot_o.setDescription('')
vdslLineStatus_Item_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 3), Integer32()).setLabel("vdslLineStatus-Item-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_Item_o.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_Item_o.setDescription('')
# Physical address of the line (shelf / slot / item).
vdslLineStatus_PhysicalAddress_Shelf = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("anyShelf", 1), ("shelf1", 2), ("shelf2", 3), ("shelf3", 4), ("shelf4", 5), ("shelf5", 6), ("shelf6", 7), ("shelf7", 8), ("shelf8", 9), ("shelf9", 10)))).setLabel("vdslLineStatus-PhysicalAddress-Shelf").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslLineStatus_PhysicalAddress_Shelf.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_PhysicalAddress_Shelf.setDescription('The number of the shelf that the addressed physical device resides on.')
vdslLineStatus_PhysicalAddress_Slot = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 55, 56, 57, 58, 49, 50, 42, 53, 54, 45, 46, 51, 59))).clone(namedValues=NamedValues(("anySlot", 1), ("slot1", 2), ("slot2", 3), ("slot3", 4), ("slot4", 5), ("slot5", 6), ("slot6", 7), ("slot7", 8), ("slot8", 9), ("slot9", 10), ("slot10", 11), ("slot11", 12), ("slot12", 13), ("slot13", 14), ("slot14", 15), ("slot15", 16), ("slot16", 17), ("slot17", 18), ("slot18", 19), ("slot19", 20), ("slot20", 21), ("slot21", 22), ("slot22", 23), ("slot23", 24), ("slot24", 25), ("slot25", 26), ("slot26", 27), ("slot27", 28), ("slot28", 29), ("slot29", 30), ("slot30", 31), ("slot31", 32), ("slot32", 33), ("slot33", 34), ("slot34", 35), ("slot35", 36), ("slot36", 37), ("slot37", 38), ("slot38", 39), ("slot39", 40), ("slot40", 41), ("aLim", 55), ("bLim", 56), ("cLim", 57), ("dLim", 58), ("leftController", 49), ("rightController", 50), ("controller", 42), ("firstControlModule", 53), ("secondControlModule", 54), ("trunkModule1", 45), ("trunkModule2", 46), ("controlModule", 51), ("slotPrimary", 59)))).setLabel("vdslLineStatus-PhysicalAddress-Slot").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslLineStatus_PhysicalAddress_Slot.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_PhysicalAddress_Slot.setDescription('The number of the slot that the addressed physical device resides on.')
vdslLineStatus_PhysicalAddress_ItemNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 6), Integer32()).setLabel("vdslLineStatus-PhysicalAddress-ItemNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslLineStatus_PhysicalAddress_ItemNumber.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_PhysicalAddress_ItemNumber.setDescription('A number that specifies an addressable entity within the context of shelf and slot.')
vdslLineStatus_LineState = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("doesNotExist", 1), ("disabled", 2), ("active", 3)))).setLabel("vdslLineStatus-LineState").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_LineState.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_LineState.setDescription('The overall state of the line.')
# Spare (backup) physical address and line-sparing state.
vdslLineStatus_SparePhysicalAddress_Shelf = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("anyShelf", 1), ("shelf1", 2), ("shelf2", 3), ("shelf3", 4), ("shelf4", 5), ("shelf5", 6), ("shelf6", 7), ("shelf7", 8), ("shelf8", 9), ("shelf9", 10)))).setLabel("vdslLineStatus-SparePhysicalAddress-Shelf").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslLineStatus_SparePhysicalAddress_Shelf.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_SparePhysicalAddress_Shelf.setDescription('The number of the shelf that the addressed physical device resides on.')
vdslLineStatus_SparePhysicalAddress_Slot = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 55, 56, 57, 58, 49, 50, 42, 53, 54, 45, 46, 51, 59))).clone(namedValues=NamedValues(("anySlot", 1), ("slot1", 2), ("slot2", 3), ("slot3", 4), ("slot4", 5), ("slot5", 6), ("slot6", 7), ("slot7", 8), ("slot8", 9), ("slot9", 10), ("slot10", 11), ("slot11", 12), ("slot12", 13), ("slot13", 14), ("slot14", 15), ("slot15", 16), ("slot16", 17), ("slot17", 18), ("slot18", 19), ("slot19", 20), ("slot20", 21), ("slot21", 22), ("slot22", 23), ("slot23", 24), ("slot24", 25), ("slot25", 26), ("slot26", 27), ("slot27", 28), ("slot28", 29), ("slot29", 30), ("slot30", 31), ("slot31", 32), ("slot32", 33), ("slot33", 34), ("slot34", 35), ("slot35", 36), ("slot36", 37), ("slot37", 38), ("slot38", 39), ("slot39", 40), ("slot40", 41), ("aLim", 55), ("bLim", 56), ("cLim", 57), ("dLim", 58), ("leftController", 49), ("rightController", 50), ("controller", 42), ("firstControlModule", 53), ("secondControlModule", 54), ("trunkModule1", 45), ("trunkModule2", 46), ("controlModule", 51), ("slotPrimary", 59)))).setLabel("vdslLineStatus-SparePhysicalAddress-Slot").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslLineStatus_SparePhysicalAddress_Slot.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_SparePhysicalAddress_Slot.setDescription('The number of the slot that the addressed physical device resides on.')
vdslLineStatus_SparePhysicalAddress_ItemNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 10), Integer32()).setLabel("vdslLineStatus-SparePhysicalAddress-ItemNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslLineStatus_SparePhysicalAddress_ItemNumber.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_SparePhysicalAddress_ItemNumber.setDescription('A number that specifies an addressable entity within the context of shelf and slot.')
vdslLineStatus_SparingState = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("sparingNone", 1), ("primaryActive", 2), ("primaryInactive", 3), ("secondaryActive", 4), ("secondaryInactive", 5)))).setLabel("vdslLineStatus-SparingState").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_SparingState.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_SparingState.setDescription('The sparing state of the line.')
vdslLineStatus_SparingChangeReason = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("unknown", 1), ("manual", 2), ("automatic", 3), ("test", 4)))).setLabel("vdslLineStatus-SparingChangeReason").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_SparingChangeReason.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_SparingChangeReason.setDescription('The reason for the last sparing state change.')
vdslLineStatus_SparingChangeTime = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 13), Integer32()).setLabel("vdslLineStatus-SparingChangeTime").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_SparingChangeTime.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_SparingChangeTime.setDescription('The time of the last sparing state change.')
vdslLineStatus_SparingChangeCounter = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 14), Integer32()).setLabel("vdslLineStatus-SparingChangeCounter").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_SparingChangeCounter.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_SparingChangeCounter.setDescription('The number of sparing state changes.')
# ATM circuit parameters for the line.
vdslLineStatus_VpiVciRange = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(6, 1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("vpi01Vci321023", 6), ("vpi03Vci32511", 1), ("vpi07Vci32255", 2), ("vpi015Vci32127", 3), ("vpi031Vci3263", 4), ("notApplicable", 5)))).setLabel("vdslLineStatus-VpiVciRange").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_VpiVciRange.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_VpiVciRange.setDescription('The valid range of vpi and vci for the circuits established for the line. This range can change only after LIM reboot.')
vdslLineStatus_VpSwitchingVpi = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 16), Integer32()).setLabel("vdslLineStatus-VpSwitchingVpi").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_VpSwitchingVpi.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_VpSwitchingVpi.setDescription('VPI to be used for the VP switching. Rest of the VPIs will be used for the VC switching.')
# Physical port status objects.
vdslLineStatus_PhysicalStatus_IfGroupIndex = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 17), Integer32()).setLabel("vdslLineStatus-PhysicalStatus-IfGroupIndex").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatus_IfGroupIndex.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatus_IfGroupIndex.setDescription('Interface groups index assigned to this physical port.')
vdslLineStatus_PhysicalStatus_UnitType = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3))).clone(namedValues=NamedValues(("dslCoe", 2), ("dslCpe", 3)))).setLabel("vdslLineStatus-PhysicalStatus-UnitType").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatus_UnitType.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatus_UnitType.setDescription('Unit types defines if the node is operating Central Office or Customer Premise equipment software.')
vdslLineStatus_PhysicalStatus_DevLineState = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("dslStatDwnInd", 2), ("dslStatWaitInit", 3), ("dslStatInit", 4), ("dslStatUpInd", 5), ("dslStatFailInd", 6), ("dslStatLback", 7), ("dslStatTest", 8), ("dslStatDownload", 9), ("dslStatNumberStates", 10)))).setLabel("vdslLineStatus-PhysicalStatus-DevLineState").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatus_DevLineState.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatus_DevLineState.setDescription('Display of current interface state.')
vdslLineStatus_PhysicalStatus_OpUpRates = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 27), Integer32()).setLabel("vdslLineStatus-PhysicalStatus-OpUpRates").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatus_OpUpRates.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatus_OpUpRates.setDescription('Display operational up rate.')
vdslLineStatus_PhysicalStatus_OpDownRates = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 28), Integer32()).setLabel("vdslLineStatus-PhysicalStatus-OpDownRates").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatus_OpDownRates.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatus_OpDownRates.setDescription('Display operational down rate.')
vdslLineStatus_PhysicalStatus_FirmwareVer = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 29), DisplayString()).setLabel("vdslLineStatus-PhysicalStatus-FirmwareVer").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatus_FirmwareVer.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatus_FirmwareVer.setDescription('Firmware version ID.')
# Physical line statistics.
vdslLineStatus_PhysicalStatistic_LineUpTimer_Days = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 20), Integer32()).setLabel("vdslLineStatus-PhysicalStatistic-LineUpTimer-Days").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatistic_LineUpTimer_Days.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatistic_LineUpTimer_Days.setDescription('Number of days that the DSL line has been up.')
vdslLineStatus_PhysicalStatistic_LineUpTimer_Hours = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 21), Integer32()).setLabel("vdslLineStatus-PhysicalStatistic-LineUpTimer-Hours").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatistic_LineUpTimer_Hours.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatistic_LineUpTimer_Hours.setDescription('Number of hours that the DSL line has been up.')
vdslLineStatus_PhysicalStatistic_LineUpTimer_Minutes = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 22), Integer32()).setLabel("vdslLineStatus-PhysicalStatistic-LineUpTimer-Minutes").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatistic_LineUpTimer_Minutes.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatistic_LineUpTimer_Minutes.setDescription('Number of minutes that the DSL line has been up.')
vdslLineStatus_PhysicalStatistic_RxSignalPresent = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(3, 2))).clone(namedValues=NamedValues(("yes", 3), ("no", 2)))).setLabel("vdslLineStatus-PhysicalStatistic-RxSignalPresent").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatistic_RxSignalPresent.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatistic_RxSignalPresent.setDescription('State if the receive signal is present or not.')
vdslLineStatus_PhysicalStatistic_UpDwnCntr = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 24), Integer32()).setLabel("vdslLineStatus-PhysicalStatistic-UpDwnCntr").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatistic_UpDwnCntr.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatistic_UpDwnCntr.setDescription('Interface Up Down counter value displays the number of times the interface trasitions from a down to up state.')
vdslLineStatus_PhysicalStatistic_SelfTest = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4))).clone(namedValues=NamedValues(("dslSelfTestNone", 2), ("dslSelfTestFailed", 3), ("dslSelfTestPassed", 4)))).setLabel("vdslLineStatus-PhysicalStatistic-SelfTest").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatistic_SelfTest.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_PhysicalStatistic_SelfTest.setDescription('Hardware/firmware self test results.')
vdslLineStatus_Action_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 161, 1, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noAction", 1), ("createProfile", 2), ("deleteProfile", 3)))).setLabel("vdslLineStatus-Action-o").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslLineStatus_Action_o.setStatus('mandatory')
if mibBuilder.loadTexts: vdslLineStatus_Action_o.setDescription('')
# Re-export every object so other generated MIB modules can import them.
mibBuilder.exportSymbols("ASCEND-MIBVDSL-MIB", vdslLineStatus_PhysicalAddress_Slot=vdslLineStatus_PhysicalAddress_Slot, vdslLineStatus_PhysicalAddress_ItemNumber=vdslLineStatus_PhysicalAddress_ItemNumber, vdslLineStatus_Slot_o=vdslLineStatus_Slot_o, vdslLineStatus_PhysicalStatus_FirmwareVer=vdslLineStatus_PhysicalStatus_FirmwareVer, vdslLineStatus_PhysicalStatistic_SelfTest=vdslLineStatus_PhysicalStatistic_SelfTest, vdslLineStatus_PhysicalStatus_OpDownRates=vdslLineStatus_PhysicalStatus_OpDownRates, vdslLineStatus_VpSwitchingVpi=vdslLineStatus_VpSwitchingVpi, vdslLineStatus_SparePhysicalAddress_ItemNumber=vdslLineStatus_SparePhysicalAddress_ItemNumber, vdslLineStatus_VpiVciRange=vdslLineStatus_VpiVciRange, vdslLineStatus_Shelf_o=vdslLineStatus_Shelf_o, vdslLineStatus_Item_o=vdslLineStatus_Item_o, vdslLineStatus_PhysicalStatistic_LineUpTimer_Hours=vdslLineStatus_PhysicalStatistic_LineUpTimer_Hours, DisplayString=DisplayString, vdslLineStatus_SparingChangeCounter=vdslLineStatus_SparingChangeCounter, mibvdslLineStatusEntry=mibvdslLineStatusEntry, vdslLineStatus_LineState=vdslLineStatus_LineState, mibvdslLineStatusTable=mibvdslLineStatusTable, vdslLineStatus_PhysicalAddress_Shelf=vdslLineStatus_PhysicalAddress_Shelf, vdslLineStatus_PhysicalStatus_UnitType=vdslLineStatus_PhysicalStatus_UnitType, vdslLineStatus_PhysicalStatistic_UpDwnCntr=vdslLineStatus_PhysicalStatistic_UpDwnCntr, vdslLineStatus_Action_o=vdslLineStatus_Action_o, vdslLineStatus_PhysicalStatistic_RxSignalPresent=vdslLineStatus_PhysicalStatistic_RxSignalPresent, vdslLineStatus_SparePhysicalAddress_Slot=vdslLineStatus_SparePhysicalAddress_Slot, vdslLineStatus_PhysicalStatistic_LineUpTimer_Minutes=vdslLineStatus_PhysicalStatistic_LineUpTimer_Minutes, vdslLineStatus_PhysicalStatus_DevLineState=vdslLineStatus_PhysicalStatus_DevLineState, vdslLineStatus_SparingChangeReason=vdslLineStatus_SparingChangeReason, vdslLineStatus_SparingState=vdslLineStatus_SparingState, 
vdslLineStatus_SparingChangeTime=vdslLineStatus_SparingChangeTime, vdslLineStatus_PhysicalStatus_IfGroupIndex=vdslLineStatus_PhysicalStatus_IfGroupIndex, vdslLineStatus_PhysicalStatistic_LineUpTimer_Days=vdslLineStatus_PhysicalStatistic_LineUpTimer_Days, mibvdslLineStatus=mibvdslLineStatus, vdslLineStatus_SparePhysicalAddress_Shelf=vdslLineStatus_SparePhysicalAddress_Shelf, vdslLineStatus_PhysicalStatus_OpUpRates=vdslLineStatus_PhysicalStatus_OpUpRates)
| 21,050 | 8,690 |
import time

# Compare the available clocks: wall-clock time, CPU time, and the
# high-resolution performance counter, printed in that order.
print(time.time())
# time.clock was deprecated in Python 3.3 and removed in 3.8; use
# time.perf_counter or time.process_time instead.
# print(time.clock())
print(time.process_time())
print(time.perf_counter())
| 262 | 82 |
import unittest
import strongr.clouddomain.model.gateways
import strongr.core
import strongr.core.domain.clouddomain
class TestInterDomainEvent(unittest.TestCase):
    """Checks that an intra-domain SaltJobFinished event reaches the
    inter-domain publisher's 'jobfinished' subscription."""

    def test_salt_job_finished_escalation_to_inter(self):
        # Factory for events that originate inside the cloud domain.
        intra_domain_event_factory = strongr.clouddomain.model.gateways.Gateways.intra_domain_event_factory()
        cloudService = strongr.core.domain.clouddomain.CloudDomain.cloudService() # this initializes event subscribers within the domain
        event = intra_domain_event_factory.newSaltJobFinished('1', 'Test return', 0)
        # NOTE(review): the callback asserts True unconditionally, so the test
        # can only fail if publish/subscribe raises; it does not verify that
        # the callback was actually invoked — consider recording the call.
        strongr.core.Core.inter_domain_events_publisher().subscribe(strongr.core.domain.clouddomain.CloudDomain.events()['jobfinished'], (
            lambda event: self.assertTrue(True)
        ))
        # Publishing on the intra-domain bus should escalate inter-domain.
        strongr.clouddomain.model.gateways.Gateways.intra_domain_events_publisher().publish(event)
| 857 | 270 |
#!/usr/bin/env python
'''
Sync markdown notes with an external folder like dropbox.
Notes will be named *.txt in the external folder and *.md in git.
The script will use proper git commands to do things such as delete or add new files to git, and reflect any git-side changes in the external folder.
This script is intended for use with cron to keep the external folder synced up.
'''
import sys, optparse
def parseCmdLine():
    '''
    Parse the command-line invocation and return (options, args).
    '''
    parser = optparse.OptionParser(
        usage='%prog',
        version='%prog v0.0',
        description=__doc__)
    return parser.parse_args()
def main():
    """Entry point: echo the positional arguments, space separated.

    Returns 0 on success.
    """
    opts, args = parseCmdLine()
    # print() function form works on both Python 2 and 3; the original
    # used the Python 2-only `print` statement.
    print(' '.join(args))
    return 0
if __name__ == '__main__':
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        # print() function form replaces the Python 2-only print statement.
        print('Interrupted by User.')
        sys.exit(1)
| 967 | 268 |
import abc
import logging
import zwoasi
from catkit.config import CONFIG_INI
import catkit.hardware.zwo.ZwoCamera
# Convert zwoasi module to a class such that it can be inherited.
# type() builds a new class whose attributes are the module's top-level
# names, so subclasses can selectively override library functions.
ZwoASI = type("ZwoASI", (), zwoasi.__dict__)
class ZwoEmulator(ZwoASI):
    """Emulate the zwoasi library so camera code can run without hardware.

    An instance doubles as both the module-level API (``get_num_cameras``,
    ``Camera``) and the camera object itself (``Camera`` returns ``self``).
    """

    # Subclasses list the camera purposes they emulate (resolved via config).
    implemented_camera_purposes = None

    @classmethod
    def get_camera_mappings(cls):
        """Map each configured camera id to its purpose and name."""
        # Find all cameras
        camera_mappings = {}
        for camera_purpose in cls.implemented_camera_purposes:
            camera_config_id = CONFIG_INI.get("testbed", camera_purpose)
            camera_name = CONFIG_INI.get(camera_config_id, 'camera_name')
            camera_mappings[camera_config_id] = {"purpose": camera_purpose, "name": camera_name}
        return camera_mappings

    def __init__(self, config_id):
        self.log = logging.getLogger()
        self.config_id = config_id
        self.image_type = None
        self.control_values = {}
        self.camera_mappings = self.get_camera_mappings()
        if self.config_id not in self.camera_mappings:
            raise ValueError(f"Unknown camera for simulations: {self.config_id}")
        self.camera_purpose = self.camera_mappings[self.config_id]["purpose"]

    def init(self, library_file=None):
        pass

    @classmethod
    def get_num_cameras(cls):
        return len(cls.implemented_camera_purposes)

    def list_cameras(self):
        return [camera["name"] for camera in self.camera_mappings.values()]

    def Camera(self, id_):
        # The emulator plays the role of every camera instance.
        return self

    def get_controls(self):
        # only used for open behavior to get / set control values to default
        # on open; needs to play nicely with calls to set_controls.
        # This phony dict is set to have *some* accessible value (None) for
        # every dict key we ask for.
        return {'BandWidth': {'MinValue': None, 'ControlType': None, 'DefaultValue': None}}

    def set_control_value(self, control_type, value, auto=False):
        accepted_types = (int,)
        if value is not None and not isinstance(value, accepted_types):
            raise ValueError(f"Expected type {accepted_types} got '{type(value)}'")
        self.control_values[control_type] = value

    def start_video_capture(self):
        pass

    def stop_video_capture(self):
        pass

    def stop_exposure(self):
        pass

    def set_image_type(self, image_type):
        self.image_type = image_type

    @abc.abstractmethod
    def capture(self, initial_sleep=0.01, poll=0.01, buffer=None, filename=None):
        pass

    def capture_video_frame(self, buffer=None, filename=None, timeout=None):
        return self.capture(buffer=buffer, filename=filename)

    def close(self):
        pass

    def get_camera_property(self):
        # Sometimes this just gets logged; one caller needs the keys
        # 'MaxWidth' and 'MaxHeight', so return placeholder values taken
        # from the __setup_control_values sim function.
        # (An unreachable `pass` that followed this return was removed.)
        return {'MaxWidth': 4096, 'MaxHeight': 4096}

    def set_id(self, id, id_str):
        pass

    def set_roi(self, start_x=None, start_y=None, width=None, height=None, bins=None, image_type=None):
        # sets region of interest
        # purpose : set_roi_format, set_roi_start_position
        # set_roi_format --> _set_roi_format :
        #   check for all the errors
        #   runs zwolib.ASISetROIFormat(id_, width, height, bins, image_type)
        # set_roi_start_position --> _set_start_position :
        #   runs zwolib.ASISetStartPos(id_, start_x, start_y)
        pass  # according to Marshall
| 3,706 | 1,157 |
# Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorflow.python.keras.losses import Loss as tfLoss
from fastestimator.op.tensorop.loss import Loss
class MixUpLoss(Loss):
    """
    Loss for use with MixUpBatch mix-up training, which helps to reduce
    over-fitting, stabilize GAN training, and harden against adversarial
    attacks (https://arxiv.org/abs/1710.09412).
    """
    def __init__(self, loss, lam=None, y_true=None, y_pred=None, inputs=None, outputs="loss", mode=None):
        """
        Args:
            loss (func): A loss object (tf.losses) invokable as "loss(true, pred)".
                Its reduction method is overridden to 'none'.
            lam: The key of the lambda value generated by MixUpBatch
            y_true: ground truth label key
            y_pred: prediction label key
            inputs: A tuple or list like: [<lam>, <y_true>, <y_pred>]
            outputs: Where to store the computed loss value (not required under normal use cases)
            mode: 'train', 'eval', 'test', or None
        """
        assert isinstance(loss, tfLoss), "MixUpLoss requires a TensorFlow loss function"
        # Rebuild the loss with per-example (unreduced) output so the
        # lambda weighting below applies element-wise.
        config = loss.get_config()
        config['reduction'] = 'none'
        super().__init__(inputs=self.validate_loss_inputs(inputs, lam, y_true, y_pred),
                         outputs=outputs,
                         mode=mode)
        self.loss_obj = loss.from_config(config)

    def forward(self, data, state):
        lam, y_true, y_pred = data
        straight = self.loss_obj(y_true, y_pred)
        # Pairing each sample with its batch neighbour (roll by one).
        shuffled = self.loss_obj(tf.roll(y_true, shift=1, axis=0), y_pred)
        return lam * straight + (1.0 - lam) * shuffled
| 2,371 | 701 |
from .test_config import EMPTY
from scripta import parse
from unittest import TestCase
class ParseTest(TestCase):
    """Exercises scripta.parse.parse with a single source argument."""

    def test_parse(self):
        expected = dict(EMPTY, sources=['foo'], svg='')
        actual = vars(parse.parse(['foo']))
        assert expected == actual
| 276 | 79 |
from pybithumb.core import *
from pandas import DataFrame
import pandas as pd
import datetime
import math
class Bithumb:
@staticmethod
def _convert_unit(unit):
try:
unit = math.floor(unit * 10000) / 10000
return unit
except:
return 0
@staticmethod
def get_tickers(payment_currency="KRW"):
"""
빗썸이 지원하는 암호화폐의 리스트
:param payment_currency : KRW
:return:
"""
resp = None
try:
resp = PublicApi.ticker("ALL", payment_currency)
data = resp['data']
tickers = [k for k, v in data.items() if isinstance(v, dict)]
return tickers
except Exception:
return resp
@staticmethod
def get_current_price(order_currency, payment_currency="KRW"):
"""
최종 체결 가격 조회
:param order_currency : BTC/ETH/DASH/LTC/ETC/XRP/BCH/XMR/ZEC/QTUM/BTG/EOS/ICX/VEN/TRX/ELF/MITH/MCO/OMG/KNC
:param payment_currency : KRW
:return : price
"""
resp = None
try:
resp = PublicApi.ticker(order_currency, payment_currency)
if order_currency != "ALL":
return float(resp['data']['closing_price'])
else:
del resp["data"]['date']
return resp["data"]
except Exception:
return resp
| 1,502 | 524 |
import struct
import zlib
from wrpg.piaf.common import (
header_structure,
file_entry_structure,
file_entry_size,
get_data_offset,
header_size,
header_check_size,
header_data_size)
class ParserError(Exception):
    """Base class for errors raised while parsing a PIAF archive."""
    pass


class ParserMagicHeaderError(ParserError):
    """The buffer does not start with the expected magic string."""
    pass


class ParserChecksumError(ParserError):
    """A header or file-table CRC32 does not match its stored value."""
    pass


class ParserDatasizeError(ParserError):
    """The buffer length disagrees with the declared data size."""
    pass
def load_data(buffer, archive, file_sizes):
    """Slice each file's payload out of *buffer* into its file entry.

    Payloads are laid out back to back starting at the archive's data
    offset; ``file_sizes`` gives the length of each entry in order.
    """
    # The data start depends only on the number of entries, so compute it
    # once (the original recomputed it on every loop iteration).
    data_start = get_data_offset(len(archive["file_entries"]))
    data_offset = 0
    for f, file_size in zip(archive["file_entries"], file_sizes):
        begin = data_start + data_offset
        f["data"] = buffer[begin: begin + file_size]
        data_offset += file_size
def unpack_archive(buffer):
    """Parse a PIAF archive from *buffer*.

    Returns a dict with the archive version and its file entries
    (type, compression and raw data), validating the magic string and
    both CRC32 checksums along the way.

    Raises:
        ParserMagicHeaderError: wrong magic string.
        ParserChecksumError: header or file-table checksum mismatch.
        ParserDatasizeError: buffer length disagrees with declared size.
    """
    def parse_header():
        # Fixed-size header at the very start of the buffer.
        header = buffer[:header_size()]
        ( magic_header,
          header_checksum,
          filetable_checksum,
          version,
          nb_files,
          data_size ) = struct.unpack(header_structure(), header)
        magic_header = magic_header.decode('utf-8')
        archive = {
            "version": version }
        if magic_header != 'WRPGPIAF':
            raise ParserMagicHeaderError('Bad Magic Header')
        # CRC of the header region that follows the check fields; mask to
        # 32 bits for a platform-independent unsigned value.
        calculated_header_checksum = zlib.crc32(buffer[
            header_check_size()
            :header_check_size()+header_data_size() ]) & 0xffffffff
        if calculated_header_checksum != header_checksum:
            raise ParserChecksumError('Bad Header Checksum : {} != {}'
                .format(calculated_header_checksum, header_checksum))
        # The file table sits right after the header: nb_files entries of
        # fixed size.
        calculated_file_table_checksum = zlib.crc32(buffer[
            header_size()
            :header_size()+nb_files*file_entry_size()]
            ) & 0xffffffff
        if calculated_file_table_checksum != filetable_checksum:
            raise ParserChecksumError('Bad Filetable Checksum : {} != {}'
                .format(calculated_file_table_checksum, filetable_checksum))
        # Whole-buffer sanity check: header + table + payload must add up.
        if len(buffer) != data_size + get_data_offset(nb_files):
            raise ParserDatasizeError('Bad Data Size')
        return archive, nb_files
    def parse_filetable():
        # Decode each fixed-size file entry; sizes are returned separately
        # so load_data() can slice the payload region afterwards.
        result = []
        file_sizes = []
        for i in range(nb_files):
            file_entry_offset = header_size()+file_entry_size()*i
            file_name, file_type, compression_type, file_size, data_offset =\
                struct.unpack(
                    file_entry_structure(),
                    buffer[ file_entry_offset: file_entry_offset+file_entry_size()]
                )
            file_entry = { "file_type": file_type,
                           "compression_type": compression_type }
            result.append(file_entry)
            file_sizes.append(file_size)
        return result, file_sizes
    archive, nb_files = parse_header()
    archive["file_entries"], file_sizes = parse_filetable()
    load_data(buffer, archive, file_sizes)
    return archive
| 2,959 | 885 |
# Generated by Django 3.0.5 on 2020-05-06 22:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Change NotificationModel.entity to a plain IntegerField."""

    dependencies = [
        ('myapp', '0008_notificationmodel'),
    ]

    operations = [
        migrations.AlterField(
            model_name='notificationmodel',
            name='entity',
            field=models.IntegerField(),
        ),
    ]
| 384 | 124 |
import numpy as np
import cv2

# Load the image as grayscale (flag 0 == cv2.IMREAD_GRAYSCALE).
image = cv2.imread('city2.jpg', 0)
height, width = image.shape
# Rotate 90 degrees about the image centre, without scaling.
rotation = cv2.getRotationMatrix2D((width / 2, height / 2), 90, 1)
rotated = cv2.warpAffine(image, rotation, (width, height))
cv2.imshow('image cv2', rotated)
cv2.waitKey(0)
# to save the image
# cv2.imwrite('image1.png', image)
cv2.destroyAllWindows()
# Copyright 2021, Peter Birch, mailto:peter@lightlogic.co.uk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..io_common import BaseIO
class AXI4StreamIO(BaseIO):
    """ AXI4 stream interface """

    def __init__(self, dut, name, role):
        """ Initialise AXI4StreamIO.

        Args:
            dut : Pointer to the DUT boundary
            name: Name of the signal - acts as a prefix
            role: Role of this signal on the DUT boundary
        """
        # Forward-direction signals carry the stream payload and sideband;
        # the reverse direction carries only the ready handshake.
        forward = ["tvalid", "tdata", "tstrb", "tkeep", "tlast", "tid",
                   "tdest", "tuser", "twakeup"]
        reverse = ["tready"]
        super().__init__(dut, name, role, forward, reverse)
| 1,160 | 357 |
#coding:utf-8
import pandas as pd
import matplotlib.pyplot as plt
import pyecharts.options as opts
from pyecharts.charts import Line

# Historical S&P 500 P/E ratio data, tab separated.
frame = pd.read_csv(u'标普500市盈率历史数据.txt', sep='\t')
ratios = frame['市盈率 (PE Ratio)']
plt.plot(ratios.values)
plt.show()
import tensorflow as tf
import tensorflow.contrib.slim as slim
from util.util import *
def generator(inputs, scope='g_net', n_levels=2):
    """Multi-scale deblurring generator.

    Runs a shared encoder/decoder over `n_levels` image scales, coarse to
    fine, feeding each level's prediction (gradient-stopped) into the next
    level as extra input. Weights are shared across levels via AUTO_REUSE.

    Args:
        inputs: 4-D image batch tensor of shape (n, h, w, c).
        scope: variable scope name.
        n_levels: number of coarse-to-fine scales.

    Returns:
        List of per-level prediction tensors, coarsest first.
    """
    n, h, w, c = inputs.get_shape().as_list()
    x_unwrap = []
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        with slim.arg_scope([slim.conv2d, slim.conv2d_transpose, slim.separable_conv2d],
                            activation_fn=parametric_relu, padding='SAME', normalizer_fn=None,
                            # activation_fn=parametric_relu, padding='SAME', normalizer_fn=tf.layers.batch_normalization,
                            weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True),
                            biases_initializer=tf.constant_initializer(0.0)):
            inp_pred = inputs
            for i in range(n_levels):
                scale = 0.5 ** (n_levels - i - 1)
                hi = int(round(h * scale))
                wi = int(round(w * scale))
                inp_blur = tf.image.resize_images(inputs, [hi, wi], method=0)
                inp_pred = tf.image.resize_images(inp_pred, [hi, wi], method=0)
                inp_pred = tf.stop_gradient(inp_pred)
                inp_all = tf.concat([inp_blur, inp_pred], axis=3, name='inp')
                # encoder
                # conv1_1 = slim.separable_conv2d(inp_all, 32, [5, 5], scope='enc1_1_dw')
                # (removed leftover debug print of inp_all here)
                conv0 = slim.conv2d(inp_all, 8, [5, 5], scope='enc0')
                net = slim.conv2d(conv0, 16, [5, 5], stride=2, scope='enc1_1')
                conv1 = ResBottleneckBlock(net, 16, 5, scope='enc1_2')
                net = res_bottleneck_dsconv(conv1, 32, 5, stride=2, scope='enc2_1')
                net = ResBottleneckBlock(net, 32, 5, scope='enc2_2')
                net = ResBottleneckBlock(net, 32, 5, scope='enc2_3')
                conv2 = ResBottleneckBlock(net, 32, 5, scope='enc2_4')
                net = res_bottleneck_dsconv(conv2, 64, 5, stride=2, scope='enc3_1')
                net = ResBottleneckBlock(net, 64, 5, scope='enc3_2')
                net = ResBottleneckBlock(net, 64, 5, scope='enc3_3')
                net = ResBottleneckBlock(net, 64, 5, scope='enc3_4')
                net = ResBottleneckBlock(net, 64, 5, scope='enc3_5')
                net = ResBottleneckBlock(net, 64, 5, scope='enc3_6')
                # decoder (mirrors the encoder, with skip connections)
                net = ResBottleneckBlock(net, 64, 5, scope='dec3_6')
                net = ResBottleneckBlock(net, 64, 5, scope='dec3_5')
                net = ResBottleneckBlock(net, 64, 5, scope='dec3_4')
                net = ResBottleneckBlock(net, 64, 5, scope='dec3_3')
                net = ResBottleneckBlock(net, 64, 5, scope='dec3_2')
                net = slim.conv2d_transpose(net, 32, [5, 5], stride=2, scope='dec3_1')
                net = net + conv2
                net = ResBottleneckBlock(net, 32, 5, scope='dec2_4')
                net = ResBottleneckBlock(net, 32, 5, scope='dec2_3')
                net = ResBottleneckBlock(net, 32, 5, scope='dec2_2')
                net = slim.conv2d_transpose(net, 16, [5, 5], stride=2, scope='dec2_1')
                net = net + conv1
                net = ResBottleneckBlock(net, 16, 5, scope='dec1_2')
                net = slim.conv2d_transpose(net, 8, [5, 5], stride=2, scope='dec1_1')
                net = net + conv0
                inp_pred = slim.conv2d(net, c, [5, 5], activation_fn=None, scope='dec0')
                x_unwrap.append(inp_pred)
    return x_unwrap
import dataclasses
import typing
class RawCode(typing.NamedTuple):
    """A code sample: its language tag and the source text."""
    lang: str
    code: str
@dataclasses.dataclass
class TaskModel:
    """Parsed representation of a task built from a RawTask."""
    # Original task object this model was derived from.
    raw_task: RawTask
    question: str
    answer: str
    gif_url: str
    # Code samples in one or more languages.
    raw_codes: typing.List[RawCode]
    input_data: str
    output_data: str
| 298 | 116 |
from fit.messages import Message
from fit.types.extended import Sport, WorkoutCapabilities, MessageIndex, \
Intensity, WktStepTarget, WktStepDuration
from fit.types.general import UInt16, String, UInt32
class Workout(Message):
    """FIT 'workout' message (message type 26)."""
    msg_type = 26
    # The constructor argument is the FIT field definition number.
    sport = Sport(4)
    capabilities = WorkoutCapabilities(5)
    num_valid_steps = UInt16(6)
    wkt_name = String(8)
class WorkoutStep(Message):
    """FIT 'workout_step' message (message type 27), one per workout step."""
    msg_type = 27
    # The constructor argument is the FIT field definition number.
    message_index = MessageIndex(254)
    wkt_step_name = String(0)
    duration_type = WktStepDuration(1)
    duration_value = UInt32(2)  # variants
    target_type = WktStepTarget(3)
    target_value = UInt32(4)  # variants
    custom_target_value_low = UInt32(5)  # variants
    custom_target_value_high = UInt32(6)  # variants
    intensity = Intensity(7)
| 781 | 280 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
from fakebotdetector import __version__
def get_long_description():
    """Return the README contents (used as the PyPI long description)."""
    # Context manager closes the handle promptly; the original left the
    # file open until garbage collection.
    with open('README.md', 'r', encoding='utf8') as readme:
        return readme.read()
# Distribution metadata; the long description is rendered from README.md.
setup(
    name='django-fake-bot-detector',
    version=__version__,
    url='https://github.com/danmoz/django-fake-bot-detector',
    license='Apache Software License',
    description='Detect and block fake search bots 🤖',
    long_description=get_long_description(),
    long_description_content_type='text/markdown',
    author='Dan Morrison',
    author_email='dan@offworld.net.au',
    packages=['fakebotdetector'],
    # Trove classifiers: supported Python versions and target audience.
    classifiers=[
        'Development Status :: 4 - Beta',
        'Framework :: Django',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
)
| 1,335 | 398 |
/usr/lib64/python3.7/__future__.py | 34 | 18 |
from jnpr.junos import Device
from lxml import etree

# NOTE: demo credentials are hard-coded; replace before real use.
dev = Device(host='xxxx', user='demo', password='demo123', gather_facts=False)
dev.open()
cnf = dev.rpc.get_config()
#cnf = dev.rpc.get_config(filter_xml=etree.XML('<configuration><interfaces/></configuration>'))
# print() function form replaces the Python 2-only print statement.
print(etree.tostring(cnf))
| 294 | 109 |
# Information elements of this NAS message, in specification order.
ies = [
    {"iei": "", "value": "Selected NAS security algorithms", "type": "security algorithms", "reference": "9.9.3.23", "presence": "M", "format": "V", "length": "1"},
    {"iei": "", "value": "NAS key set identifier", "type": "key set identifier", "reference": "9.9.3.21", "presence": "M", "format": "V", "length": "1/2"},
    {"iei": "", "value": "Replayed UE security capabilities", "type": "UE security capability", "reference": "9.9.3.36", "presence": "M", "format": "LV", "length": "3-6"},
    {"iei": "C-", "value": "IMEISV request", "type": "IMEISV request", "reference": "9.9.3.18", "presence": "O", "format": "TV", "length": "1"},
    {"iei": "55", "value": "Replayed nonceUE", "type": "Nonce", "reference": "9.9.3.25", "presence": "O", "format": "TV", "length": "5"},
    {"iei": "56", "value": "NonceMME", "type": "Nonce", "reference": "9.9.3.25", "presence": "O", "format": "TV", "length": "5"},
]
msg_list[key]["ies"] = ies
| 1,032 | 433 |
import sys
from hummingbot.core.api_throttler.data_types import RateLimit
# REST endpoints
BASE_PATH_URL = "https://api.kucoin.com"
PUBLIC_WS_DATA_PATH_URL = "/api/v1/bullet-public"
PRIVATE_WS_DATA_PATH_URL = "/api/v1/bullet-private"
TICKER_PRICE_CHANGE_PATH_URL = "/api/v1/market/allTickers"
EXCHANGE_INFO_PATH_URL = "/api/v1/symbols"
SNAPSHOT_PATH_URL = "/api/v3/market/orderbook/level2"
SNAPSHOT_NO_AUTH_PATH_URL = "/api/v1/market/orderbook/level2_100"
ACCOUNTS_PATH_URL = "/api/v1/accounts?type=trade"
SERVER_TIME_PATH_URL = "/api/v1/timestamp"
SYMBOLS_PATH_URL = "/api/v1/symbols"
ORDERS_PATH_URL = "/api/v1/orders"

# Endpoint names listed in PRIVATE_ENDPOINT_NAMES require authentication.
TRADE_ORDERS_ENDPOINT_NAME = "/spotMarket/tradeOrders"
BALANCE_ENDPOINT_NAME = "/account/balance"
PRIVATE_ENDPOINT_NAMES = [
    TRADE_ORDERS_ENDPOINT_NAME,
    BALANCE_ENDPOINT_NAME,
]

# Websocket limit identifiers and parameters.
WS_CONNECTION_LIMIT_ID = "WSConnection"
WS_CONNECTION_LIMIT = 30
WS_CONNECTION_TIME_INTERVAL = 60
WS_REQUEST_LIMIT_ID = "WSRequest"
GET_ORDER_LIMIT_ID = "GetOrders"
POST_ORDER_LIMIT_ID = "PostOrder"
DELETE_ORDER_LIMIT_ID = "DeleteOrder"
WS_PING_HEARTBEAT = 10

# Sentinel used for endpoints that are effectively unlimited.
NO_LIMIT = sys.maxsize

RATE_LIMITS = [
    RateLimit(WS_CONNECTION_LIMIT_ID, limit=WS_CONNECTION_LIMIT, time_interval=WS_CONNECTION_TIME_INTERVAL),
    RateLimit(WS_REQUEST_LIMIT_ID, limit=100, time_interval=10),
    RateLimit(limit_id=PUBLIC_WS_DATA_PATH_URL, limit=NO_LIMIT, time_interval=1),
    RateLimit(limit_id=PRIVATE_WS_DATA_PATH_URL, limit=NO_LIMIT, time_interval=1),
    RateLimit(limit_id=TICKER_PRICE_CHANGE_PATH_URL, limit=NO_LIMIT, time_interval=1),
    RateLimit(limit_id=EXCHANGE_INFO_PATH_URL, limit=NO_LIMIT, time_interval=1),
    RateLimit(limit_id=SNAPSHOT_PATH_URL, limit=NO_LIMIT, time_interval=1),
    RateLimit(limit_id=SNAPSHOT_NO_AUTH_PATH_URL, limit=NO_LIMIT, time_interval=1),
    RateLimit(limit_id=ACCOUNTS_PATH_URL, limit=NO_LIMIT, time_interval=1),
    RateLimit(limit_id=SERVER_TIME_PATH_URL, limit=NO_LIMIT, time_interval=1),
    RateLimit(limit_id=GET_ORDER_LIMIT_ID, limit=NO_LIMIT, time_interval=1),
    RateLimit(limit_id=POST_ORDER_LIMIT_ID, limit=45, time_interval=3),
    RateLimit(limit_id=DELETE_ORDER_LIMIT_ID, limit=60, time_interval=3),
]
| 2,158 | 943 |
from PIL import Image
import numpy as np
from tqdm import tqdm
def histogram_equalization(x):
hist, bins = np.histogram(x.flatten(), 255, [0, 256])
cdf = hist.cumsum()
cdf_m = np.ma.masked_equal(cdf,0)
cdf_m = (cdf_m - cdf_m.min())*255.0/(cdf_m.max()-cdf_m.min())
cdf = np.ma.filled(cdf_m,0).astype('uint8')
x2 = cdf[x.astype('uint8')]
return x2, cdf
def get_histogram(pixels, bright=True):
    """Histogram over an iterable of RGB pixel tuples.

    bright=True: one column counting the per-pixel channel mean.
    bright=False: three columns, one per R/G/B channel.
    """
    hist = np.zeros((256, 1 if bright else 3))
    for p in pixels:
        if bright:
            hist[int((p[0] + p[1] + p[2]) / 3.0), 0] += 1
            continue
        hist[p[0], 0] += 1
        hist[p[1], 1] += 1
        hist[p[2], 2] += 1
    return np.array(hist)
def match_histogram2(img1, hist):
    """Remap img1's RGB values so each channel's histogram follows *hist*.

    Pure-Python variant of match_histogram (no numpy fancy indexing).
    hist is indexed as hist[value][channel]; its per-channel totals are
    assumed to match the image's pixel count — TODO confirm with callers.
    """
    colors = img1.getdata()
    red, green, blue = [c[0] for c in colors], [c[1] for c in colors], [c[2] for c in colors]
    # Pixel indices ordered by ascending channel value.
    sr = sorted(range(len(red)), key=lambda k: red[k])
    sg = sorted(range(len(green)), key=lambda k: green[k])
    sb = sorted(range(len(blue)), key=lambda k: blue[k])
    hr, hg, hb = [[hist[i][c] for i in range(256)] for c in range(3)]
    fr, fg, fb = 0,0,0
    for c in range(len(hr)):
        # Number of pixels that should take value c in each channel.
        nfr, nfg, nfb = int(hr[c]), int(hg[c]), int(hb[c])
        # Assign value c to the next nf* darkest unassigned pixels.
        idxr = [sr[k] for k in range(fr,fr+nfr)]
        idxg = [sg[k] for k in range(fg,fg+nfg)]
        idxb = [sb[k] for k in range(fb,fb+nfb)]
        for ir in idxr:
            red[ir] = c
        for ig in idxg:
            green[ig] = c
        for ib in idxb:
            blue[ib] = c
        fr, fg, fb = fr+nfr, fg+nfg, fb+nfb
    # NOTE(review): zip() is a lazy iterator on Python 3 — confirm that the
    # installed Pillow's putdata accepts it (match_histogram builds a list).
    adjusted_colors = zip(red, green, blue)
    img_adjusted = Image.new(img1.mode, img1.size)
    img_adjusted.putdata(adjusted_colors)
    return img_adjusted
def match_histogram(img1, hist):
    """Remap img1's RGB values so each channel's histogram matches *hist*.

    hist is indexed as hist[value][channel]; it is rescaled so its total
    matches the image's sample count before matching.
    """
    pixels = list(img1.getdata())
    red = np.array([c[0] for c in pixels])
    green = np.array([c[1] for c in pixels])
    blue = np.array([c[2] for c in pixels])
    # Pixel indices ordered by ascending channel value.
    sr = sorted(range(len(red)), key=lambda k: red[k])
    sg = sorted(range(len(green)), key=lambda k: green[k])
    sb = sorted(range(len(blue)), key=lambda k: blue[k])
    # Scale target counts so they sum to the number of channel samples.
    num_pixel_mult = (3 * len(pixels)) / np.sum(hist)
    hr, hg, hb = [[int(num_pixel_mult * hist[i][c]) for i in range(256)] for c in range(3)]
    fr, fg, fb = 0, 0, 0
    for c in range(len(hr)):
        nfr, nfg, nfb = int(hr[c]), int(hg[c]), int(hb[c])
        # range() replaces the Python 2-only xrange() the original used,
        # which raised NameError on Python 3.
        red[np.array([sr[k] for k in range(fr, fr + nfr)]).astype('int')] = c
        green[np.array([sg[k] for k in range(fg, fg + nfg)]).astype('int')] = c
        blue[np.array([sb[k] for k in range(fb, fb + nfb)]).astype('int')] = c
        fr, fg, fb = fr + nfr, fg + nfg, fb + nfb
    # Materialize the lazy zip iterator: Pillow's putdata expects a sequence.
    adjusted_pixels = list(zip(red, green, blue))
    img_adjusted = Image.new(img1.mode, img1.size)
    img_adjusted.putdata(adjusted_pixels)
    return img_adjusted
def adjust_color_range(img, hist, amt, border):
cdf = hist.cumsum() / np.sum(hist)
i1, i2 = min([i for i in range(256) if cdf[i]>border]), max([i for i in range(256) if cdf[i]<1.0-border])
j1, j2 = int((1.0-amt)*i1), i2 + amt*(255-i2)
img2 = np.clip(j1 + (j2-j1)*(img - i1)/(i2-i1), 0.0, 255.0)
return img2
def get_average_histogram(frames_path):
    """Average brightness histogram over every 8th frame in *frames_path*.

    Frames are expected to be named f00001.png, f00002.png, ...
    """
    # Local imports: the module top never imported these os helpers, so
    # this function raised NameError when called.
    from os import listdir
    from os.path import isfile, join

    numframes = len([f for f in listdir(frames_path) if isfile(join(frames_path, f)) and f[-4:] == '.png'])
    img = Image.open('%s/f00001.png' % (frames_path))
    histogram = get_histogram(list(img.getdata()))
    for t in tqdm(range(1, numframes, 8)):
        img = Image.open('%s/f%05d.png' % (frames_path, t + 1))
        histogram += get_histogram(list(img.getdata()))
    # Average over the first frame plus the sampled frames.
    histogram /= (1 + len(range(1, numframes, 8)))
    return histogram
| 3,586 | 1,573 |
import numpy as np
from models import SEM, clear_sem
from sklearn import metrics
import pandas as pd
from scipy.special import logsumexp
def logsumexp_mean(x):
    """Log of the mean of exp(x): logsumexp(x) minus log(len(x))."""
    count = len(x)
    return logsumexp(x) - np.log(count)
def batch_experiment(sem_kwargs, n_train=1400, n_test=600, progress_bar=True):
    """Run one community-structure segmentation experiment with SEM.

    Trains on a random walk over a 15-node, 3-community graph, tests on
    Hamiltonian paths, and summarizes boundary probability and prediction
    error at community transitions versus all other parses.

    Args:
        sem_kwargs: keyword arguments forwarded to the SEM constructor.
        n_train: number of random-walk (training) trials.
        n_test: number of Hamiltonian-path (test) trials; should be a
            multiple of 15 so whole paths fit.
        progress_bar: forwarded to SEM.run.

    Returns:
        dict of summary statistics.
    """
    # define the graph structure for the experiment (3 communities of 5)
    g = np.array([
        [0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
        [0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
        [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
    ], dtype=float)
    # define the random item vectors for each node
    d = 25
    items = np.random.randn(15, d) / np.sqrt(d)

    # draw random walks on the graph
    def sample_pmf(pmf):
        return np.sum(np.cumsum(pmf) < np.random.uniform(0, 1))

    train_nodes = [np.random.randint(15)]
    for _ in range(n_train - 1):
        train_nodes.append(sample_pmf(g[train_nodes[-1]] / g[train_nodes[-1]].sum()))

    # draw hamiltonian paths from the graph;
    # this graph defines the same adjacency but a preference order as well
    preferred_nodes = np.array([
        [1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0],
    ], dtype=float)

    def sample_hamilton(node0):
        is_visited = np.zeros(15, dtype=bool)
        counter = 0
        nodes = []
        while counter < (len(is_visited)):
            # Prefer unvisited neighbours on the preferred list; fall back
            # to any unvisited neighbour.
            p = g[node0] * ~is_visited * preferred_nodes
            if np.sum(p) == 0:
                p = g[node0] * ~is_visited
            node0 = sample_pmf(p / np.sum(p))
            nodes.append(node0)
            is_visited[node0] = True
            counter += 1
        return nodes

    test_nodes = []
    node0 = np.random.randint(15)
    # Integer division: `n_test / 15` is a float on Python 3 and range()
    # rejects it (TypeError in the original).
    for _ in range(n_test // 15):
        test_nodes += sample_hamilton(node0)
        node0 = test_nodes[-1]

    # embed the node sequence as item vectors
    all_nodes = train_nodes + test_nodes
    x = []
    for node in all_nodes:
        x.append(items[node])
    x = np.array(x)

    sem_model = SEM(**sem_kwargs)
    sem_model.run(x, progress_bar=progress_bar)

    # prepared diagnostic measures
    clusters = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2]
    node_cluster = []
    for node in test_nodes:
        node_cluster.append(clusters[node])
    node_cluster = np.array(node_cluster)
    all_node_cluster = []
    for node in all_nodes:
        all_node_cluster.append(clusters[node])
    all_node_cluster = np.array(all_node_cluster)
    # True wherever consecutive trials cross a community boundary.
    all_boundaries_true = np.concatenate([[False], (all_node_cluster[1:] != all_node_cluster[:-1])])
    # Model-estimated boundaries (not used below; kept for inspection).
    test_boundaries = sem_model.results.e_hat[n_train - 1:-1] != sem_model.results.e_hat[n_train:]
    boundaries = sem_model.results.e_hat[:n_train - 1] != sem_model.results.e_hat[1:n_train]
    test_bound_prob = sem_model.results.log_boundary_probability[n_train:]
    bound_prob = sem_model.results.log_boundary_probability[1:n_train]
    # pull the prediction error (Bayesian Surprise)
    test_pe = sem_model.results.surprise[n_train:]
    bound_pe = sem_model.results.surprise[1:n_train]
    # cache the correlation between log boundary probability and log surprise
    r = np.corrcoef(
        sem_model.results.log_boundary_probability, sem_model.results.surprise
    )[0][1]
    # n_train replaces the hard-coded 1400 the original used here, which
    # silently broke any call with a different n_train.
    output = {
        'Community Transitions (Hamilton)': np.exp(logsumexp_mean(test_bound_prob[all_boundaries_true[n_train:]])),
        'Other Parse (Hamilton)': np.exp(logsumexp_mean(test_bound_prob[all_boundaries_true[n_train:] == False])),
        'Community Transitions (All Other Trials)': np.exp(logsumexp_mean(bound_prob[all_boundaries_true[1:n_train]])),
        'Other Parse (All Other Trials)': np.exp(logsumexp_mean(bound_prob[all_boundaries_true[1:n_train] == False])),
        'PE Community Transitions (Hamilton)': logsumexp_mean(test_pe[all_boundaries_true[n_train:]]),
        'PE Other Parse (Hamilton)': logsumexp_mean(test_pe[all_boundaries_true[n_train:] == False]),
        'PE Community Transitions (All Other Trials)': logsumexp_mean(bound_pe[all_boundaries_true[1:n_train]]),
        'PE Other Parse (All Other Trials)': logsumexp_mean(bound_pe[all_boundaries_true[1:n_train] == False]),
        'r': r
    }
    # clear_sem_model
    clear_sem(sem_model)
    sem_model = None
    return output
| 4,911 | 2,201 |
"""
The :mod:`mlshell.producers.dataset` contains examples of `Dataset` class for
empty data object creation and `DataProducer` class for filling it.
:class:`mlshell.Dataset` provides a unified interface to interact with underlying
data. Intended to be used in :class:`mlshell.Workflow`. For new data formats
no need to edit `Workflow` class, adapt `Dataset` in compliance to interface.
Current realization based on dictionary.
:class:`mlshell.DataProducer` specifies methods divided for convenience on:
* :class:`mlshell.DataIO` defining IO related methods.
Currently reading from csv-file implemented.
* :class:`mlshell.DataPreprocessor` preprocessing data to final state.
Implemented data transformation in compliance to `Dataset` class, also common
exploration techniques available.
"""
import copy
import os
import jsbeautifier
import numpy as np
import pandas as pd
import pycnfg
import sklearn
import tabulate
__all__ = ['Dataset', 'DataIO', 'DataPreprocessor', 'DatasetProducer']
class Dataset(dict):
"""Unified data interface.
Implements interface to access arbitrary data.
Interface: x, y, data, meta, subset, dump_pred and whole dict api.
Parameters
----------
*args : list
Passed to parent class constructor.
**kwrags : dict
Passed to parent class constructor.
Attributes
----------
data : :class:`pandas.DataFrame`
Underlying data.
subsets : dict
{'subset_id' : array-like subset indices, ..}.
meta : dict
Extracted auxiliary information from data: {
'index': list
List of index column label(s).
'features': list
List of feature column label(s).
'categoric_features': list
List of categorical feature column label(s).
'targets': list
List of target column label(s),
'indices': list
List of rows indices.
'classes': list of :class:`numpy.ndarray`
List of sorted unique labels for each target(s) (n_outputs,
n_classes).
'pos_labels': list
List of "positive" label(s) for target(s) (n_outputs,).
'pos_labels_ind': list
List of "positive" label(s) index in :func:`numpy.unique`
for target(s) (n_outputs).
categoric_ind_name : dict
Dictionary with categorical feature indices as key, and
tuple ('feature_name', categories) as value:
{'column_index': ('feature_name', ['cat1', 'cat2'])}.
numeric_ind_name : dict
Dictionary with numeric features indices as key, and tuple
('feature_name', ) as value: {'columns_index':('feature_name',)}.
}
Notes
-----
Inherited from dict class, so attributes section describes keys.
"""
_required_parameters = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __hash__(self):
return hash(pd.util.hash_pandas_object(self['data']).sum())
@property
def oid(self):
"""str: Dataset identifier."""
return self['_oid']
@oid.setter
def oid(self, value):
self['_oid'] = value
@property
def x(self):
""":class:`pandas.DataFrame` : Extracted features columns."""
df = self['data']
meta = self['meta']
return df.loc[:, meta['features']]
@property
def y(self):
""":class:`pandas.DataFrame` : Extracted targets columns."""
df = self['data']
meta = self['meta']
# return df[meta['targets']].values
res = df.loc[:, meta['targets']].values.ravel() \
if len(meta['targets']) == 1 else df.loc[:, meta['targets']].values
return res
@property
def meta(self):
"""dict: Access meta."""
return self['meta']
@property
def data(self):
""":class:`pandas.DataFrame` : Access data."""
return self['data']
def subset(self, subset_id):
""":class:`mlshell.Dataset` : Access subset. """
if subset_id is '':
return self
df = self['data']
index = self['subsets'][subset_id] # subset of meta['inices']
# Inherit only meta (except indices).
# dict(self) will inherit by ref.
dataset = Dataset(**{
'meta': copy.deepcopy(self.meta),
'data': df.loc[index],
'subsets': {},
'_oid': f"{self['_oid']}__{subset_id}"})
# Update indices in meta.
dataset.meta['indices'] = index
# if reset_index: np.array(dataset.meta['indices'])[index].tolist()
return dataset
    def dump_pred(self, filepath, y_pred, **kwargs):
        """Dump predictions for target column(s) to disk as CSV.

        Parameters
        ----------
        filepath: str
            File path without extension.
        y_pred: array-like
            pipeline.predict() result.
        **kwargs: dict
            Additional kwargs to pass in .to_csv(**kwargs).
        Returns
        -------
        fullpath : str
            Full filepath ('<filepath>_pred.csv').
        """
        meta = self.meta
        # Recover original index and names.
        dic = dict(zip(
            meta['targets'],
            [y_pred] if len(meta['targets']) == 1 else np.array(y_pred).T
        ))
        obj = pd.DataFrame(index=meta['indices'],
                           data=dic).rename_axis(meta['index'], axis=0)
        fullpath = f"{filepath}_pred.csv"
        # Pin float precision under pytest so test artifacts are byte-stable.
        if "PYTEST_CURRENT_TEST" in os.environ:
            if 'float_format' not in kwargs:
                kwargs['float_format'] = '%.8f'
        # NOTE(review): `line_terminator` was renamed `lineterminator` in
        # pandas 1.5 and removed in 2.0 — confirm the pinned pandas version.
        with open(fullpath, 'w', newline='') as f:
            obj.to_csv(f, mode='w', header=True, index=True, sep=',',
                       line_terminator='\n', **kwargs)
        return fullpath
class DataIO(object):
    """Get raw data from database.

    Interface: load.

    Parameters
    ----------
    project_path: str.
        Absolute path to current project dir.
    logger : :class:`logging.Logger`
        Logger.
    """
    _required_parameters = ['project_path', 'logger']

    def __init__(self, project_path, logger):
        self.logger = logger
        self.project_path = project_path

    def load(self, dataset, filepath, key='data',
             random_skip=False, random_state=None, **kwargs):
        """Load data from csv-file.

        Parameters
        ----------
        dataset : :class:`mlshell.Dataset`
            Template for dataset.
        filepath : str
            Absolute path to csv file or relative to 'project__path' started
            with './'.
        key : str, optional (default='data')
            Loaded data identifier to add in dataset dictionary. Useful when
            load multiple files and combine them in separate step under 'data'.
        random_skip : bool, optional (default=False)
            If True randomly skip rows while read file, remain 'nrows' lines.
            Rewrites `skiprows` kwarg.
        random_state : int, optional (default=None).
            Fix random state for `random_skip`.
        **kwargs : dict
            Additional parameters passed to the :func:`pandas.read_csv()` .

        Returns
        -------
        dataset : :class:`mlshell.Dataset`
            Key added: {'data': :class:`pandas.DataFrame` ,}.

        Notes
        -----
        If `nrows` > lines in file, auto set to None.
        """
        if filepath.startswith('./'):
            filepath = "{}/{}".format(self.project_path, filepath[2:])
        # Count lines once, to validate `nrows` and to sample `skiprows`.
        with open(filepath, 'r') as f:
            lines = sum(1 for _ in f)
        if 'skiprows' in kwargs and random_skip:
            self.logger.warning("random_skip rewrite skiprows kwarg.")
        nrows = kwargs.get('nrows', None)
        skiprows = kwargs.get('skiprows', None)
        if nrows:
            if nrows > lines:
                # Requested more rows than available => read everything.
                nrows = None
            elif random_skip:
                # skiprows index starts from 0; row 0 (header) is never
                # skipped here. If no headers, returns nrows+1 rows.
                random_state = sklearn.utils.check_random_state(random_state)
                skiprows = random_state.choice(range(1, lines),
                                               size=lines - nrows - 1,
                                               replace=False, p=None)
            kwargs['skiprows'] = skiprows
            kwargs['nrows'] = nrows
        with open(filepath, 'r') as f:
            raw = pd.read_csv(f, **kwargs)
        self.logger.info("Data loaded from:\n {}".format(filepath))
        dataset[key] = raw
        return dataset
class DataPreprocessor(object):
    """Transform raw data in compliance with `Dataset` class.

    Interface: preprocess, info, split.

    Parameters
    ----------
    project_path: str.
        Absolute path to current project dir.
    logger : :class:`logging.Logger`
        Logger.
    """
    _required_parameters = ['project_path', 'logger']

    def __init__(self, project_path, logger):
        self.logger = logger
        self.project_path = project_path

    def preprocess(self, dataset, targets_names, features_names=None,
                   categor_names=None, pos_labels=None, **kwargs):
        """Preprocess raw data.

        Parameters
        ----------
        dataset : :class:`mlshell.Dataset`
            Raw dataset: {'data': :class:`pandas.DataFrame` }.
        targets_names: list
            List of targets columns names in raw dataset. Even if no exist,
            will be used to name predictions in ``dataset.dump_pred`` .
        features_names: list, optional (default=None)
            List of features columns names in raw dataset. If None, all except
            targets.
        categor_names: list, optional (default=None)
            List of categorical features(also binary) identifiers in raw
            dataset. If None, empty list.
        pos_labels: list, optional (default=None)
            Classification only, list of "positive" label(s) in target(s).
            Could be used in :func:`sklearn.metrics.roc_curve` for
            threshold analysis and metrics evaluation if classifier supports
            ``predict_proba``. If None, for each target last label in
            :func:`numpy.unique` is used. For regression set [] to prevent
            evaluation.
        **kwargs : dict
            Additional parameters to add in dataset.

        Returns
        -------
        dataset : :class:`mlshell.Dataset`
            Resulted dataset. Key updated: 'data'. Keys added:
            'subsets': dict
                Storage for data subset(s) indices (filled in split method)
                {'subset_id': indices}.
            'meta' : dict
                Extracted auxiliary information from data:
                {
                'index': list
                    List of index column label(s).
                'features': list
                    List of feature column label(s).
                'categoric_features': list
                    List of categorical feature column label(s).
                'targets': list
                    List of target column label(s).
                'indices': list
                    List of rows indices.
                'classes': list of :class:`numpy.ndarray`
                    List of sorted unique labels for each target(s) (n_outputs,
                    n_classes).
                'pos_labels': list
                    List of "positive" label(s) for target(s) (n_outputs,).
                'pos_labels_ind': list
                    List of "positive" label(s) index in :func:`numpy.unique`
                    for target(s) (n_outputs).
                categoric_ind_name : dict
                    Dictionary with categorical feature indices as key, and
                    tuple ('feature_name', categories) as value:
                    {'column_index': ('feature_name', ['cat1', 'cat2'])}.
                numeric_ind_name : dict
                    Dictionary with numeric features indices as key, and tuple
                    ('feature_name', ) as value: {'columns_index':
                    ('feature_name',)}.
                }

        Notes
        -----
        Don`t change dataframe shape or index/columns names after ``meta``
        generating.
        Features columns unified:
        * Fill gaps.
            * If gap in categorical => set 'unknown'.
            * If gap in non-categorical => set np.nan.
        * Cast categorical features to str dtype, and apply Ordinal encoder.
        * Cast values to np.float64.
        """
        raw = dataset['data']
        if categor_names is None:
            categor_names = []
        if features_names is None:
            features_names = [c for c in raw.columns if c not in targets_names]
        for i in (targets_names, features_names, categor_names):
            if not isinstance(i, list):
                raise TypeError(f"{i} should be a list.")
        index = raw.index
        targets_df, raw_info_targets =\
            self._process_targets(raw, targets_names, pos_labels)
        features_df, raw_info_features =\
            self._process_features(raw, features_names, categor_names)
        data = self._combine(index, targets_df, features_df)
        meta = {
            'index': index.name,
            'indices': list(index),
            'targets': targets_names,
            'features': list(features_names),
            'categoric_features': categor_names,
            **raw_info_features,
            **raw_info_targets,
        }
        self.logger.debug(f"Dataset meta:\n {meta}")
        dataset.update({'data': data,
                        'meta': meta,
                        'subsets': {},
                        **kwargs})
        return dataset

    def info(self, dataset, **kwargs):
        """Log dataset info.

        Check:
        * duplicates.
        * gaps.

        Parameters
        ----------
        dataset : :class:`mlshell.Dataset`
            Dataset to explore.
        **kwargs : dict
            Additional parameters to pass in low-level functions:
            `del_duplicates` for the duplicates check, `del_gaps` and
            `nogap_columns` for the gaps check.

        Returns
        -------
        dataset : :class:`mlshell.Dataset`
            For compliance with producer logic.
        """
        # Fix: route kwargs to the check that accepts them. Previously all
        # kwargs were forwarded to both checks, so e.g. `del_gaps=True`
        # raised TypeError in _check_duplicates.
        dupl_kwargs = {k: v for k, v in kwargs.items()
                       if k in ('del_duplicates',)}
        gaps_kwargs = {k: v for k, v in kwargs.items()
                       if k in ('del_gaps', 'nogap_columns')}
        self._check_duplicates(dataset['data'], **dupl_kwargs)
        self._check_gaps(dataset['data'], **gaps_kwargs)
        return dataset

    def split(self, dataset, **kwargs):
        """Split dataset on train, test.

        Parameters
        ----------
        dataset : :class:`mlshell.Dataset`
            Dataset to unify.
        **kwargs : dict
            Additional parameters to pass in:
            :func:`sklearn.model_selection.train_test_split` .

        Returns
        -------
        dataset : :class:`mlshell.Dataset`
            Resulted dataset. 'subset' value updated:
            {'train': array-like train rows indices,
            'test': array-like test rows indices,}

        Notes
        -----
        If split ``train_size==1.0`` or ``test_size==0``: ``test=train`` ,
        other kwargs ignored.
        No copy takes place.
        """
        if 'test_size' not in kwargs:
            kwargs['test_size'] = None
        if 'train_size' not in kwargs:
            kwargs['train_size'] = None
        data = dataset['data']
        if (kwargs['train_size'] == 1.0 and kwargs['test_size'] is None
                or kwargs['train_size'] is None and kwargs['test_size'] == 0):
            # Degenerate split: train = test = data.
            train_index = test_index = data.index
        else:
            train, test, train_index, test_index = \
                sklearn.model_selection.train_test_split(
                    data, data.index.values, **kwargs)
        # Add to dataset.
        dataset['subsets'].update({'train': train_index,
                                   'test': test_index})
        return dataset

    # ============================== preprocess ===============================
    def _process_targets(self, raw, target_names, pos_labels):
        """Targets preprocessing: extract target frame and class info."""
        try:
            targets_df = raw[target_names]
        except KeyError:
            # New/unlabeled data may have no target columns at all.
            self.logger.warning(f"No target column(s) found in df:\n"
                                f" {target_names}")
            targets_df = pd.DataFrame()
        targets_df, classes, pos_labels, pos_labels_ind =\
            self._unify_targets(targets_df, pos_labels)
        raw_info_targets = {
            'classes': classes,
            'pos_labels': pos_labels,
            'pos_labels_ind': pos_labels_ind,
        }
        return targets_df, raw_info_targets

    def _process_features(self, raw, features_names, categor_names):
        """Features preprocessing: unify gaps/dtypes, index columns."""
        features_df = raw[features_names]
        features_df, categoric_ind_name, numeric_ind_name \
            = self._unify_features(features_df, categor_names)
        raw_info_features = {
            'categoric_ind_name': categoric_ind_name,
            'numeric_ind_name': numeric_ind_name, }
        return features_df, raw_info_features

    def _combine(self, index, targets_df, features_df):
        """Combine preprocessed sub-data (targets first)."""
        # targets_df empty dataframe or None is possible.
        return pd.concat(
            [targets_df, features_df],
            axis=1,
        )

    def _unify_targets(self, targets, pos_labels=None):
        """Unify input targets.

        Extract classes and positive label index (classification only).

        Parameters
        ----------
        targets : :class:`pandas.DataFrame`
            Data to unify.
        pos_labels: list, optional (default=None)
            Classification only, list of "positive" labels for targets.
            Could be used for threshold analysis (roc_curve) and metrics
            evaluation if classifiers supported predict_proba. If None, last
            label in :func:`numpy.unique` for each target used. For regression
            set [] to prevent evaluation.

        Returns
        -------
        targets: :class:`pandas.DataFrame`
            Unchanged input.
        classes: list of :class:`numpy.ndarray`
            List of sorted unique labels for target(s) (n_outputs, n_classes).
        pos_labels: list
            List of "positive" label(s) for target(s) (n_outputs,).
        pos_labels_ind: list
            List of "positive" label(s) index in :func:`numpy.unique`
            for target(s) (n_outputs,).
        """
        # Regression.
        if isinstance(pos_labels, list) and not pos_labels:
            classes = []
            pos_labels_ind = []
            return targets, classes, pos_labels, pos_labels_ind
        # Classification.
        # Find classes, example: [array([1]), array([2, 7])].
        # Fix: DataFrame.iteritems() was removed in pandas 2.0, items() is
        # the long-standing equivalent.
        classes = [np.unique(j) for i, j in targets.items()]
        if pos_labels is None:
            n_targets = len(classes)
            # Default: last (largest) unique label is "positive".
            pos_labels_ind = [len(classes[i]) - 1 for i in range(n_targets)]
            pos_labels = [classes[i][pos_labels_ind[i]]
                          for i in range(n_targets)]  # [2,4]
        else:
            # Find where pos_labels in sorted labels, example: [1, 0].
            pos_labels_ind = [np.where(classes[i] == pos_labels[i])[0][0]
                              for i in range(len(classes))]
        # Could be no target columns in new data.
        self.logger.debug(
            f"Labels {pos_labels} identified as positive for target(s):\n"
            f" when classifier supports predict_proba: prediction="
            f"pos_label on sample, if P(pos_label) > classification "
            f"threshold.")
        return targets, classes, pos_labels, pos_labels_ind

    def _unify_features(self, features, categor_names):
        """Unify input features.

        Parameters
        ----------
        features : :class:`pandas.DataFrame`
            Data to unify.
        categor_names: list
            List of categorical features (and binary) column names in features.

        Returns
        -------
        features: :class:`pandas.DataFrame`
            Input updates:
            * fill gaps.
                if gap in categorical => fill 'unknown'
                if gap in non-categor => np.nan
            * cast categorical features to str dtype, and apply Ordinalencoder.
            * cast the whole featuresframe to np.float64.
        categoric_ind_name : dict
            {'column_index': ('feature_name', ['cat1', 'cat2'])}
            Dictionary with categorical feature indices as key, and tuple
            ('feature_name', categories) as value.
        numeric_ind_name : dict {'columns_index':('feature_name',)}
            Dictionary with numeric features indices as key, and tuple
            ('feature_name', ) as value.
        """
        categoric_ind_name = {}
        numeric_ind_name = {}
        # Turn off: SettingWithCopy, excessive.
        pd.options.mode.chained_assignment = None
        for ind, column_name in enumerate(features):
            if column_name in categor_names:
                # Fill gaps with 'unknown', inplace unreliable (copy!).
                features.loc[:, column_name] = features[column_name]\
                    .fillna(value='unknown', method=None, axis=None,
                            inplace=False, limit=None, downcast=None)
                # Cast dtype to str (copy!).
                features.loc[:, column_name] = features[column_name].astype(str)
                # Encode
                encoder = sklearn.preprocessing.\
                    OrdinalEncoder(categories='auto')
                features.loc[:, column_name] = encoder\
                    .fit_transform(features[column_name]
                                   .values.reshape(-1, 1))
                # Generate {index: ('feature_id', ['B','A','C'])}.
                # tolist need for 'hr' cache dump.
                categoric_ind_name[ind] = (column_name,
                                           encoder.categories_[0].tolist())
            else:
                # Fill gaps with np.nan, inplace unreliable (copy!).
                # Could work with no copy on slice or single col even inplace.
                features.loc[:, column_name] = features.loc[:, column_name]\
                    .fillna(value=np.nan, method=None, axis=None,
                            inplace=False, downcast=None)
                # Generate {'index': ('feature_id',)}.
                numeric_ind_name[ind] = (column_name,)
        # Turn on: SettingWithCopy.
        pd.options.mode.chained_assignment = 'warn'
        # Cast to np.float64 without copy.
        # python float = np.float = C double =
        # np.float64 = np.double(64 bit processor)).
        # [alternative] sklearn.utils.as_float_array / assert_all_finite
        features = features.astype(np.float64, copy=False, errors='ignore')
        # Additional check.
        self._check_numeric_types(features, categor_names)
        return features, categoric_ind_name, numeric_ind_name

    def _check_numeric_types(self, data, categor_names):
        """Check that all non-categorical features are of numeric type."""
        dtypes = data.dtypes
        misstype = []
        for ind, column_name in enumerate(data):
            if column_name not in categor_names:
                if not np.issubdtype(dtypes[column_name], np.number):
                    misstype.append(column_name)
        if misstype:
            raise ValueError(f"Input data non-categoric columns should be "
                             f"subtype of np.number, check:\n"
                             f" {misstype}")
        return None

    # ================================ info ===================================
    def _check_duplicates(self, data, del_duplicates=False):
        """Check duplicates rows in dataframe.

        Parameters
        ----------
        data : :class:`pandas.DataFrame`
            Dataframe to check.
        del_duplicates : bool
            If True, delete rows with duplicated.
            If False, do nothing.

        Notes
        -----
        Use del_duplicates=True only before generating dataset `meta`.
        """
        # Duplicate rows index mask (first occurrence stays unmarked).
        mask = data.duplicated(subset=None, keep='first')
        dupl_n = np.sum(mask)
        if dupl_n:
            self.logger.warning(f"Warning: {dupl_n} duplicates rows found,\n"
                                " see debug.log for details.")
            # Count unique duplicated rows.
            rows_count = data[mask].groupby(data.columns.tolist())\
                .size().reset_index().rename(columns={0: 'count'})
            rows_count.sort_values(by=['count'], axis=0,
                                   ascending=False, inplace=True)
            with pd.option_context('display.max_rows', None,
                                   'display.max_columns', None):
                pprint = tabulate.tabulate(rows_count, headers='keys',
                                           tablefmt='psql')
                self.logger.debug(f"Duplicates found\n{pprint}")
        if del_duplicates:
            # Delete duplicates (without index reset).
            size_before = data.size
            data.drop_duplicates(keep='first', inplace=True)
            size_after = data.size
            if size_before - size_after != 0:
                self.logger.warning(f"Warning: delete duplicates rows "
                                    f"({size_before - size_after} values).")
        return None

    def _check_gaps(self, data, del_gaps=False, nogap_columns=None):
        """Check gaps in dataframe.

        Parameters
        ----------
        data : :class:`pandas.DataFrame`
            Dataframe to check.
        del_gaps : bool, optional (default=False)
            If True, delete rows with gaps from `nogap_columns` list.
            If False, raise Exception when `nogap_columns` contain gaps.
        nogap_columns : list, optional (default=None)
            Columns where gaps are forbidden: ['column_1', ..]. if None, [].

        Notes
        -----
        Use del_gaps=True only before generating dataset `meta` (preprocess).
        """
        if nogap_columns is None:
            nogap_columns = []
        gaps_number = data.size - data.count().sum()
        columns_with_gaps_dic = {}
        if gaps_number > 0:
            for column_name in data:
                column_gaps_number = data[column_name].size \
                    - data[column_name].count()
                if column_gaps_number > 0:
                    columns_with_gaps_dic[column_name] = column_gaps_number
            # Fix: report a true percentage (the format spec appends '%'
            # but the value was a raw fraction before).
            self.logger.warning('Warning: gaps found: {} {:.3f}%,\n'
                                ' see debug.log for details.'
                                .format(gaps_number,
                                        gaps_number / data.size * 100))
            pprint = jsbeautifier.beautify(str(columns_with_gaps_dic))
            self.logger.debug(f"Gaps per column:\n{pprint}")
        subset = [column_name for column_name in nogap_columns
                  if column_name in columns_with_gaps_dic]
        if del_gaps and subset:
            # Delete rows with gaps in specified columns.
            # Fix: `subset` is already a list of labels — nesting it in
            # another list made dropna fail.
            data.dropna(axis=0, how='any', thresh=None,
                        subset=subset, inplace=True)
        elif subset:
            raise ValueError(f"Gaps in {subset}.")
        return None
class DatasetProducer(pycnfg.Producer, DataIO, DataPreprocessor):
    """Factory to produce dataset.

    Combines the pycnfg producer protocol with the load (DataIO) and
    preprocess/info/split (DataPreprocessor) mixins.

    Parameters
    ----------
    objects : dict
        Dictionary with objects from previous executed producers:
        {'section_id__config__id', object,}.
    oid : str
        Unique identifier of produced object.
    path_id : str, optional (default='default')
        Project path identifier in `objects`.
    logger_id : str, optional (default='default')
        Logger identifier in `objects`.

    Attributes
    ----------
    objects : dict
        Dictionary with objects from previous executed producers:
        {'section_id__config__id', object,}.
    oid : str
        Unique identifier of produced object.
    logger : :class:`logging.Logger`
        Logger.
    project_path: str
        Absolute path to project dir.
    """
    _required_parameters = ['objects', 'oid', 'path_id', 'logger_id']
    def __init__(self, objects, oid, path_id='path__default',
                 logger_id='logger__default'):
        # Explicit base-class init calls (no cooperative super()) because
        # the three bases take different argument lists; Producer must run
        # first — the mixins reuse the attributes it sets.
        # NOTE(review): assumes pycnfg.Producer.__init__ sets
        # self.project_path and self.logger from `objects` — confirm.
        pycnfg.Producer.__init__(self, objects, oid, path_id=path_id,
                                 logger_id=logger_id)
        DataIO.__init__(self, self.project_path, self.logger)
        DataPreprocessor.__init__(self, self.project_path, self.logger)
if __name__ == '__main__':
    # Library module: nothing to run directly.
    pass
| 28,818 | 7,812 |
a, b = map(int, input().split())
print((a%b)*(b%a)+1) | 53 | 27 |
"""
this file contains tuned obs function and reward function
fix ttc calculate
"""
import math
import gym
import numpy as np
from smarts.core.agent import AgentSpec
from smarts.core.agent_interface import AgentInterface
from smarts.core.agent_interface import OGM, NeighborhoodVehicles
from smarts.core.controllers import ActionSpaceType, DiscreteAction
MAX_LANES = 5  # The maximum number of lanes we expect to see in any scenario.
# Both flags are reset and may be set by ttc_by_path(); the reward side
# reads them as module globals.
lane_crash_flag = False  # used for training to signal a flipped car
intersection_crash_flag = False  # used for training to signal intersect crash
# ==================================================
# Discrete Action Space
# "keep_lane", "slow_down", "change_lane_left", "change_lane_right"
# ==================================================
ACTION_SPACE = gym.spaces.Discrete(4)
# Index in ACTION_CHOICE corresponds to the Discrete action id above.
ACTION_CHOICE = [
    DiscreteAction.keep_lane,
    DiscreteAction.slow_down,
    DiscreteAction.change_lane_left,
    DiscreteAction.change_lane_right,
]
# ==================================================
# Observation Space
# This observation space should match the output of observation(..) below
# ==================================================
OBSERVATION_SPACE = gym.spaces.Dict(
    {
        # To make car follow the waypoints
        # distance from lane center
        "distance_from_center": gym.spaces.Box(low=-1e10, high=1e10, shape=(1,)),
        # relative heading angle from 10 waypoints in 50 forehead waypoints
        "heading_errors": gym.spaces.Box(low=-1.0, high=1.0, shape=(10,)),
        # Car attributes
        # ego speed
        "speed": gym.spaces.Box(low=-1e10, high=1e10, shape=(1,)),
        # ego steering
        "steering": gym.spaces.Box(low=-1e10, high=1e10, shape=(1,)),
        # To make car learn to slow down, overtake or dodge
        # distance to the closest car in each lane
        "lane_dist": gym.spaces.Box(low=-1e10, high=1e10, shape=(5,)),
        # time to collide to the closest car in each lane
        "lane_ttc": gym.spaces.Box(low=-1e10, high=1e10, shape=(5,)),
        # ego lane closest social vehicle relative speed
        "closest_lane_nv_rel_speed": gym.spaces.Box(low=-1e10, high=1e10, shape=(1,)),
        # distance to the closest car in possible intersection direction
        "intersection_ttc": gym.spaces.Box(low=-1e10, high=1e10, shape=(1,)),
        # time to collide to the closest car in possible intersection direction
        "intersection_distance": gym.spaces.Box(low=-1e10, high=1e10, shape=(1,)),
        # intersection closest social vehicle relative speed
        "closest_its_nv_rel_speed": gym.spaces.Box(low=-1e10, high=1e10, shape=(1,)),
        # intersection closest social vehicle relative position in vehicle heading coordinate
        "closest_its_nv_rel_pos": gym.spaces.Box(low=-1e10, high=1e10, shape=(2,)),
    }
)
def heading_to_degree(heading):
    """Map a SMARTS heading (radians, 0 = up/+y) into [0, 360) degrees."""
    shifted = (heading + math.pi) % (2 * math.pi)
    return np.degrees(shifted)
def heading_to_vec(heading):
    """Heading -> unit vector in an x-right / y-up frame."""
    rotated = (heading + math.pi * 0.5) % (2 * math.pi)
    return np.array([math.cos(rotated), math.sin(rotated)])
def ttc_by_path(ego, wp_paths, neighborhood_vehicle_states, ego_closest_wp):
    """Compute normalized TTC / distance observation features.

    Returns a 7-tuple: (lane_ttc, lane_dist, closest_lane_nv_rel_speed,
    intersection_ttc, intersection_distance, closest_its_nv_rel_speed,
    closest_its_nv_rel_pos).  Values are normalized (TTC by 10 s,
    distances by 100 m, speeds by 3.6/120 i.e. 120 km/h) and default to 1
    meaning "far / safe"; entries for non-existing lanes are zeroed.

    Side effect: resets and may set the module globals ``lane_crash_flag``
    and ``intersection_crash_flag`` consumed by the reward function.
    """
    global lane_crash_flag
    global intersection_crash_flag
    # init flag, dist, ttc, headings
    lane_crash_flag = False
    intersection_crash_flag = False
    # default 10s (normalized to 1)
    lane_ttc = np.array([1] * 5, dtype=float)
    # default 100m (normalized to 1)
    lane_dist = np.array([1] * 5, dtype=float)
    # default 120km/h (normalized to 1)
    closest_lane_nv_rel_speed = 1
    intersection_ttc = 1
    intersection_distance = 1
    closest_its_nv_rel_speed = 1
    # default 100m
    closest_its_nv_rel_pos = np.array([1, 1])
    # here to set invalid value to 0
    wp_paths_num = len(wp_paths)
    lane_ttc[wp_paths_num:] = 0
    lane_dist[wp_paths_num:] = 0
    # return if no neighbour vehicle or off the routes(no waypoint paths)
    if not neighborhood_vehicle_states or not wp_paths_num:
        return (
            lane_ttc,
            lane_dist,
            closest_lane_nv_rel_speed,
            intersection_ttc,
            intersection_distance,
            closest_its_nv_rel_speed,
            closest_its_nv_rel_pos,
        )
    # merge waypoint paths (consider might not the same length)
    merge_waypoint_paths = []
    for wp_path in wp_paths:
        merge_waypoint_paths += wp_path
    wp_poses = np.array([wp.pos for wp in merge_waypoint_paths])
    # compute neighbour vehicle closest wp
    nv_poses = np.array([nv.position for nv in neighborhood_vehicle_states])
    nv_wp_distance = np.linalg.norm(nv_poses[:, :2][:, np.newaxis] - wp_poses, axis=2)
    nv_closest_wp_index = np.argmin(nv_wp_distance, axis=1)
    nv_closest_distance = np.min(nv_wp_distance, axis=1)
    # get not in same lane id social vehicles(intersect vehicles and behind vehicles)
    wp_lane_ids = np.array([wp.lane_id for wp in merge_waypoint_paths])
    nv_lane_ids = np.array([nv.lane_id for nv in neighborhood_vehicle_states])
    not_in_same_lane_id = nv_lane_ids[:, np.newaxis] != wp_lane_ids
    not_in_same_lane_id = np.all(not_in_same_lane_id, axis=1)
    # Drop a leading "-" and the last two chars to approximate the edge id —
    # assumes lane ids like "<edge>_0"; TODO confirm against SMARTS id format.
    ego_edge_id = ego.lane_id[1:-2] if ego.lane_id[0] == "-" else ego.lane_id[:-2]
    nv_edge_ids = np.array(
        [
            nv.lane_id[1:-2] if nv.lane_id[0] == "-" else nv.lane_id[:-2]
            for nv in neighborhood_vehicle_states
        ]
    )
    not_in_ego_edge_id = nv_edge_ids[:, np.newaxis] != ego_edge_id
    not_in_ego_edge_id = np.squeeze(not_in_ego_edge_id, axis=1)
    is_not_closed_nv = not_in_same_lane_id & not_in_ego_edge_id
    not_closed_nv_index = np.where(is_not_closed_nv)[0]
    # filter sv not close to the waypoints including behind the ego or ahead past the end of the waypoints
    close_nv_index = np.where(nv_closest_distance < 2)[0]
    if not close_nv_index.size:
        pass
    else:
        close_nv = [neighborhood_vehicle_states[i] for i in close_nv_index]
        # calculate waypoints distance to ego car along the routes
        wps_with_lane_dist_list = []
        for wp_path in wp_paths:
            path_wp_poses = np.array([wp.pos for wp in wp_path])
            # Cumulative arc length along each path (first wp at distance 0).
            wp_poses_shift = np.roll(path_wp_poses, 1, axis=0)
            wps_with_lane_dist = np.linalg.norm(path_wp_poses - wp_poses_shift, axis=1)
            wps_with_lane_dist[0] = 0
            wps_with_lane_dist = np.cumsum(wps_with_lane_dist)
            wps_with_lane_dist_list += wps_with_lane_dist.tolist()
        wps_with_lane_dist_list = np.array(wps_with_lane_dist_list)
        # get neighbour vehicle closest waypoints index
        nv_closest_wp_index = nv_closest_wp_index[close_nv_index]
        # ego car and neighbour car distance, not very accurate since use the closest wp
        ego_nv_distance = wps_with_lane_dist_list[nv_closest_wp_index]
        # get neighbour vehicle lane index
        nv_lane_index = np.array(
            [merge_waypoint_paths[i].lane_index for i in nv_closest_wp_index]
        )
        # get wp path lane index
        lane_index_list = [wp_path[0].lane_index for wp_path in wp_paths]
        for i, lane_index in enumerate(lane_index_list):
            # get same lane vehicle
            same_lane_nv_index = np.where(nv_lane_index == lane_index)[0]
            if not same_lane_nv_index.size:
                continue
            same_lane_nv_distance = ego_nv_distance[same_lane_nv_index]
            closest_nv_index = same_lane_nv_index[np.argmin(same_lane_nv_distance)]
            closest_nv = close_nv[closest_nv_index]
            closest_nv_speed = closest_nv.speed
            closest_nv_heading = closest_nv.heading
            # radius to degree
            closest_nv_heading = heading_to_degree(closest_nv_heading)
            closest_nv_pos = closest_nv.position[:2]
            bounding_box = closest_nv.bounding_box
            # map the heading to make it consistent with the position coordination
            map_heading = (closest_nv_heading + 90) % 360
            map_heading_radius = np.radians(map_heading)
            nv_heading_vec = np.array(
                [np.cos(map_heading_radius), np.sin(map_heading_radius)]
            )
            nv_heading_vertical_vec = np.array([-nv_heading_vec[1], nv_heading_vec[0]])
            # get four edge center position (consider one vehicle take over two lanes when change lane)
            # maybe not necessary
            closest_nv_front = closest_nv_pos + bounding_box.length * nv_heading_vec
            closest_nv_behind = closest_nv_pos - bounding_box.length * nv_heading_vec
            closest_nv_left = (
                closest_nv_pos + bounding_box.width * nv_heading_vertical_vec
            )
            closest_nv_right = (
                closest_nv_pos - bounding_box.width * nv_heading_vertical_vec
            )
            edge_points = np.array(
                [closest_nv_front, closest_nv_behind, closest_nv_left, closest_nv_right]
            )
            ep_wp_distance = np.linalg.norm(
                edge_points[:, np.newaxis] - wp_poses, axis=2
            )
            ep_closed_wp_index = np.argmin(ep_wp_distance, axis=1)
            ep_closed_wp_lane_index = set(
                [merge_waypoint_paths[i].lane_index for i in ep_closed_wp_index]
                + [lane_index]
            )
            min_distance = np.min(same_lane_nv_distance)
            if ego_closest_wp.lane_index in ep_closed_wp_lane_index:
                if min_distance < 6:
                    lane_crash_flag = True
                nv_wp_heading = (
                    closest_nv_heading
                    - heading_to_degree(
                        merge_waypoint_paths[
                            nv_closest_wp_index[closest_nv_index]
                        ].heading
                    )
                ) % 360
                # find those car just get from intersection lane into ego lane
                if nv_wp_heading > 30 and nv_wp_heading < 330:
                    relative_close_nv_heading = closest_nv_heading - heading_to_degree(
                        ego.heading
                    )
                    # map nv speed to ego car heading
                    map_close_nv_speed = closest_nv_speed * np.cos(
                        np.radians(relative_close_nv_heading)
                    )
                    closest_lane_nv_rel_speed = min(
                        closest_lane_nv_rel_speed,
                        (map_close_nv_speed - ego.speed) * 3.6 / 120,
                    )
                else:
                    closest_lane_nv_rel_speed = min(
                        closest_lane_nv_rel_speed,
                        (closest_nv_speed - ego.speed) * 3.6 / 120,
                    )
            relative_speed_m_per_s = ego.speed - closest_nv_speed
            # Clamp near-zero closing speed to avoid division blow-up.
            if abs(relative_speed_m_per_s) < 1e-5:
                relative_speed_m_per_s = 1e-5
            ttc = min_distance / relative_speed_m_per_s
            # normalized into 10s
            ttc /= 10
            for j in ep_closed_wp_lane_index:
                if min_distance / 100 < lane_dist[j]:
                    # normalize into 100m
                    lane_dist[j] = min_distance / 100
                if ttc <= 0:
                    # Negative ttc means the neighbour is pulling away.
                    continue
                if j == ego_closest_wp.lane_index:
                    if ttc < 0.1:
                        lane_crash_flag = True
                if ttc < lane_ttc[j]:
                    lane_ttc[j] = ttc
    # get vehicles not in the waypoints lane
    if not not_closed_nv_index.size:
        pass
    else:
        filter_nv = [neighborhood_vehicle_states[i] for i in not_closed_nv_index]
        nv_pos = np.array([nv.position for nv in filter_nv])[:, :2]
        nv_heading = heading_to_degree(np.array([nv.heading for nv in filter_nv]))
        nv_speed = np.array([nv.speed for nv in filter_nv])
        ego_pos = ego.position[:2]
        ego_heading = heading_to_degree(ego.heading)
        ego_speed = ego.speed
        nv_to_ego_vec = nv_pos - ego_pos
        line_heading = (
            (np.arctan2(nv_to_ego_vec[:, 1], nv_to_ego_vec[:, 0]) * 180 / np.pi) - 90
        ) % 360
        nv_to_line_heading = (nv_heading - line_heading) % 360
        ego_to_line_heading = (ego_heading - line_heading) % 360
        # judge two heading whether will intersect
        same_region = (nv_to_line_heading - 180) * (
            ego_to_line_heading - 180
        ) > 0  # both right of line or left of line
        ego_to_nv_heading = ego_to_line_heading - nv_to_line_heading
        valid_relative_angle = (
            (nv_to_line_heading - 180 > 0) & (ego_to_nv_heading > 0)
        ) | ((nv_to_line_heading - 180 < 0) & (ego_to_nv_heading < 0))
        # emit behind vehicles
        valid_intersect_angle = np.abs(line_heading - ego_heading) < 90
        # emit patient vehicles which stay in the intersection
        not_patient_nv = nv_speed > 0.01
        # get valid intersection sv
        intersect_sv_index = np.where(
            same_region & valid_relative_angle & valid_intersect_angle & not_patient_nv
        )[0]
        if not intersect_sv_index.size:
            pass
        else:
            its_nv_pos = nv_pos[intersect_sv_index][:, :2]
            its_nv_speed = nv_speed[intersect_sv_index]
            its_nv_to_line_heading = nv_to_line_heading[intersect_sv_index]
            line_heading = line_heading[intersect_sv_index]
            # ego_to_line_heading = ego_to_line_heading[intersect_sv_index]
            # get intersection closest vehicle
            ego_nv_distance = np.linalg.norm(its_nv_pos - ego_pos, axis=1)
            ego_closest_its_nv_index = np.argmin(ego_nv_distance)
            ego_closest_its_nv_distance = ego_nv_distance[ego_closest_its_nv_index]
            line_heading = line_heading[ego_closest_its_nv_index]
            ego_to_line_heading = (
                heading_to_degree(ego_closest_wp.heading) - line_heading
            ) % 360
            ego_closest_its_nv_speed = its_nv_speed[ego_closest_its_nv_index]
            its_closest_nv_to_line_heading = its_nv_to_line_heading[
                ego_closest_its_nv_index
            ]
            # rel speed along ego-nv line
            closest_nv_rel_speed = ego_speed * np.cos(
                np.radians(ego_to_line_heading)
            ) - ego_closest_its_nv_speed * np.cos(
                np.radians(its_closest_nv_to_line_heading)
            )
            closest_nv_rel_speed_m_s = closest_nv_rel_speed
            # Clamp near-zero relative speed to avoid division blow-up.
            if abs(closest_nv_rel_speed_m_s) < 1e-5:
                closest_nv_rel_speed_m_s = 1e-5
            ttc = ego_closest_its_nv_distance / closest_nv_rel_speed_m_s
            # NOTE(review): this min() runs before the `ttc < 0` guard below,
            # so a negative ttc still lowers intersection_ttc here and the
            # guarded update is then redundant — looks unintended; confirm.
            intersection_ttc = min(intersection_ttc, ttc / 10)
            intersection_distance = min(
                intersection_distance, ego_closest_its_nv_distance / 100
            )
            # transform relative pos to ego car heading coordinate
            rotate_axis_angle = np.radians(90 - ego_to_line_heading)
            closest_its_nv_rel_pos = (
                np.array(
                    [
                        ego_closest_its_nv_distance * np.cos(rotate_axis_angle),
                        ego_closest_its_nv_distance * np.sin(rotate_axis_angle),
                    ]
                )
                / 100
            )
            closest_its_nv_rel_speed = min(
                closest_its_nv_rel_speed, -closest_nv_rel_speed * 3.6 / 120
            )
            if ttc < 0:
                pass
            else:
                intersection_ttc = min(intersection_ttc, ttc / 10)
                intersection_distance = min(
                    intersection_distance, ego_closest_its_nv_distance / 100
                )
                # if to collide in 3s, make it slow down
                if ttc < 2 or ego_closest_its_nv_distance < 6:
                    intersection_crash_flag = True
    return (
        lane_ttc,
        lane_dist,
        closest_lane_nv_rel_speed,
        intersection_ttc,
        intersection_distance,
        closest_its_nv_rel_speed,
        closest_its_nv_rel_pos,
    )
def ego_ttc_calc(ego_lane_index, ttc_by_path, lane_dist):
    """Re-center per-lane ttc/dist arrays so the ego lane sits at index 2.

    Lanes that cannot exist for the given ego lane are zeroed first
    (ego lane 0 -> [3, 4], 1 -> [4], 2 -> [], 3 -> [0], 4 -> [0, 1]).
    Note: zeroes the caller's arrays in place before rolling.
    """
    invalid_by_lane = [[3, 4], [4], [], [0], [0, 1]]
    invalid = invalid_by_lane[ego_lane_index]
    shift = 2 - ego_lane_index
    ttc_by_path[invalid] = 0
    lane_dist[invalid] = 0
    return np.roll(ttc_by_path, shift), np.roll(lane_dist, shift)
def get_distance_from_center(env_obs):
    """Return the ego vehicle's signed lateral offset from the lane center,
    normalized by half the lane width (so +/-1 is the lane edge)."""
    ego = env_obs.ego_vehicle_state
    # First waypoint of every candidate path; take the one nearest the ego.
    lead_wps = [path[0] for path in env_obs.waypoint_paths]
    nearest = min(lead_wps, key=lambda wp: wp.dist_to(ego.position))
    half_width = nearest.lane_width * 0.5
    return nearest.signed_lateral_error(ego.position) / half_width
# ==================================================
# obs function
# ==================================================
def observation_adapter(env_obs):
    """
    Transform the environment's observation into something more suited for your model
    """
    ego_state = env_obs.ego_vehicle_state
    wp_paths = env_obs.waypoint_paths
    closest_wps = [path[0] for path in wp_paths]

    # distance of vehicle from center of lane
    closest_wp = min(closest_wps, key=lambda wp: wp.dist_to(ego_state.position))
    signed_dist_from_center = closest_wp.signed_lateral_error(ego_state.position)
    lane_hwidth = closest_wp.lane_width * 0.5
    norm_dist_from_center = signed_dist_from_center / lane_hwidth

    # wp heading errors in current lane in front of vehicle
    # Sample indices grow roughly Fibonacci-style: dense near the car,
    # sparse far ahead.
    indices = np.array([0, 1, 2, 3, 5, 8, 13, 21, 34, 50])

    # solve case that wps are not enough, then assume the left heading to be same with the last valid.
    wps_len = [len(path) for path in wp_paths]
    max_len_lane_index = np.argmax(wps_len)
    max_len = np.max(wps_len)
    last_wp_index = 0
    for i, wp_index in enumerate(indices):
        if wp_index > max_len - 1:
            # Clamp all remaining sample indices to the last valid waypoint.
            indices[i:] = last_wp_index
            break
        last_wp_index = wp_index

    sample_wp_path = [wp_paths[max_len_lane_index][i] for i in indices]
    heading_errors = [
        math.sin(wp.relative_heading(ego_state.heading)) for wp in sample_wp_path
    ]

    ego_lane_index = closest_wp.lane_index
    (
        lane_ttc,
        lane_dist,
        closest_lane_nv_rel_speed,
        intersection_ttc,
        intersection_distance,
        closest_its_nv_rel_speed,
        closest_its_nv_rel_pos,
    ) = ttc_by_path(
        ego_state, wp_paths, env_obs.neighborhood_vehicle_states, closest_wp
    )
    # Re-center the per-lane arrays so the ego lane sits at index 2.
    lane_ttc, lane_dist = ego_ttc_calc(ego_lane_index, lane_ttc, lane_dist)

    # All features are scaled to small ranges: speed by 120 (after * 3.6,
    # presumably m/s -> km/h -- TODO confirm), steering by pi/2.
    return {
        "distance_from_center": np.array([norm_dist_from_center]),
        "heading_errors": np.array(heading_errors),
        "speed": np.array([ego_state.speed * 3.6 / 120]),
        "steering": np.array([ego_state.steering / (0.5 * math.pi)]),
        "lane_ttc": np.array(lane_ttc),
        "lane_dist": np.array(lane_dist),
        "closest_lane_nv_rel_speed": np.array([closest_lane_nv_rel_speed]),
        "intersection_ttc": np.array([intersection_ttc]),
        "intersection_distance": np.array([intersection_distance]),
        "closest_its_nv_rel_speed": np.array([closest_its_nv_rel_speed]),
        "closest_its_nv_rel_pos": np.array(closest_its_nv_rel_pos),
    }
# ==================================================
# reward function
# ==================================================
def reward_adapter(env_obs, env_reward):
    """
    Shaped reward: the environment's distance-travelled reward, minus
    penalties for driving off-center and for imminent collisions flagged
    by the observation pipeline.
    """
    global lane_crash_flag
    dist_from_center = get_distance_from_center(env_obs)
    center_penalty = -np.abs(dist_from_center)

    # -5 when a lane collision is imminent, another -5 for an
    # intersection collision.
    crash_penalty = -5 if lane_crash_flag else 0
    if intersection_crash_flag:
        crash_penalty -= 5

    total_reward = np.sum([1.0 * env_reward])
    total_penalty = np.sum([0.1 * center_penalty, 1 * crash_penalty])
    return (total_reward + total_penalty) / 200.0
def action_adapter(model_action):
    """Map a discrete policy output (0-3) onto the configured lane actions."""
    assert model_action in (0, 1, 2, 3)
    return ACTION_CHOICE[model_action]
def info_adapter(reward, info):
    """Pass the environment's info dict through unchanged (reward is ignored)."""
    return info
# Interface: lane-level actions with waypoints, nearby vehicles and an
# occupancy grid as sensor inputs; episodes have no step cap.
agent_interface = AgentInterface(
    max_episode_steps=None,
    waypoints=True,
    # neighborhood < 60m
    neighborhood_vehicles=NeighborhoodVehicles(radius=60),
    # OGM within 64 * 0.25 = 16
    ogm=OGM(64, 64, 0.25),
    action=ActionSpaceType.Lane,
)

# Bundle the interface with the adapters defined above.
agent_spec = AgentSpec(
    interface=agent_interface,
    observation_adapter=observation_adapter,
    reward_adapter=reward_adapter,
    action_adapter=action_adapter,
    info_adapter=info_adapter,
)
| 21,356 | 7,448 |
"""Test suite for optimizers.constant."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import ast
import pytest
from pycc.asttools import parse
from pycc.optimizers import constant
source = """
ONE = 1
TWO = 2
THREE = ONE + TWO
FOUR = THREE + ONE
FIVE = THREE + TWO
def return_const():
return FOUR
def return_var():
return FIVE
FIVE = FIVE + ONE
FIVE -= ONE
"""
@pytest.fixture
def node():
    """AST of the sample module under test."""
    return parse.parse(source)
def test_constant_inliner(node):
    """Constant folding should inline module-level constant expressions."""
    constant.optimize(node)

    # Module-level assignments built from constants are folded.
    for stmt_index, expected in ((2, 3), (3, 4), (4, 5)):
        assert node.body[stmt_index].value.n == expected

    # A function returning a never-reassigned constant gets a literal.
    const_return = node.body[5].body[0].value
    assert isinstance(const_return, ast.Num)
    assert const_return.n == 4

    # FIVE is reassigned later, so its use must stay a Name lookup.
    var_return = node.body[6].body[0].value
    assert isinstance(var_return, ast.Name)
    assert var_return.id == 'FIVE'
| 1,156 | 399 |
# -*- coding: utf-8 -*-
__author__ = 'alsbi'
from multiprocessing import Process
from websockify import WebSocketProxy
class Proxy():
    """A websockify WebSocketProxy wrapper that runs in a child process.

    Each proxy instance gets a listen port allocated from the shared
    50000-62999 pool, keyed by the VM's uuid.
    """

    # uuid -> allocated listen port (shared across all instances).
    port = {}

    @classmethod
    def get_port(cls, uuid):
        """Return the port reserved for *uuid*, allocating the lowest free one.

        The original implementation took ``list(set(...))[0]``, i.e. an
        arbitrary element of an unordered set; ``min()`` makes allocation
        deterministic.
        """
        if uuid in cls.port:
            return cls.port[uuid]
        port = min(set(range(50000, 63000)) - set(cls.port.values()))
        cls.port[uuid] = port
        return port

    def __init__(self,
                 target_host=None,
                 target_port=None,
                 listen_host=None,
                 uuid=None):
        self.target_host = target_host
        self.target_port = target_port
        self.listen_host = listen_host
        self.listen_port = Proxy.get_port(uuid=uuid)
        self.uuid = uuid

    def start_proxy(self):
        """Start the SSL-only websocket proxy in a child process.

        Returns the started multiprocessing.Process so callers can manage
        its lifetime.

        NOTE(review): listen_host/listen_port are stored but never passed
        to WebSocketProxy -- confirm whether websockify's defaults are
        intended here.
        """
        def run():
            cert = '/home/vnc/vnc_service/bin/server.crt'
            key = '/home/vnc/vnc_service/bin/server.key'
            params = {'ssl_only': True,
                      'cert': cert,
                      'key': key,
                      'target_port': self.target_port}
            server = WebSocketProxy(**params)
            server.start_server()

        proc = Process(target=run)
        proc.start()
        return proc
class ProxyManager(object):
    """Tracks one Proxy per uuid, creating and starting them on demand."""

    def __init__(self,
                 listen_host=None,
                 target_host=None):
        self.listen_host = listen_host
        self.target_host = target_host
        self.list_proxy = {}

    def create(self,
               uuid=None,
               port=None):
        """Return the proxy registered for *uuid*, starting a new one if needed."""
        existing = self.list_proxy.get(uuid)
        if existing:
            return existing
        proxy = Proxy(target_port=port,
                      target_host=self.target_host,
                      listen_host=self.listen_host,
                      uuid=uuid)
        self.list_proxy[uuid] = proxy
        proxy.start_proxy()
        return proxy

    def delete(self, uuid):
        # NOTE(review): this only forgets the registry entry; the child
        # proxy process keeps running because its Process handle is never
        # stored.
        if self.list_proxy.get(uuid):
            del self.list_proxy[uuid]
| 2,079 | 613 |
from .massthings import MassThings
# Statement shown to Red-DiscordBot end users describing this cog's data usage.
__red_end_user_data_statement__ = (
    "This cog does not persistently store data or metadata about users."
    # "<s>If you are using this cog, user data storage will probably be much less significant thing then API abuse</s>"
)


def setup(bot):
    # Red/discord.py extension entry point: register the cog on load.
    bot.add_cog(MassThings(bot))
| 318 | 99 |
# Copyright (c) 2010, individual contributors (see AUTHORS file)
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import os
import re
from django.core.exceptions import MiddlewareNotUsed
from observatory.settings import MEDIA_ROOT, JS_FILES, CSS_FILES
class CssSmasher(object):
    """One-shot middleware that concatenates and whitespace-collapses the
    site's CSS and JS into single bundle files.

    Raising MiddlewareNotUsed tells Django to discard this middleware after
    __init__ runs, so the smashing happens exactly once at startup.
    """

    def __init__(self):
        cssdir = os.path.join(MEDIA_ROOT, 'css')
        with open(os.path.join(MEDIA_ROOT, 'style.css'), 'w') as stylecss:
            # sorted() makes the bundle order (and thus the CSS cascade)
            # deterministic; os.listdir order is platform-dependent.
            for name in sorted(os.listdir(cssdir)):
                csspath = os.path.join(cssdir, name)
                if not os.path.isfile(csspath):
                    continue  # skip subdirectories / stray entries
                with open(csspath, 'r') as cssfile:
                    stylecss.write(re.sub(r"\s+", ' ', cssfile.read()))
        with open(os.path.join(MEDIA_ROOT, "observatory.js"), 'w') as js:
            # JS_FILES is an explicit ordered list, so no sorting here.
            for jspath in [os.path.join(MEDIA_ROOT, path) for path in JS_FILES]:
                with open(jspath, "r") as jsdata:
                    js.write(re.sub(r"\s+", " ", jsdata.read()))
        raise MiddlewareNotUsed
| 1,528 | 559 |
# flake8: noqa
"""
Copyright 2021 - Present Okta, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# AUTO-GENERATED! DO NOT EDIT FILE DIRECTLY
# SEE CONTRIBUTOR DOCUMENTATION
from okta.okta_object import OktaObject
from okta.models import verification_method\
as verification_method
class AccessPolicyRuleApplicationSignOn(
    OktaObject
):
    """
    A class for AccessPolicyRuleApplicationSignOn objects.
    """

    def __init__(self, config=None):
        # Populate attributes from an optional API response/config dict;
        # everything defaults to None when absent.
        super().__init__(config)
        if config:
            self.access = config["access"]\
                if "access" in config else None
            if "verificationMethod" in config:
                # Accept either an already-constructed VerificationMethod
                # or a raw dict that still needs wrapping.
                if isinstance(config["verificationMethod"],
                              verification_method.VerificationMethod):
                    self.verification_method = config["verificationMethod"]
                elif config["verificationMethod"] is not None:
                    self.verification_method = verification_method.VerificationMethod(
                        config["verificationMethod"]
                    )
                else:
                    self.verification_method = None
            else:
                self.verification_method = None
        else:
            self.access = None
            self.verification_method = None

    def request_format(self):
        # Serialize back to the API's camelCase request shape, merged onto
        # the parent's format.
        parent_req_format = super().request_format()
        current_obj_format = {
            "access": self.access,
            "verificationMethod": self.verification_method
        }
        parent_req_format.update(current_obj_format)
        return parent_req_format
| 2,101 | 556 |
import discord
from discord.ext import commands
import json
from utils import error, RARITY_DICT
from parse_profile import get_profile_data
from extract_ids import extract_internal_names
# Create the master list!
from text_files.accessory_list import talisman_upgrades
# Get a list of all accessories
ACCESSORIES = []
with open("text_files/MASTER_ITEM_DICT.json", "r", encoding="utf-8") as file:
    item_dict = json.load(file)
    # Items with a known rarity are treated as accessories; entries with
    # no rarity or "UNKNOWN" are skipped.
    for item in item_dict:
        if item_dict[item].get("rarity", False) and item_dict[item]["rarity"] != "UNKNOWN":
            ACCESSORIES.append(item_dict[item])

# Now remove all the low tier ones
MASTER_ACCESSORIES = []
for accessory in ACCESSORIES:
    # talisman_upgrades keys are accessories that have an upgraded form,
    # so dropping them leaves only the top tier of each chain.
    if accessory["internal_name"] not in talisman_upgrades.keys():
        MASTER_ACCESSORIES.append(accessory)
class missing_cog(commands.Cog):
    """Cog exposing the `missing` command: list accessories a player lacks."""

    def __init__(self, bot):
        self.client = bot

    @commands.command(aliases=['missing_accessories', 'accessories', 'miss', 'm'])
    async def missing(self, ctx, username=None):
        # Fetch and validate the player's profile; get_profile_data handles
        # its own error reporting and returns None on failure.
        player_data = await get_profile_data(ctx, username)
        if player_data is None:
            return
        username = player_data["username"]
        accessory_bag = player_data.get("talisman_bag", None)
        inv_content = player_data.get("inv_contents", {"data": []})
        if not accessory_bag:
            return await error(ctx, "Error, could not find this person's accessory bag", "Do they have their API disabled for this command?")
        accessory_bag = extract_internal_names(accessory_bag["data"])
        inventory = extract_internal_names(inv_content["data"])
        # An accessory counts as owned if it's in either the bag or inventory.
        missing = [x for x in MASTER_ACCESSORIES if x["internal_name"] not in accessory_bag+inventory]
        if not missing:
            return await error(ctx, f"Completion!", f"{username} already has all accessories!")
        # Cap the listing at 42 entries to stay within embed limits.
        sorted_accessories = sorted(missing, key=lambda x: x["name"])[:42]
        extra = "" if len(missing) <= 36 else f", showing the first {len(sorted_accessories)}"
        embed = discord.Embed(title=f"Missing {len(missing)} accessories for {username}{extra}", colour=0x3498DB)

        def make_embed(embed, acc_list):
            # One embed field per group, labelled with first/last initials.
            text = ""
            for item in acc_list:
                internal_name, name, rarity, wiki_link = item.values()
                wiki_link = "<Doesn't exist>" if not wiki_link else f"[wiki]({wiki_link})"
                text += f"{RARITY_DICT[rarity]} {name}\nLink: {wiki_link}\n"
            embed.add_field(name=f"{acc_list[0]['name'][0]}-{acc_list[-1]['name'][0]}", value=text, inline=True)

        if len(sorted_accessories) < 6:  # For people with only a few missing
            make_embed(embed, sorted_accessories)
        else:
            # Split the listing into 6 roughly equal embed fields.
            list_length = int(len(sorted_accessories)/6)
            for row in range(6):
                row_accessories = sorted_accessories[row*list_length:(row+1)*list_length]  # Get the first group out of 6
                make_embed(embed, row_accessories)
        embed.set_footer(text=f"Command executed by {ctx.author.display_name} | Community Bot. By the community, for the community.")
        await ctx.send(embed=embed)
| 3,203 | 989 |
import os
import cv2
from lib import lib
images_dir = "images"
faces_dir = "images/faces"
def main():
    """Detect faces in every image directly under `images_dir` and write
    each face crop to `faces_dir` as <original-name>__<n>.jpg."""
    for file_name in os.listdir(images_dir):
        full_path = images_dir + "/" + file_name
        if not os.path.isfile(full_path):
            continue
        print("Processing " + full_path)
        img = cv2.imread(full_path, cv2.IMREAD_UNCHANGED)
        _, _, faces = lib.detect_faces(img, return_colour=True)
        for n, face in enumerate(faces, start=1):
            (x, y, w, h) = face
            imgfile = "%s/%s__%d.jpg" % (faces_dir, file_name, n)
            # BUG FIX: rows span the height and columns the width; the
            # original sliced img[y:y+w, x:x+h], which crops the wrong
            # region for non-square face boxes.
            cv2.imwrite(imgfile, img[y: y + h, x: x + w])
# Guard the entry point so importing this module doesn't trigger the
# (side-effecting) face extraction; running it as a script is unchanged.
if __name__ == "__main__":
    main()
| 725 | 266 |
"""
Google Sheets interaction lib. Configuration settings:
[gapps]
api_scope = https://www.googleapis.com/auth/spreadsheets
spreadsheet_id =
spreadsheet_range =
"""
import configparser
import traceback
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
# setup the config
config = configparser.RawConfigParser()
config.read('config.conf')

# If modifying these scopes, delete the file token.json.
api_scope = config.get('gapps', 'api_scope')
spreadsheet_id = config.get('gapps', 'spreadsheet_id')

# set up the credentials token if it doesn't exist, read it if it does
# (`file` here is oauth2client's storage module, not the builtin).
store = file.Storage('token.json')
creds = store.get()
if not creds or creds.invalid:
    # No (valid) cached token: run the interactive OAuth flow and cache it.
    flow = client.flow_from_clientsecrets('gapps-auth.json', api_scope)
    creds = tools.run_flow(flow, store)

# build the sheets API service object
service = build('sheets', 'v4', http=creds.authorize(Http()))

# TODO: implement reading, example below
# request = service.spreadsheets().values().get(spreadsheetId=SPREADSHEET_ID, range=RANGE_NAME)
# response = request.execute()
# return response
def update_row(tab_name, spreadsheet_range, update_rows):
    """
    Appends spreadsheet rows to a spreadsheet tab
    :param tab_name: name of the tab (sheet) to append to
    :param spreadsheet_range: A1-notation range within the tab
    :param update_rows: list of rows, each a list of cell values
    :return: the API response dict, or None if the request failed
    """
    target_body = {
        'majorDimension': 'ROWS',
        'values': update_rows
    }
    range_string = f"{tab_name}!{spreadsheet_range}"
    try:
        request = service.spreadsheets().values().append(spreadsheetId=spreadsheet_id, range=range_string,
                                                         valueInputOption='RAW', body=target_body)
        return request.execute()
    except Exception:
        # Best effort: log the failure but don't propagate. The original
        # bare `except:` would also swallow KeyboardInterrupt/SystemExit.
        print(traceback.format_exc())
        return None
| 1,806 | 542 |
import sys
import pandas as pd
from sqlalchemy import create_engine
import re
import numpy as np
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk import pos_tag, ne_chunk
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('words')
nltk.download('averaged_perceptron_tagger')
nltk.download('maxent_ne_chunker')
from sklearn.pipeline import Pipeline
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
import pickle
def load_data(database_filepath):
    '''
    INPUT
    database_filepath - path to the SQLite database created by the ETL step
    OUTPUT
    X - the messages
    Y - the category labels
    Y.columns - the category names

    Loads the DisasterMessages table and splits it into features and labels.
    '''
    engine = create_engine('sqlite:///' + database_filepath)
    df = pd.read_sql_table('DisasterMessages', engine)
    # The message text is the feature; everything except the metadata
    # columns is a label.
    X = df['message']
    Y = df.drop(['id', 'message', 'original', 'genre'], axis=1)
    return X, Y, Y.columns
def tokenize(text):
    '''
    INPUT
    text - text to tokenize
    OUTPUT
    words_lemmed - tokenized text

    Normalizes to lower case, strips punctuation, tokenizes, removes
    English stop words, and lemmatizes the remaining words.
    '''
    # Normalize
    text_normalized = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
    # Tokenize text
    tokens = word_tokenize(text_normalized)
    # PERF FIX: build the stop-word set and the lemmatizer once per call.
    # The original re-read the stopword corpus for every token and
    # instantiated a new WordNetLemmatizer per word.
    stop_words = set(stopwords.words("english"))
    lemmatizer = WordNetLemmatizer()
    words_lemmed = [lemmatizer.lemmatize(w) for w in tokens if w not in stop_words]
    return words_lemmed
def build_model():
    '''
    INPUT
    OUTPUT
    cv - final model

    Builds a CountVectorizer -> TfidfTransformer -> multi-output random
    forest pipeline, wrapped in a small grid search over forest parameters.
    '''
    steps = [
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer()),
        ('clf', MultiOutputClassifier(RandomForestClassifier())),
    ]
    param_grid = {
        'clf__estimator__max_features': ['auto','log2'],
        'clf__estimator__min_samples_leaf': [1,2]
    }
    return GridSearchCV(Pipeline(steps), param_grid)
def evaluate_model(model, X_test, Y_test, category_names):
    '''
    INPUT
    model - trained model (result of build_model())
    X_test - test split of X
    Y_test - test split of Y
    category_names - names of the label columns
    OUTPUT

    Prints a classification report for each category separately.
    '''
    y_pred = model.predict(X_test)
    for idx in range(len(category_names)):
        print(category_names[idx])
        print(classification_report(Y_test.iloc[:, idx], y_pred[:, idx]))
def save_model(model, model_filepath):
    '''
    INPUT
    model - the object to persist
    model_filepath - destination path for the pickle file
    OUTPUT

    Serializes the model to disk with pickle.
    '''
    with open(model_filepath, 'wb') as handle:
        pickle.dump(model, handle)
def main():
    """CLI entry point: load data, train, evaluate, and pickle the model.

    Usage: python train_classifier.py <database_filepath> <model_filepath>
    """
    if len(sys.argv) != 3:
        print('Please provide the filepath of the disaster messages database '\
              'as the first argument and the filepath of the pickle file to '\
              'save the model to as the second argument. \n\nExample: python '\
              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
        return

    database_filepath, model_filepath = sys.argv[1:]
    print('Loading data...\n DATABASE: {}'.format(database_filepath))
    X, Y, category_names = load_data(database_filepath)
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)

    print('Building model...')
    model = build_model()

    print('Training model...')
    model.fit(X_train, Y_train)

    print('Evaluating model...')
    evaluate_model(model, X_test, Y_test, category_names)

    print('Saving model...\n MODEL: {}'.format(model_filepath))
    save_model(model, model_filepath)

    print('Trained model saved!')
# Script entry point.
if __name__ == '__main__':
    main()
import numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt
def norm_ball(p):
    """Return sample points on the boundary of the 2-D unit p-norm ball.

    Parameters
    ----------
    p : float
        Norm order (> 0); p=2 gives the unit circle, p=1 the diamond.

    Returns
    -------
    numpy.ndarray of shape (2, N) whose columns are (x, y) boundary points
    for angles 0..2*pi. (The original returned a deprecated np.matrix;
    callers that only index/plot are unaffected.)
    """
    step = np.pi / 128
    theta = np.arange(0, 2 * np.pi + step, step)
    # Unit-circle directions, rescaled so each column has p-norm exactly 1:
    # a = (|x|^p + |y|^p)^(-1/p).
    x, y = np.cos(theta), np.sin(theta)
    scale = (np.abs(x) ** p + np.abs(y) ** p) ** (-1.0 / p)
    return np.vstack((x, y)) * scale
# Plot the unit p-norm ball for p = 0.25, 0.5, ..., 5.0 in a 5x4 grid.
P = np.arange(0.25,5.25,0.25)
#print(X)
fig = plt.figure(figsize=(10,10))
NumPlotRows = 5
NumPlotCols = 4
for i,p in enumerate(P):
    X = norm_ball(p=p)
    plt.subplot(NumPlotRows, NumPlotCols, i+1)
    plt.plot(X[0,:].T, X[1,:].T,'-',clip_on=False)
    ax = fig.gca()
    ax.set_xlim((-2,2))
    ax.set_ylim((-2,2))
    ax.axis('equal')
    # Hide axes and spines so only the ball outline and title are visible.
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    #plt.plot(X[0,:].tolist())
    for loc, spine in ax.spines.items():
        spine.set_color('none') # don't draw spine
    plt.title(p)
plt.show()
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
import logging
import warnings
class Countries(APIClassTemplate):
    """
    The Countries Object in the FMC.
    """

    VALID_JSON_DATA = ["id", "name", "iso2", "iso3"]
    VALID_FOR_KWARGS = VALID_JSON_DATA + []
    URL_SUFFIX = "/object/countries"
    # Raw string: \w, \d, \- are regex escapes. The original non-raw literal
    # relied on Python's invalid-escape fallback, which is a
    # DeprecationWarning today and a SyntaxError in future versions; the
    # resulting string value is identical.
    VALID_CHARACTERS_FOR_NAME = r"""[.\w\d_\- ]"""

    def __init__(self, fmc, **kwargs):
        """Initialize via the shared APIClassTemplate plumbing."""
        super().__init__(fmc, **kwargs)
        logging.debug("In __init__() for Countries class.")
        self.parse_kwargs(**kwargs)
        self.type = "Country"

    # The FMC API exposes countries as read-only; mutating verbs only log.
    def post(self):
        logging.info("POST method for API for Countries not supported.")

    def put(self):
        logging.info("PUT method for API for Countries not supported.")

    def delete(self):
        logging.info("DELETE method for API for Countries not supported.")
class Country(Countries):
    """Dispose of this Class after 20210101."""

    def __init__(self, fmc, **kwargs):
        # Deprecated shim: make sure the warning is actually emitted on every
        # construction (resetwarnings clears any "ignore" filters), then
        # defer entirely to Countries.
        warnings.resetwarnings()
        warnings.warn("Deprecated: Country() should be called via Countries().")
        super().__init__(fmc, **kwargs)
| 1,170 | 368 |
def check_dependencies(settings):
    """Verify required and optional runtime dependencies.

    Python/NodeJS problems are fatal (dependency_error); missing SCM
    clients only warn. Ends by calling fail_if_missing_dependencies(),
    which presumably aborts if any fatal problem was recorded -- defined
    elsewhere in this module.
    """
    # A Python 2 below the 2.x minimum, or a Python 3 below the 3.x
    # minimum, is rejected outright.
    pyver = sys.version_info[:2]
    if pyver < PYTHON_2_MIN_VERSION or (3, 0) <= pyver < PYTHON_3_MIN_VERSION:
        dependency_error("Python %s or %s+ is required." % (PYTHON_2_MIN_VERSION_STR, PYTHON_3_MIN_VERSION_STR))
    if not is_exe_in_path("node"):
        dependency_error("node (from NodeJS) was not found. It must be " "installed from your package manager or from " "https://nodejs.org/")
    if not os.path.exists("node_modules"):
        dependency_error("The node_modules directory is missing. Please " "re-run `./setup.py develop` to install all NodeJS " "dependencies.")
    # Each pipeline binary configured in settings must exist on disk.
    for key in ("UGLIFYJS_BINARY", "LESS_BINARY", "BABEL_BINARY"):
        path = settings.PIPELINE[key]
        if not os.path.exists(path):
            dependency_error("%s is missing. Please re-run `./setup.py " "develop` to install all NodeJS dependencies." % os.path.abspath(path))
    # Everything below is optional SCM support: warn, don't fail.
    if not has_module("pysvn") and not has_module("subvertpy"):
        dependency_warning("Neither the subvertpy nor pysvn Python modules " "were found. Subversion integration will not work. " "For pysvn, see your package manager for the " "module or download from " "http://pysvn.tigris.org/project_downloads.html. " "For subvertpy, run `pip install subvertpy`. We " "recommend pysvn for better compatibility.")
    if has_module("P4"):
        try:
            # The Python module alone isn't enough; the p4 CLI must run too.
            subprocess.call(["p4", "-h"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        except OSError:
            dependency_warning("The p4 command not found. Perforce " "integration will not work. To enable support, " "download p4 from " "http://cdist2.perforce.com/perforce/ and " "place it in your PATH.")
    else:
        dependency_warning("The p4python module was not found. Perforce " "integration will not work. To enable support, " "run `pip install p4python`")
    if not is_exe_in_path("hg"):
        dependency_warning("The hg command was not found. Mercurial " "integration will not work. To enable support, " "run `pip install mercurial`")
    if not is_exe_in_path("bzr"):
        dependency_warning("The bzr command was not found. Bazaar integration " "will not work. To enable support, run " "`pip install bzr`")
    if not is_exe_in_path("cvs"):
        dependency_warning("The cvs command was not found. CVS integration " "will not work. To enable support, install cvs " "from your package manager or from " "http://www.nongnu.org/cvs/")
    if not is_exe_in_path("git"):
        dependency_warning("The git command not found. Git integration " "will not work. To enable support, install git " "from your package manager or from " "https://git-scm.com/downloads")
    fail_if_missing_dependencies()
def upgrade_database():
    """Perform an upgrade of the database.
    This will prompt the user for confirmation, with instructions on what
    will happen. If the database is using SQLite3, it will be backed up
    automatically, making a copy that contains the current timestamp.
    Otherwise, the user will be prompted to back it up instead.
    Returns:
        bool:
            ``True`` if the user has confirmed the upgrade. ``False`` if they
            have not.
    """
    database = settings.DATABASES["default"]
    db_name = database["NAME"]
    backup_db_name = None
    # Automatic backup only applies to SQLite files (a plain file copy) and
    # only when the user didn't pass --no-backup.
    if "--no-backup" not in sys.argv and database["ENGINE"] == "django.db.backends.sqlite3" and os.path.exists(db_name):
        backup_db_name = "%s.%s" % (db_name, datetime.now().strftime("%Y%m%d.%H%M%S"))
        try:
            shutil.copy(db_name, backup_db_name)
        except Exception as e:
            # A failed backup isn't fatal; fall through to the manual prompt.
            sys.stderr.write("Unable to make a backup of your database at " "%s: %s\n\n" % (db_name, e))
            backup_db_name = None
    if "--noinput" in sys.argv:
        # Non-interactive mode: proceed without asking.
        if backup_db_name:
            print("Your existing database has been backed up to\n" "%s\n" % backup_db_name)
        perform_upgrade = True
    else:
        message = "You are about to upgrade your database, which cannot be undone." "\n\n"
        if backup_db_name:
            message += "Your existing database has been backed up to\n" "%s" % backup_db_name
        else:
            message += "PLEASE MAKE A BACKUP BEFORE YOU CONTINUE!"
        message += '\n\nType "yes" to continue or "no" to cancel: '
        perform_upgrade = input(message).lower() in ("yes", "y")
        print("\n")
    if perform_upgrade:
        print("===========================================================\n" 'Performing the database upgrade. Any "unapplied evolutions"\n' "will be handled automatically.\n" "===========================================================\n")
        commands = [["evolve", "--noinput", "--execute"]]
        for command in commands:
            execute_from_command_line([sys.argv[0]] + command)
    else:
        print("The upgrade has been cancelled.\n")
        sys.exit(1)
def main(settings, in_subprocess):
    """Dispatch a management command.

    Refuses to run from inside the reviewboard package directory, runs
    dependency checks for runserver/test (unless already in a subprocess),
    short-circuits the custom `upgrade` command, and otherwise hands off
    to Django's execute_from_command_line.
    """
    if dirname(settings.__file__) == os.getcwd():
        sys.stderr.write("manage.py should not be run from within the " "'reviewboard' Python package directory.\n")
        sys.stderr.write("Make sure to run this from the top of the " "Review Board source tree.\n")
        sys.exit(1)
    try:
        command_name = sys.argv[1]
    except IndexError:
        command_name = None
    if command_name in ("runserver", "test"):
        if settings.DEBUG and not in_subprocess:
            sys.stderr.write("Running dependency checks (set DEBUG=False " "to turn this off)...\n")
            check_dependencies(settings)
        if command_name == "runserver":
            # Keep-alive support for the dev server.
            simple_server.ServerHandler.http_version = "1.1"
    elif command_name not in ("evolve", "syncdb", "migrate"):
        # Schema-changing commands must not trigger full initialization.
        initialize()
    if command_name == "upgrade":
        upgrade_database()
        return
    execute_from_command_line(sys.argv)
def run():
    """Prepare sys.path and environment, then dispatch to main().

    NOTE(review): the `try: pass` below is guarded by `except ImportError`
    and its error text talks about importing settings.py -- the import it
    protected appears to have been removed. `settings` passed to main() is
    not defined anywhere in this function; confirm where it comes from.
    """
    # Put the source tree root first on sys.path, and drop this script's own
    # directory so `reviewboard` resolves to the package, not local files.
    sys.path.insert(0, dirname(dirname(abspath(__file__))))
    try:
        sys.path.remove(dirname(abspath(__file__)))
    except ValueError:
        pass
    # First invocation sets DJANGO_SETTINGS_MODULE; if it's already set we
    # are a subprocess re-entering this script.
    if str("DJANGO_SETTINGS_MODULE") not in os.environ:
        in_subprocess = False
        os.environ[str("DJANGO_SETTINGS_MODULE")] = str("reviewboard.settings")
    else:
        in_subprocess = True
    if len(sys.argv) > 1 and sys.argv[1] == "test":
        os.environ[str("RB_RUNNING_TESTS")] = str("1")
    try:
        pass
    except ImportError as e:
        sys.stderr.write("Error: Can't find the file 'settings.py' in the " "directory containing %r. It appears you've " "customized things.\n" "You'll have to run django-admin.py, passing it your " "settings module.\n" "(If the file settings.py does indeed exist, it's " "causing an ImportError somehow.)\n" % __file__)
        sys.stderr.write("The error we got was: %s\n" % e)
        sys.exit(1)
    main(settings, in_subprocess)
# Allow running this module directly as the manage script.
if __name__ == "__main__":
    run()
from fastapi import APIRouter, Request
from fastapi.templating import Jinja2Templates
from fastapi.responses import HTMLResponse
from pathlib import Path
TEMPLATES = (
str(Path(__file__).resolve().parent.parent.parent.parent) + r"\design\templates"
)
# Router for template-rendered pages and the shared Jinja2 environment.
templates_view = APIRouter()
templates = Jinja2Templates(directory=TEMPLATES)
@templates_view.get("/base_html", response_class=HTMLResponse)
def base_html(request: Request):
    """Render the base index.html template.

    Note: response_class belongs in the route decorator. As a function
    parameter (as originally written) FastAPI treats it as a request
    parameter with a default value, not as the route's response type.
    """
    return templates.TemplateResponse("index.html", {"request": request})
| 525 | 165 |
# 002_movies.py
def migrate(migrator, database, fake=False, **kwargs):
    """Create the example `movies` table used to demonstrate CRUD."""
    ddl = """CREATE TABLE movies(
    id serial PRIMARY KEY NOT NULL,
    created timestamp not null default CURRENT_TIMESTAMP,
    modified timestamp not null default CURRENT_TIMESTAMP,
    creator uuid REFERENCES users(id),
    title text,
    director text
    )"""
    migrator.sql(ddl)
def rollback(migrator, database, fake=False, **kwargs):
    """Drop the `movies` table created by migrate()."""
    migrator.sql("""DROP TABLE movies""")
| 528 | 164 |
from torch import nn
from tqdm import tqdm
import math
from train.utils import get_batch, repackage_hidden
def train(model, criterion, optimizer, num_tokens, train_data, epoch_no, epochs,
          batch_size=256, sequence_length=6):
    """Run one training epoch over `train_data`.

    Returns the accumulated batch loss divided by len(train_data).

    NOTE(review): `counter` (the number of batches) is incremented but
    never used; the return value divides by the number of rows in
    `train_data`, not by the batch count -- confirm which average the
    callers expect.
    """
    # Turn on training mode which enables dropout.
    assert num_tokens is not None
    model.train()
    total_loss = 0.
    loop = tqdm(enumerate(range(0, train_data.size(0) - 1, sequence_length)), total=len(train_data) // sequence_length,
                position=0, leave=True)
    counter = 0
    for batch, i in loop:
        data, targets = get_batch(train_data, i)
        hidden = model.init_hidden(batch_size)
        # Starting each batch, we detach the hidden state from how it was previously produced.
        # If we didn't, the model would try backpropagating all the way to start of the dataset.
        model.zero_grad()
        hidden = repackage_hidden(hidden)
        # print('data:', data.shape)
        # print('target:', targets.shape)
        output, hidden = model(data, hidden)
        loss = criterion(output.view(-1, num_tokens), targets)
        loss.backward()
        # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
        nn.utils.clip_grad_norm_(model.parameters(), 3)
        optimizer.step()
        total_loss += loss.item()
        counter += 1
        loop.set_description(f"Epoch: [{epoch_no}/{epochs}]")
        loop.set_postfix(loss=loss.item(), ppl=math.exp(loss.item()))
    return total_loss / len(train_data)
| 1,528 | 482 |
import web
if "__main__" == __name__:
u = "https://www.ebay.de/sch/Mobel-Wohnen/11700/i.html?_from=R40&LH_BIN=1&_nkw=kaffeetisch&_dcat=38205&rt=nc&_mPrRngCbx=1&_udlo=4&_udhi=29"
r = web.fetch(u)
print r.status_code
| 225 | 135 |
"""Day 24: Lobby layout
Coordinate system taken from
https://www.redblobgames.com/grids/hexagons/#coordinates
"""
from collections import defaultdict
from day23 import REAL_INPUT
from typing import Dict, List, Tuple
# Unit steps for the six hex directions in cube coordinates (x, y, z with
# the invariant x + y + z == 0); see the redblobgames reference above.
MOVES = {
    "e": [1, -1, 0],
    "w": [-1, 1, 0],
    "se": [0, -1, 1],
    "sw": [-1, 0, 1],
    "nw": [0, 1, -1],
    "ne": [1, 0, -1],
}
# Worked example from the puzzle statement: each line is one walk to a tile.
TEST_INPUT = """sesenwnenenewseeswwswswwnenewsewsw
neeenesenwnwwswnenewnwwsewnenwseswesw
seswneswswsenwwnwse
nwnwneseeswswnenewneswwnewseswneseene
swweswneswnenwsewnwneneseenw
eesenwseswswnenwswnwnwsewwnwsene
sewnenenenesenwsewnenwwwse
wenwwweseeeweswwwnwwe
wsweesenenewnwwnwsenewsenwwsesesenwne
neeswseenwwswnwswswnw
nenwswwsewswnenenewsenwsenwnesesenew
enewnwewneswsewnwswenweswnenwsenwsw
sweneswneswneneenwnewenewwneswswnese
swwesenesewenwneswnwwneseswwne
enesenwswwswneneswsenwnewswseenwsese
wnwnesenesenenwwnenwsewesewsesesew
nenewswnwewswnenesenwnesewesw
eneswnwswnwsenenwnwnwwseeswneewsenese
neswnwewnwnwseenwseesewsenwsweewe
wseweeenwnesenwwwswnew""".splitlines()

# Known black-tile counts after N days for TEST_INPUT, used by part_two's
# self-checking assertions.
EXPECTED_PART_TWO_RESULTS = {
    2: 12,
    3: 25,
    4: 14,
    5: 23,
    6: 28,
    7: 41,
    8: 37,
    9: 49,
    10: 37,
    20: 132,
    30: 259,
    40: 406,
    50: 566,
    60: 788,
    70: 1106,
    80: 1373,
    90: 1844,
    100: 2208,
}

# NOTE(review): this shadows the REAL_INPUT imported from day23 above --
# confirm the day23 import is actually needed.
with open("day24.txt") as infile:
    REAL_INPUT = [line.strip() for line in infile]
def parse_step(line: str) -> Tuple[int, int, int]:
    """Follow a full line of hex steps from the origin; return the final
    cube coordinates."""
    pos = (0, 0, 0)
    i = 0
    while i < len(line):
        # Directions are one char ("e"/"w") or two ("ne"/"nw"/"se"/"sw");
        # try the short token first, matching the original precedence.
        for width in (1, 2):
            token = line[i : i + width]
            if token in MOVES:
                dx, dy, dz = MOVES[token]
                pos = (pos[0] + dx, pos[1] + dy, pos[2] + dz)
                i += width
                break
        else:
            raise ValueError(f"Unknown step {line[i:i + 2]}")
    return pos
def part_one(puzzle: List[str]) -> int:
    """Count black tiles after flipping each addressed tile once per line."""
    flipped: Dict[Tuple[int], bool] = defaultdict(bool)
    for steps in puzzle:
        tile = parse_step(steps)
        flipped[tile] = not flipped[tile]
    return sum(flipped.values())
def get_neighbor_coordinates(pos: Tuple[int, int, int]) -> List[Tuple[int, int, int]]:
    """Return the six hex neighbors of *pos* in cube coordinates."""
    x, y, z = pos
    return [(x + dx, y + dy, z + dz) for dx, dy, dz in MOVES.values()]
def next_state(
    pos: Tuple[int, int, int], grid: Dict[Tuple[int, int, int], bool]
) -> bool:
    """Apply the daily flip rules to one tile (True means black)."""
    black_neighbors = sum(grid[n] for n in get_neighbor_coordinates(pos))
    if grid[pos]:
        # a black tile stays black only with exactly one or two black neighbors
        return black_neighbors in {1, 2}
    # a white tile turns black with exactly two black neighbors
    return black_neighbors == 2
def take_turn(
    grid: Dict[Tuple[int, int, int], bool]
) -> Dict[Tuple[int, int, int], bool]:
    """Advance the floor one day, growing the bounding box by one tile per axis.

    Returns a fresh defaultdict so the caller's grid is never mutated in place.
    """
    # One O(n) min/max pass per axis instead of the previous three full sorts
    # of the key set (which were used only to read the first and last element).
    min_x = min(key[0] for key in grid) - 1
    max_x = max(key[0] for key in grid) + 2
    min_y = min(key[1] for key in grid) - 1
    max_y = max(key[1] for key in grid) + 2
    min_z = min(key[2] for key in grid) - 1
    max_z = max(key[2] for key in grid) + 2
    new_grid: Dict[Tuple[int, int, int], bool] = defaultdict(lambda: False)
    new_grid.update(
        {
            (x, y, z): next_state((x, y, z), grid)
            for x in range(min_x, max_x)
            for y in range(min_y, max_y)
            for z in range(min_z, max_z)
            # this conditional is crucial: only coordinates on the
            # x + y + z == 0 plane are real hex tiles
            if x + y + z == 0
        }
    )
    return new_grid
def part_two(puzzle: List[str], days: int = 100) -> int:
    """Run the cellular-automaton flips for *days* days; return the black count.

    When *puzzle* is the sample input, intermediate day counts are asserted
    against EXPECTED_PART_TWO_RESULTS as a self-check.
    """
    grid: Dict[Tuple[int], bool] = defaultdict(lambda: False)
    # initial layout: flip each addressed tile once per input line
    for line in puzzle:
        coordinates = parse_step(line)
        grid[coordinates] = not grid[coordinates]
    testing = puzzle == TEST_INPUT
    for day in range(days):
        grid = take_turn(grid)
        if testing:
            try:
                expected_result = EXPECTED_PART_TWO_RESULTS[day + 1]
            except KeyError:
                # no published expectation for this day — nothing to check
                pass
            else:
                assert expected_result == sum(grid.values()), (
                    day + 1,
                    sum(grid.values()),
                    grid,
                )
        # progress output every 10 days (also prints at day 0)
        if not day % 10:
            # this code gets slower as the days go on
            print(f"in progress: day {day}")
    return sum(grid.values())
def main():
    """Verify both parts against the sample, then run them on the real input."""
    assert part_one(TEST_INPUT) == 10, part_one(TEST_INPUT)
    print("part 1 result:", part_one(REAL_INPUT))
    assert part_two(TEST_INPUT) == 2208, part_two(TEST_INPUT)
    # label made consistent with the part-1 line (trailing colon was missing)
    print("part 2 result:", part_two(REAL_INPUT))
if __name__ == "__main__":
    main()
| 4,961 | 1,991 |
# -*- coding: utf-8 -*-
from .reader import SteinerTreeProblem, Reader, ReaderORLibrary
from .graph import UndirectedWeightedGraph
from .graph import UndirectedWeightedGraph as UWGraph
from .graph import UndirectedWeightedGraph as Graph
from .graph import UndirectedGraph
from .graph import UndirectedGraph as UGraph
# Public API of the package. NOTE: the weighted-graph aliases imported above
# (UndirectedWeightedGraph / UWGraph) were missing from __all__, so star
# imports silently dropped them; they are now exported as well.
__all__ = [
    "Graph",
    "UndirectedWeightedGraph",
    "UWGraph",
    "UndirectedGraph",
    "UGraph",
    "Reader",
    "ReaderORLibrary",
    "SteinerTreeProblem",
]
| 464 | 150 |
"""create user table
Revision ID: 37881a97d680
Revises: d8f500d9168
Create Date: 2014-02-10 15:52:50.366173
"""
# revision identifiers, used by Alembic.
revision = '37881a97d680'
down_revision = 'd8f500d9168'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the users table and an index for email lookups."""
    op.create_table(
        'users',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('fullname', sa.String(200), nullable=False),
        sa.Column('email', sa.String(100), nullable=False),
        sa.Column('last_login', sa.DateTime, nullable=True),
        sa.Column(
            'is_superuser',
            sa.Boolean,
            nullable=False,
            # server-side default of false so inserts without the column succeed
            server_default='0'
        )
    )
    # NOTE(review): the email index is not unique — confirm duplicate emails
    # are actually allowed by the application.
    op.create_index('idx_email', 'users', ['email'])
def downgrade():
    """Reverse upgrade(): drop the index first, then the table."""
    op.drop_index('idx_email', 'users')
    op.drop_table('users')
| 840 | 334 |
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
#from dotenv import find_dotenv, load_dotenv
import pandas as pd
import tqdm
import numpy as np
#@click.command()
#@click.argument('input_filepath', type=click.Path(exists=True))
#@click.argument('output_filepath', type=click.Path())
def main(
    input_filepath,
    output_filepath,
):
    """ Runs data processing scripts to turn raw data from (../raw) into
        cleaned data ready to be analyzed (saved in ../processed).

        Reads the CSV at *input_filepath*, de-duplicates on
        (Material, Nºdopedido, IVAMIRO), strips padding and leading zeros from
        the key columns, derives OrderType from the order number, and writes
        the result to *output_filepath*.
    """
    logger = logging.getLogger(__name__)
    logger.info('making final data set from raw data')
    data = pd.read_csv(input_filepath, sep=',', encoding='utf-8')
    # everything is treated as text; lstrip('0') below assumes string dtype
    data = data.astype(str)
    data = data.drop_duplicates(subset=['Material', 'Nºdopedido', 'IVAMIRO'])
    strip_columns = [
        'Material',
        'Filial',
        'IVAPC',
        'PEP',
        'Fornecedor',
        'Contrato',
        'UF',
        'TpImposto',
        'IVAMIRO',
        'Nºdopedido'
    ]
    for col in tqdm.tqdm(strip_columns):
        try:
            data[col] = data[col].str.strip(' ').str.lstrip('0')
        except KeyError:
            # only a missing column is expected here; the previous bare
            # `except:` hid every other failure (per the old TODO comment,
            # this is now a warning instead of a print)
            logger.warning('%s not in data.columns', col)
    # the first two characters of the order number encode the order type
    data['OrderType'] = data['Nºdopedido'].str[0:2]
    data = data.replace(to_replace={'nan': ''})
    data.to_csv(output_filepath)
if __name__ == '__main__':
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    # not used in this stub but often useful for finding various files
    #project_dir = Path(__file__).resolve().parents[2]
    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    #load_dotenv(find_dotenv())
    # NOTE(review): hard-coded absolute Windows paths make this script
    # machine-specific; the commented-out click decorators above suggest CLI
    # arguments were intended. `today` is computed but currently unused
    # (the `.format(today)` on the output path is commented out).
    today = int(pd.to_datetime('today').timestamp())
    main(
        r'C:\Users\User Ambev\Desktop\Célula de analytics\Projetos\iva-apfj\data\external\history.csv',
        r'C:\Users\User Ambev\Desktop\Célula de analytics\Projetos\iva-apfj\data\external\hist_prep.csv'#.format(today)
    )
| 2,235 | 762 |
import asyncio
import time
import zmq
import zmq.asyncio
from pymunk import Vec2d
from zmq import Socket
from .constants import SERVER_TICK
from .events.events import PlayerEvent
from .events.movement import apply_movement
from .events.states import GameState, PlayerState
async def main():
    """Run the game server: a PULL socket for client events and a PUB socket
    broadcasting game state, until one of the tasks finishes or is cancelled."""
    # never-resolved future: keeps asyncio.wait blocked until a task completes
    future = asyncio.Future()
    game_state = GameState(
        player_states=[PlayerState()]
    )
    ctx = zmq.asyncio.Context()
    sock_b = ctx.socket(zmq.PULL)
    sock_b.bind('tcp://*:25001')
    task_b = asyncio.create_task(
        update_from_client(game_state, sock_b)
    )
    sock_c = ctx.socket(zmq.PUB)
    sock_c.bind('tcp://*:25000')
    task_c = asyncio.create_task(
        push_game_state(game_state, sock_c)
    )
    try:
        await asyncio.wait(
            [task_b, task_c, future],
            return_when=asyncio.FIRST_COMPLETED
        )
    except asyncio.CancelledError:
        print('Cancelled')
    finally:
        # always release sockets and the context so the process can exit
        sock_b.close()
        sock_c.close()
        ctx.destroy(linger=1)
async def update_from_client(game_state: GameState, sock: Socket) -> None:
    """Consume player events from the PULL socket forever and fold each one
    into *game_state*; exits quietly on task cancellation."""
    try:
        while True:
            # each message is expected to carry the event under the 'event' key
            msg = dict(await sock.recv_json())
            event_dict = msg['event']
            # print(event_dict)
            event = PlayerEvent(**event_dict)
            update_game_state(game_state, event)
    except asyncio.CancelledError:
        print("Cancelled")
        pass
def update_game_state(game_state: GameState, event: PlayerEvent) -> None:
    """Advance every player by the time elapsed since its last update,
    applying *event* to compute the new position."""
    for player in game_state.player_states:
        elapsed = time.time() - player.updated
        current = Vec2d(player.x, player.y)
        moved = apply_movement(player.speed, elapsed, current, event)
        player.x, player.y = moved.x, moved.y
        player.updated = time.time()
async def push_game_state(game_state: GameState, sock: Socket) -> None:
    """Broadcast the serialized game state on the PUB socket at SERVER_TICK Hz;
    exits quietly on task cancellation."""
    try:
        while True:
            sock.send_string(game_state.to_json())
            await asyncio.sleep(1 / SERVER_TICK)
    except asyncio.CancelledError:
        pass
| 1,989 | 699 |
from django.conf.urls import include
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path
import mds.apis.agency_api.v0_3.urls
import mds.authent.urls
# URL routing: the MDS agency API is mounted twice (a legacy "v0.x" prefix and
# the real "v0.3" prefix), plus admin, authent and a login view for
# oauth2_provider's application-management pages.
urlpatterns = [
    # TODO(hcauwelier) Backwards compat, to remove with MDS 0.3
    path(
        "mds/v0.x/",
        include(
            (
                mds.apis.agency_api.v0_3.urls.get_url_patterns(prefix="mds/v0.x"),
                "agency",
            )
        ),
    ),
    # MDS 0.3
    path(
        "mds/v0.3/",
        include(
            (
                mds.apis.agency_api.v0_3.urls.get_url_patterns(prefix="mds/v0.3"),
                "agency-0.3",
            )
        ),
    ),
    path("admin/", admin.site.urls),
    path("authent/", include(mds.authent.urls, namespace="authent")),
    # oauth2_provider gives views to manage applications.
    # These views require a logged in user.
    path(
        "accounts/login/",
        auth_views.LoginView.as_view(template_name="admin/login.html"),
    ),
]
| 1,045 | 360 |
from cerberus import errors
from cerberus.tests import assert_fail, assert_success
def test_schema(validator):
    """Nested 'schema' rule: a type error and a missing required field inside a
    subdocument must surface both as child errors of the SCHEMA error and via
    the rendered messages in validator.errors."""
    field = 'a_dict'
    subschema_field = 'address'
    # sanity check: a fully valid document passes
    assert_success({field: {subschema_field: 'i live here', 'city': 'in my own town'}})
    assert_fail(
        schema={
            field: {
                'type': 'dict',
                'schema': {
                    subschema_field: {'type': 'string'},
                    'city': {'type': 'string', 'required': True},
                },
            }
        },
        # wrong type for 'address' and 'city' absent entirely
        document={field: {subschema_field: 34}},
        validator=validator,
        error=(
            field,
            (field, 'schema'),
            errors.SCHEMA,
            validator.schema['a_dict']['schema'],
        ),
        child_errors=[
            (
                (field, subschema_field),
                (field, 'schema', subschema_field, 'type'),
                errors.TYPE,
                ('string',),
            ),
            (
                (field, 'city'),
                (field, 'schema', 'city', 'required'),
                errors.REQUIRED_FIELD,
                True,
            ),
        ],
    )
    # the human-readable messages are reachable through validator.errors
    assert field in validator.errors
    assert subschema_field in validator.errors[field][-1]
    assert (
        errors.BasicErrorHandler.messages[errors.TYPE.code].format(
            constraint=('string',)
        )
        in validator.errors[field][-1][subschema_field]
    )
    assert 'city' in validator.errors[field][-1]
    assert (
        errors.BasicErrorHandler.messages[errors.REQUIRED_FIELD.code]
        in validator.errors[field][-1]['city']
    )
def test_options_passed_to_nested_validators(validator):
    """allow_unknown set on the parent validator must propagate into the
    validation of nested subdocuments."""
    validator.allow_unknown = True
    nested_schema = {
        'sub_dict': {'type': 'dict', 'schema': {'foo': {'type': 'string'}}}
    }
    document = {'sub_dict': {'foo': 'bar', 'unknown': True}}
    assert_success(schema=nested_schema, document=document, validator=validator)
| 1,945 | 557 |
import logging
logger = logging.getLogger(__name__)
from .profiler import Profiler
from . import config, resnet
profiler = Profiler("classify.py")
import os
import cv2
import dlib
import numpy as np
import tensorflow as tf
from imutils.face_utils import FaceAligner
from imutils.face_utils import rect_to_bb
profiler.tick("Libraries imported")
def get_gender(gender):
    """Map the model's gender class index to a label: 0 -> female, 1 -> male,
    anything else -> unknown."""
    if gender == 0:
        return "female"
    if gender == 1:
        return "male"
    return "unknown"
class Classify:
    """Age/gender classifier: dlib face detection + alignment feeding a
    TensorFlow (TF1) ResNet that predicts an age value and a gender class
    per detected face."""
    def __init__(self, model_path, predictor_path, use_cuda = False):
        # model_path: directory holding the TF checkpoint
        # predictor_path: dlib shape-predictor landmark file
        self.model_path = model_path
        self.predictor_path = predictor_path
        self.use_cuda = use_cuda
        if not use_cuda:
            # hide all GPUs so TensorFlow falls back to CPU
            os.environ['CUDA_VISIBLE_DEVICES'] = ''
        self._create_session()
    def classify(self, path):
        """Detect and align faces in the image at *path*, then return
        {"ages": [...], "genders": [...]} for the detected faces."""
        aligned_image, image, rect_nums, XY = self._load_image(path)
        profiler.tick("Loaded image")
        ages, genders = self._evaluate(aligned_image)
        profiler.tick("Evaluated image")
        if config.PROFILE:
            profiler.dump_events()
        return {
            "ages" : ages.tolist(),
            "genders" : [get_gender(g) for g in genders.tolist()]
        }
    def _create_session(self):
        # Builds the TF1 graph once and restores weights from the checkpoint.
        # Age is predicted as the softmax-weighted expectation over 0..100.
        logger.debug("Creating session")
        with tf.Graph().as_default():
            self.session = tf.Session()
            images_pl = tf.placeholder(tf.float32, shape=[None, 160, 160, 3], name='input_image')
            images = tf.map_fn(lambda frame: tf.reverse_v2(frame, [-1]), images_pl) #BGR TO RGB
            images_norm = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), images)
            train_mode = tf.placeholder(tf.bool)
            age_logits, gender_logits, _ = resnet.inference(images_norm, keep_probability=0.8,
                                                            phase_train=train_mode,
                                                            weight_decay=1e-5)
            gender = tf.argmax(tf.nn.softmax(gender_logits), 1)
            age_ = tf.cast(tf.constant([i for i in range(0, 101)]), tf.float32)
            age = tf.reduce_sum(tf.multiply(tf.nn.softmax(age_logits), age_), axis=1)
            init_op = tf.group(
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            )
            self.session.run(init_op)
            saver = tf.train.Saver()
            ckpt = tf.train.get_checkpoint_state(self.model_path)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(self.session, ckpt.model_checkpoint_path)
                self._age = age
                self._gender = gender
                self._images_pl = images_pl
                self._train_mode = train_mode
                # NOTE(review): "restorted" is a typo for "restored" in this
                # log message (left untouched here).
                logger.debug("Session restorted")
            else:
                # NOTE(review): on a missing checkpoint the object is left
                # without _age/_gender attributes, so _evaluate will raise
                # AttributeError later — consider raising here instead.
                logger.debug("Could not create session")
        profiler.tick("Created session")
    def _evaluate(self, aligned_images):
        # Runs the restored graph in inference mode on a batch of faces.
        return self.session.run(
            [self._age, self._gender],
            feed_dict = {
                self._images_pl: aligned_images,
                self._train_mode: False
            }
        )
    def _load_image(self, image_path):
        # Detect faces, align each to 160x160 and draw bounding boxes on the
        # original image. Returns (aligned faces, annotated image, face count,
        # top-left corners).
        # NOTE(review): the detector and shape predictor are re-created on
        # every call, which is expensive — consider building them in __init__.
        detector = dlib.get_frontal_face_detector()
        predictor = dlib.shape_predictor(self.predictor_path)
        fa = FaceAligner(predictor, desiredFaceWidth=160)
        image = cv2.imread(image_path, cv2.IMREAD_COLOR)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        rects = detector(gray, 2)
        rect_nums = len(rects)
        XY, aligned_images = [], []
        if rect_nums == 0:
            # no face found: fall back to classifying the whole image
            aligned_images.append(image)
            return aligned_images, image, rect_nums, XY
        else:
            for i in range(rect_nums):
                aligned_image = fa.align(image, gray, rects[i])
                aligned_images.append(aligned_image)
                (x, y, w, h) = rect_to_bb(rects[i])
                image = cv2.rectangle(image, (x, y), (x + w, y + h), color=(255, 0, 0), thickness=2)
                XY.append((x, y))
            return np.array(aligned_images), image, rect_nums, XY
import unittest
class AddTests(unittest.TestCase):
    """Demo test case with one passing, one deliberately failing and one
    skipped test (useful for exercising test-runner output)."""

    def test_other_two_plus_one_is_three_passed(self):
        print("checking 2 + 1")
        expected, actual = 3, 2 + 1
        self.assertEqual(expected, actual)

    def test_other_two_plus_two_is_five_failed(self):
        # intentionally wrong expectation — demonstrates a failure report
        print("checking 2 + 2")
        expected, actual = 5, 2 + 2
        self.assertEqual(expected, actual)

    @unittest.skip("Skipped for a very important reason")
    def test_other_two_plus_zero_is_two_skipped(self):
        print("checking 2 + 0")
        expected, actual = 2, 2 + 0
        self.assertEqual(expected, actual)
| 478 | 183 |
import tensorflow as tf
import argument_sr
from os.path import join
from PIL import Image
import os
from numpy import array
"""
Flickr image data loaded via PIL.
"""
def pil_batch_queue():
    """Load the whole PIL-based training set as float32 tensors.

    Returns (lr, hr2x, hr4x, None) where each tensor gains a trailing channel
    axis: (N, H, W) -> (N, H, W, 1). The fourth slot is unused.
    """
    lrs ,hr2s , hr4s = argument_sr.options.get_pil_file_list()
    lrs = array(lrs)
    hr2s = array(hr2s)
    hr4s = array(hr4s)
    lrs = tf.convert_to_tensor(lrs,dtype=tf.float32)
    hr2s = tf.convert_to_tensor(hr2s, dtype=tf.float32)
    hr4s = tf.convert_to_tensor(hr4s, dtype=tf.float32)
    # add the channel dimension expected by the network
    lrs = tf.expand_dims(lrs,3)
    hr2s = tf.expand_dims(hr2s, 3)
    hr4s = tf.expand_dims(hr4s, 3)
    return lrs,hr2s,hr4s,None
"""
SET5 test
"""
def pil_single_test_SET5(path):
    """Load a single SET5 test image triple (LR, HR2x, HR4x) as tensors.

    Adds both a batch axis and a channel axis: (H, W) -> (1, H, W, 1).
    The fourth slot of the return tuple is unused.
    """
    a,b,c = argument_sr.options.get_set5(path)
    lrs = array(a)
    hr2s = array(b)
    hr4s = array(c)
    lrs = tf.convert_to_tensor(lrs, dtype=tf.float32)
    hr2s = tf.convert_to_tensor(hr2s, dtype=tf.float32)
    hr4s = tf.convert_to_tensor(hr4s, dtype=tf.float32)
    # batch axis
    lrs = tf.expand_dims(lrs, 0)
    hr2s = tf.expand_dims(hr2s, 0)
    hr4s = tf.expand_dims(hr4s, 0)
    # channel axis
    lrs = tf.expand_dims(lrs, 3)
    hr2s = tf.expand_dims(hr2s, 3)
    hr4s = tf.expand_dims(hr4s, 3)
    return lrs, hr2s, hr4s, None
def RGB_to_Tcrbr_Y(tensor):
    """Extract the luma (Y) channel from an RGB image tensor.

    (Name presumably means YCbCr; kept as-is because callers use it.)

    Args:
        tensor: image tensor of shape (H, W, 3), channels in RGB order.
    Returns:
        Tensor of shape (H, W, 1) holding the luma channel.
    """
    with tf.name_scope("rgb_to_tcrbr"):
        # BUG FIX: removed a leftover debug `print(tensor)` that spammed
        # stdout on every graph construction.
        R = tensor[:, : ,0]
        G = tensor[:, :, 1]
        B = tensor[:, :, 2]
        # ITU-R BT.601 luma weights
        L = 0.299*R+0.587*G+0.114*B
        return tf.expand_dims(L,2)
def get_all_file(path, endFormat, withPath=True):
    """Recursively collect files under *path* whose names end with any of the
    suffixes in *endFormat*.

    Args:
        path: directory to walk.
        endFormat: list of filename suffixes to match, e.g. ['jpg'].
        withPath: if True, return full paths; otherwise bare filenames.
    Returns:
        List of matching file paths (or names).
    """
    matches = []
    for root, dirs, files in os.walk(path):
        for file in files:
            if any(file.endswith(suffix) for suffix in endFormat):
                # BUG FIX: join with *root* (the directory that actually
                # contains the file), not the top-level *path* — the old code
                # produced nonexistent paths for files in subdirectories.
                filename = join(root, file) if withPath else file
                matches.append(filename)
    return matches
"""
Ordinary JPEG training set.
"""
def batch_queue_for_training_normal(data_path):
    """Build a shuffled TF1 input queue from the JPEGs under *data_path*.

    Random-crops an HR patch and bicubically downscales it; returns batches
    of (LR, HR2, HR4, HR8) tensors.
    """
    num_channel = argument_sr.options.input_channel
    image_height = argument_sr.options.height
    image_width = argument_sr.options.width
    batch_size = argument_sr.options.batch_size
    threads_num = argument_sr.options.num_threads
    min_queue_examples = argument_sr.options.min_after_dequeue
    filename_queue = tf.train.string_input_producer(get_all_file(path=data_path, endFormat=['jpg']))
    file_reader = tf.WholeFileReader()
    _, image_file = file_reader.read(filename_queue)
    patch = tf.image.decode_jpeg(image_file, 3)
    patch = tf.image.convert_image_dtype(patch, dtype=tf.float32)
    # patch = RGB_to_Tcrbr_Y(patch)
    image_HR8 = tf.random_crop(patch, [image_height, image_width, num_channel])
    # NOTE(review): HR4 is built at 1/2 resolution and HR2 at 1/4 — the names
    # look swapped relative to the scale factors; confirm downstream callers
    # expect exactly this ordering.
    image_HR4 = tf.image.resize_images(image_HR8, [int(image_height / 2), int(image_width / 2)],
                                       method=tf.image.ResizeMethod.BICUBIC)
    image_HR2 = tf.image.resize_images(image_HR8, [int(image_height / 4), int(image_width / 4)],
                                       method=tf.image.ResizeMethod.BICUBIC)
    image_LR = tf.image.resize_images(image_HR8, [int(image_height / 8), int(image_width / 8)],
                                      method=tf.image.ResizeMethod.BICUBIC)
    low_res_batch, high2_res_batch, high4_res_batch, high8_res_batch = tf.train.shuffle_batch(
        [image_LR, image_HR2, image_HR4, image_HR8],
        batch_size=batch_size,
        num_threads=threads_num,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)
    return low_res_batch, high2_res_batch, high4_res_batch, high8_res_batch
def save_image(image, path):
    """Write raw encoded image bytes to *path*, overwriting any existing file."""
    with open(path, "wb") as out_file:
        out_file.write(image)
"""
Use the special large-scale training set.
"""
def batch_queue_for_training_mkdir():
    """Build a (non-shuffled) TF1 input queue from the configured file list,
    converting patches to the luma channel before cropping.

    Returns batches of (LR, HR2, HR4, HR8) tensors.
    """
    num_channel = argument_sr.options.input_channel
    image_height = argument_sr.options.height
    image_width = argument_sr.options.width
    batch_size = argument_sr.options.batch_size
    threads_num = argument_sr.options.num_threads
    filename_queue = tf.train.string_input_producer(argument_sr.options.get_file_list())
    file_reader = tf.WholeFileReader()
    _, image_file = file_reader.read(filename_queue)
    patch = tf.image.decode_jpeg(image_file, 3)
    patch = tf.image.convert_image_dtype(patch, dtype=tf.float32)
    patch = RGB_to_Tcrbr_Y(patch)
    image_HR8 = tf.random_crop(patch, [image_height, image_width, num_channel])
    # NOTE(review): as in batch_queue_for_training_normal, HR4 is 1/2 scale
    # and HR2 is 1/4 scale — confirm this naming is what callers expect.
    image_HR4 = tf.image.resize_images(image_HR8, [int(image_height / 2), int(image_width / 2)],
                                       method=tf.image.ResizeMethod.BICUBIC)
    image_HR2 = tf.image.resize_images(image_HR8, [int(image_height / 4), int(image_width / 4)],
                                       method=tf.image.ResizeMethod.BICUBIC)
    image_LR = tf.image.resize_images(image_HR8, [int(image_height / 8), int(image_width / 8)],
                                      method=tf.image.ResizeMethod.BICUBIC)
    low_res_batch, high2_res_batch, high4_res_batch, high8_res_batch = tf.train.batch(
        [image_LR, image_HR2, image_HR4, image_HR8],
        batch_size=batch_size,
        num_threads=threads_num,
        capacity=3 * batch_size)
    # NOTE(review): closing the filename queue right away looks suspicious —
    # confirm the queue runners can still dequeue after this.
    filename_queue.close()
    return low_res_batch, high2_res_batch, high4_res_batch, high8_res_batch
def dataTest():
    """Smoke-test the input pipeline: dump 16 HR2 patches as JPEGs to ./test/."""
    # BUG FIX: the module is imported as `argument_sr`; the previous
    # `argument.options` reference was a NameError at call time.
    low_res_batch, high2_res_batch, high4_res_batch, high8_res_batch = \
        batch_queue_for_training_normal(argument_sr.options.test_data_path)
    images = []
    for i in range(16):
        patch = tf.image.convert_image_dtype(high2_res_batch[i], dtype=tf.uint8)
        images.append(tf.image.encode_jpeg(patch))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        tf.train.start_queue_runners(sess=sess)
        encoded = sess.run(images)
        for i in range(16):
            save_image(encoded[i], './test/low' + str(i) + '.jpg')
def get_image_info():
    """Print (long side, short side) in pixels for every configured image."""
    for image_path in argument_sr.options.get_file_list():
        img = Image.open(image_path)
        short_side = min(img.size[1], img.size[0])
        long_side = max(img.size[1], img.size[0])
        print(long_side, short_side)
if __name__ == '__main__':
    # smoke test: load and print the SET5 "baby" image tensors
    print(pil_single_test_SET5('./SET5/baby.png'))
| 6,318 | 2,456 |
import logging
# Map stdlib logging levels to the string names used by this application.
LOG_LEVELS = {
    # NOTE(review): 'sample' is inconsistent with the stdlib name for NOTSET
    # (expected something like 'notset') — confirm it is intentional.
    logging.NOTSET: 'sample',
    logging.DEBUG: 'debug',
    logging.INFO: 'info',
    logging.WARNING: 'warning',
    logging.ERROR: 'error',
    logging.FATAL: 'fatal',
}
DEFAULT_LOG_LEVEL = 'error'
# reverse lookup: level name -> numeric level
LOG_LEVELS_MAP = {v: k for k, v in LOG_LEVELS.items()}
# CSS label class per level (presumably Bootstrap classes — used for UI rendering)
LEVEL_LABELS = {
    logging.NOTSET: 'dark',
    logging.DEBUG: 'dark',
    logging.INFO: 'success',
    logging.WARNING: 'warning',
    logging.ERROR: 'danger',
    logging.FATAL: 'danger',
}
| 483 | 189 |
import os
import logging
import re
from datetime import datetime
from stat import ST_CTIME
from zipfile import ZipFile
from dipper import config
from dipper.sources.Source import Source
from dipper.models.Model import Model
from dipper.models.assoc.InteractionAssoc import InteractionAssoc
from dipper.models.Dataset import Dataset
__author__ = 'nicole'
logger = logging.getLogger(__name__)
BGDL = 'http://thebiogrid.org/downloads/archives/Latest%20Release'
class BioGrid(Source):
    """
    Biogrid interaction data

    Dipper source for BioGrid: fetches the MITAB interaction dump and the
    identifier-mapping file, turns interactions into InteractionAssoc triples
    and identifier rows into equivalence axioms.
    """
    # TODO write up class summary for docstring
    # remote files: the MITAB interaction dump and the identifier mapping table
    files = {
        'interactions': {
            'file': 'interactions.mitab.zip',
            'url': BGDL + '/BIOGRID-ALL-LATEST.mitab.zip'},
        'identifiers': {
            'file': 'identifiers.tab.zip',
            'url': BGDL + '/BIOGRID-IDENTIFIERS-LATEST.tab.zip'}
    }
    # biogrid-specific identifiers for use in subsetting identifier mapping
    biogrid_ids = [
        106638, 107308, 107506, 107674, 107675, 108277, 108506, 108767, 108814,
        108899, 110308, 110364, 110678, 111642, 112300, 112365, 112771, 112898,
        199832, 203220, 247276, 120150, 120160, 124085]

    def __init__(self, graph_type, are_bnodes_skolemized, tax_ids=None):
        """Set up the source; *tax_ids* restricts parsing to those taxa
        (defaults to human, mouse and zebrafish)."""
        super().__init__(graph_type, are_bnodes_skolemized, 'biogrid')
        self.tax_ids = tax_ids
        self.dataset = Dataset(
            'biogrid', 'The BioGrid', 'http://thebiogrid.org/', None,
            'http://wiki.thebiogrid.org/doku.php/terms_and_conditions')
        # Defaults
        # our favorite animals
        # taxids = [9606,10090,10116,7227,7955,6239,8355]
        if self.tax_ids is None:
            self.tax_ids = [9606, 10090, 7955]
        if 'test_ids' not in config.get_config() or \
                'gene' not in config.get_config()['test_ids']:
            logger.warning("not configured with gene test ids.")
        else:
            self.test_ids = config.get_config()['test_ids']['gene']
        # data-source specific warnings
        # (will be removed when issues are cleared)
        logger.warning(
            "several MI experimental codes do not exactly map to ECO; "
            "using approximations.")
        return

    def fetch(self, is_dl_forced=False):
        """
        Download the raw files and record the dataset version, which is
        encoded in the name of the first entry inside the interactions zip.

        :param is_dl_forced:
        :return: None
        """
        self.get_files(is_dl_forced)
        # the version number is encoded in the filename in the zip.
        # for example, the interactions file may unzip to
        # BIOGRID-ALL-3.2.119.mitab.txt, where the version number is 3.2.119
        f = '/'.join((self.rawdir, self.files['interactions']['file']))
        st = os.stat(f)
        filedate = datetime.utcfromtimestamp(st[ST_CTIME]).strftime("%Y-%m-%d")
        with ZipFile(f, 'r') as myzip:
            flist = myzip.namelist()
            # assume that the first entry is the item
            fname = flist[0]
            # get the version from the filename
            version = \
                re.match(r'BIOGRID-ALL-(\d+\.\d+\.\d+)\.mitab.txt', fname)
        # NOTE(review): close() is redundant inside the `with` block; and if
        # the filename pattern ever changes, `version` is None and the line
        # below raises AttributeError.
        myzip.close()
        self.dataset.setVersion(filedate, str(version.groups()[0]))
        return

    def parse(self, limit=None):
        """
        Parse interactions and identifier mappings into the graph(s).

        :param limit: maximum number of data lines to process per file
        :return:
        """
        if self.testOnly:
            self.testMode = True
        self._get_interactions(limit)
        self._get_identifiers(limit)
        logger.info("Loaded %d test graph nodes", len(self.testgraph))
        logger.info("Loaded %d full graph nodes", len(self.graph))
        return

    def _get_interactions(self, limit):
        """Stream the MITAB file from inside the zip and emit one
        InteractionAssoc per qualifying row (test-id or taxon filtered)."""
        logger.info("getting interactions")
        line_counter = 0
        f = '/'.join((self.rawdir, self.files['interactions']['file']))
        myzip = ZipFile(f, 'r')
        # assume that the first entry is the item
        fname = myzip.namelist()[0]
        matchcounter = 0
        with myzip.open(fname, 'r') as csvfile:
            for line in csvfile:
                # skip comment lines
                if re.match(r'^#', line.decode()):
                    logger.debug("Skipping header line")
                    continue
                line_counter += 1
                line = line.decode().strip()
                # print(line)
                (interactor_a, interactor_b, alt_ids_a, alt_ids_b, aliases_a,
                 aliases_b, detection_method, pub_author, pub_id, taxid_a,
                 taxid_b, interaction_type, source_db, interaction_id,
                 confidence_val) = line.split('\t')
                # get the actual gene ids,
                # typically formated like: gene/locuslink:351|BIOGRID:106848
                gene_a_num = re.search(
                    r'locuslink\:(\d+)\|?', interactor_a).groups()[0]
                gene_b_num = re.search(
                    r'locuslink\:(\d+)\|?', interactor_b).groups()[0]
                if self.testMode:
                    g = self.testgraph
                    # skip any genes that don't match our test set
                    if (int(gene_a_num) not in self.test_ids) or\
                            (int(gene_b_num) not in self.test_ids):
                        continue
                else:
                    g = self.graph
                    # when not in test mode, filter by taxon
                    if int(re.sub(r'taxid:', '', taxid_a.rstrip())) not in\
                            self.tax_ids or\
                            int(re.sub(
                                r'taxid:', '', taxid_b.rstrip())) not in\
                            self.tax_ids:
                        continue
                    else:
                        matchcounter += 1
                gene_a = 'NCBIGene:'+gene_a_num
                gene_b = 'NCBIGene:'+gene_b_num
                # get the interaction type
                # psi-mi:"MI:0407"(direct interaction)
                int_type = re.search(r'MI:\d+', interaction_type).group()
                rel = self._map_MI_to_RO(int_type)
                # scrub pubmed-->PMID prefix
                pub_id = re.sub(r'pubmed', 'PMID', pub_id)
                # remove bogus whitespace
                pub_id = pub_id.strip()
                # get the method, and convert to evidence code
                det_code = re.search(r'MI:\d+', detection_method).group()
                evidence = self._map_MI_to_ECO(det_code)
                # note that the interaction_id is some kind of internal biogrid
                # identifier that does not map to a public URI.
                # we will construct a monarch identifier from this
                assoc = InteractionAssoc(g, self.name, gene_a, gene_b, rel)
                assoc.add_evidence(evidence)
                assoc.add_source(pub_id)
                assoc.add_association_to_graph()
                if not self.testMode and (
                        limit is not None and line_counter > limit):
                    break
        # NOTE(review): myzip is not closed if an exception is raised above;
        # a `with ZipFile(...)` around the whole loop would be safer.
        myzip.close()
        return

    def _get_identifiers(self, limit):
        """
        This will process the id mapping file provided by Biogrid.
        The file has a very large header, which we scan past,
        then pull the identifiers, and make equivalence axioms

        :param limit:
        :return:

        """
        logger.info("getting identifier mapping")
        line_counter = 0
        f = '/'.join((self.rawdir, self.files['identifiers']['file']))
        myzip = ZipFile(f, 'r')
        # assume that the first entry is the item
        fname = myzip.namelist()[0]
        foundheader = False
        # TODO align this species filter with the one above
        # speciesfilters = 'Homo sapiens,Mus musculus,Drosophila melanogaster,
        # Danio rerio, Caenorhabditis elegans,Xenopus laevis'.split(',')
        speciesfilters = 'Homo sapiens,Mus musculus'.split(',')
        with myzip.open(fname, 'r') as csvfile:
            for line in csvfile:
                # skip header lines
                if not foundheader:
                    if re.match(r'BIOGRID_ID', line.decode()):
                        foundheader = True
                    continue
                line = line.decode().strip()
                # BIOGRID_ID
                # IDENTIFIER_VALUE
                # IDENTIFIER_TYPE
                # ORGANISM_OFFICIAL_NAME
                # 1	814566	ENTREZ_GENE	Arabidopsis thaliana
                (biogrid_num, id_num, id_type,
                 organism_label) = line.split('\t')
                if self.testMode:
                    g = self.testgraph
                    # skip any genes that don't match our test set
                    if int(biogrid_num) not in self.biogrid_ids:
                        continue
                else:
                    g = self.graph
                model = Model(g)
                # for each one of these,
                # create the node and add equivalent classes
                biogrid_id = 'BIOGRID:'+biogrid_num
                prefix = self._map_idtype_to_prefix(id_type)
                # TODO make these filters available as commandline options
                # geneidtypefilters='NCBIGene,OMIM,MGI,FlyBase,ZFIN,MGI,HGNC,
                # WormBase,XenBase,ENSEMBL,miRBase'.split(',')
                geneidtypefilters = 'NCBIGene,MGI,ENSEMBL,ZFIN,HGNC'.split(',')
                # proteinidtypefilters='HPRD,Swiss-Prot,NCBIProtein'
                if (speciesfilters is not None) \
                        and (organism_label.strip() in speciesfilters):
                    line_counter += 1
                    if (geneidtypefilters is not None) \
                            and (prefix in geneidtypefilters):
                        mapped_id = ':'.join((prefix, id_num))
                        model.addEquivalentClass(biogrid_id, mapped_id)
                    # this symbol will only get attached to the biogrid class
                    elif id_type == 'OFFICIAL_SYMBOL':
                        model.addClassToGraph(biogrid_id, id_num)
                    # elif (id_type == 'SYNONYM'):
                    # FIXME - i am not sure these are synonyms, altids?
                    #   gu.addSynonym(g,biogrid_id,id_num)
                if not self.testMode and limit is not None \
                        and line_counter > limit:
                    break
        # NOTE(review): as in _get_interactions, myzip leaks on exceptions;
        # consider a `with ZipFile(...)` context manager.
        myzip.close()
        return

    @staticmethod
    def _map_MI_to_RO(mi_id):
        """Map a PSI-MI interaction-type code to a relation from
        InteractionAssoc; unmapped codes default to interacts_with."""
        rel = InteractionAssoc.interaction_object_properties
        mi_ro_map = {
            # colocalization
            'MI:0403': rel['colocalizes_with'],
            # direct interaction
            'MI:0407': rel['interacts_with'],
            # synthetic genetic interaction defined by inequality
            'MI:0794': rel['genetically_interacts_with'],
            # suppressive genetic interaction defined by inequality
            'MI:0796': rel['genetically_interacts_with'],
            # additive genetic interaction defined by inequality
            'MI:0799': rel['genetically_interacts_with'],
            # association
            'MI:0914': rel['interacts_with'],
            # physical association
            'MI:0915': rel['interacts_with']
        }
        ro_id = rel['interacts_with']  # default
        # NOTE(review): `if k in d: d.get(k)` could simply be
        # `d.get(k, default)`.
        if mi_id in mi_ro_map:
            ro_id = mi_ro_map.get(mi_id)
        return ro_id

    @staticmethod
    def _map_MI_to_ECO(mi_id):
        """Map a PSI-MI detection-method code to an ECO evidence code;
        unmapped codes warn and default to experimental evidence."""
        eco_id = 'ECO:0000006'  # default to experimental evidence
        mi_to_eco_map = {
            'MI:0018': 'ECO:0000068',  # yeast two-hybrid
            'MI:0004': 'ECO:0000079',  # affinity chromatography
            'MI:0047': 'ECO:0000076',  # far western blotting
            'MI:0055': 'ECO:0000021',  # should be FRET, but using physical_interaction FIXME
            'MI:0090': 'ECO:0000012',  # desired: protein complementation, using: functional complementation
            'MI:0096': 'ECO:0000085',  # desired: pull down, using: immunoprecipitation
            'MI:0114': 'ECO:0000324',  # desired: x-ray crystallography, using: imaging assay
            'MI:0254': 'ECO:0000011',  # desired: genetic interference, using: genetic interaction evidence
            'MI:0401': 'ECO:0000172',  # desired: biochemical, using: biochemical trait evidence
            'MI:0415': 'ECO:0000005',  # desired: enzymatic study, using: enzyme assay evidence
            'MI:0428': 'ECO:0000324',  # imaging
            'MI:0686': 'ECO:0000006',  # desired: unspecified, using: experimental evidence
            'MI:1313': 'ECO:0000006'   # None?
        }
        if mi_id in mi_to_eco_map:
            eco_id = mi_to_eco_map.get(mi_id)
        else:
            logger.warning(
                "unmapped code %s. Defaulting to experimental_evidence", mi_id)
        return eco_id

    @staticmethod
    def _map_idtype_to_prefix(idtype):
        """
        Here we need to reformat the BioGrid source prefixes
        to standard ones used in our curie-map.

        A value of None means the id type is deliberately not mapped; an
        idtype absent from the table is returned unchanged with a warning.

        :param idtype:
        :return:

        """
        prefix = idtype
        idtype_to_prefix_map = {
            'XENBASE': 'XenBase',
            'TREMBL': 'TrEMBL',
            'MGI': 'MGI',
            'REFSEQ_DNA_ACCESSION': 'RefSeqNA',
            'MAIZEGDB': 'MaizeGDB',
            'BEEBASE': 'BeeBase',
            'ENSEMBL': 'ENSEMBL',
            'TAIR': 'TAIR',
            'GENBANK_DNA_GI': 'NCBIgi',
            'CGNC': 'CGNC',
            'RGD': 'RGD',
            'GENBANK_GENOMIC_DNA_GI': 'NCBIgi',
            'SWISSPROT': 'Swiss-Prot',
            'MIM': 'OMIM',
            'FLYBASE': 'FlyBase',
            'VEGA': 'VEGA',
            'ANIMALQTLDB': 'AQTLDB',
            'ENTREZ_GENE_ETG': 'ETG',
            'HPRD': 'HPRD',
            'APHIDBASE': 'APHIDBASE',
            'GENBANK_PROTEIN_ACCESSION': 'NCBIProtein',
            'ENTREZ_GENE': 'NCBIGene',
            'SGD': 'SGD',
            'GENBANK_GENOMIC_DNA_ACCESSION': 'NCBIGenome',
            'BGD': 'BGD',
            'WORMBASE': 'WormBase',
            'ZFIN': 'ZFIN',
            'DICTYBASE': 'dictyBase',
            'ECOGENE': 'ECOGENE',
            'BIOGRID': 'BIOGRID',
            'GENBANK_DNA_ACCESSION': 'NCBILocus',
            'VECTORBASE': 'VectorBase',
            'MIRBASE': 'miRBase',
            'IMGT/GENE-DB': 'IGMT',
            'HGNC': 'HGNC',
            'SYSTEMATIC_NAME': None,
            'OFFICIAL_SYMBOL': None,
            'REFSEQ_GENOMIC_DNA_ACCESSION': 'NCBILocus',
            'GENBANK_PROTEIN_GI': 'NCBIgi',
            'REFSEQ_PROTEIN_ACCESSION': 'RefSeqProt',
            'SYNONYM': None,
            'GRID_LEGACY': None,
            # the following showed up in 3.3.124
            'UNIPROT-ACCESSION': 'UniprotKB',
            'SWISS-PROT': 'Swiss-Prot',
            'OFFICIAL SYMBOL': None,
            'ENSEMBL RNA': None,
            'GRID LEGACY': None,
            'ENSEMBL PROTEIN': None,
            'REFSEQ-RNA-GI': None,
            'REFSEQ-RNA-ACCESSION': None,
            'REFSEQ-PROTEIN-GI': None,
            'REFSEQ-PROTEIN-ACCESSION-VERSIONED': None,
            'REFSEQ-PROTEIN-ACCESSION': None,
            'REFSEQ-LEGACY': None,
            'SYSTEMATIC NAME': None,
            'ORDERED LOCUS': None,
            'UNIPROT-ISOFORM': 'UniprotKB',
            'ENSEMBL GENE': 'ENSEMBL',
            'CGD': None,  # Not sure what this is?
            'WORMBASE-OLD': 'WormBase'
        }
        if idtype in idtype_to_prefix_map:
            prefix = idtype_to_prefix_map.get(idtype)
        else:
            logger.warning("unmapped prefix %s", prefix)
        return prefix

    def getTestSuite(self):
        """Return the unittest suite covering this source."""
        import unittest
        from tests.test_biogrid import BioGridTestCase
        # TODO add InteractionAssoc tests
        # TODO add test about if all prefixes are mapped?
        test_suite = \
            unittest.TestLoader().loadTestsFromTestCase(BioGridTestCase)
        return test_suite
| 15,927 | 5,235 |
from .eLABJournalPager import *
class Experiments(eLABJournalPager):
    """Placeholder subclass of eLABJournalPager; adds no behavior of its own."""
    # BUG FIX: removed extraction residue ("| 80 | 27 |") that had been fused
    # onto the `pass` line and made the module unparseable.
    pass
from dataclasses import dataclass
from typing import Any, Sequence
@dataclass(eq=False, frozen=True)
class TextSegment:
    """One segment (e.g. a verse or sentence) of a text, with range/empty flags."""

    text_id: str
    segment_ref: Any
    segment: Sequence[str]
    is_sentence_start: bool
    is_in_range: bool
    is_range_start: bool
    is_empty: bool

    def __repr__(self) -> str:
        # empty segments render as a range marker or EMPTY; otherwise the
        # joined tokens, falling back to NONEMPTY when no tokens are present
        if self.is_empty:
            marker = "<range>" if self.is_in_range else "EMPTY"
            return f"{self.segment_ref} - {marker}"
        marker = " ".join(self.segment) if len(self.segment) > 0 else "NONEMPTY"
        return f"{self.segment_ref} - {marker}"
# -*- coding: utf-8 -*-
import os
import unittest
from StringIO import StringIO
from mimecat import (Catalogue, _canonicalize_extension,
_parse_file, _parse_line)
# Fixture: an abridged sample in the Apache httpd mime.types format, kept
# verbatim (including comment-only lines and registered types that have no
# extensions) so the parser's skip/keep behavior is exercised.
TEST_MIME_TYPES = """
# This file maps Internet media types to unique file extension(s).
# Although created for httpd, this file is used by many software systems
# and has been placed in the public domain for unlimited redisribution.
#
# The table below contains both registered and (common) unregistered types.
# A type that has no unique extension can be ignored -- they are listed
# here to guide configurations toward known types and to make it easier to
# identify "new" types. File extensions are also commonly used to indicate
# content languages and encodings, so choose them carefully.
#
# Internet media types should be registered as described in RFC 4288.
# The registry is at <http://www.iana.org/assignments/media-types/>.
#
# MIME type (lowercased) Extensions
# ============================================ ==========
# application/activemessage
application/andrew-inset ez
application/json json
# application/kpml-request+xml
# audio/amr
audio/midi mid midi kar rmi
# audio/mobile-xmf
audio/mp4 mp4a
audio/mp4a-latm m4a m4p
audio/ogg oga ogg spx
image/jpeg jpeg jpg jpe
# image/jpm
# message/cpim
# message/delivery-status
message/rfc822 eml mime
text/css css
text/plain txt text conf def list log in
# text/xml
video/3gpp 3gp
video/3gpp2 3g2
video/ogg ogv
"""
class CatalogueTests(unittest.TestCase):
    """Unit tests for mimecat.Catalogue and its module-level parse helpers."""

    @classmethod
    def setUpClass(cls):
        # Write two fixture files once for the whole class: the abridged
        # mime.types sample and a tiny conflicting ("shibboleth") variant.
        cls.test_filename = "test.mime.types"
        cls.test_filename_shibboleth = "test-shibboleth.mime.types"
        with open(cls.test_filename, "w") as filep:
            filep.write(TEST_MIME_TYPES)
        with open(cls.test_filename_shibboleth, "w") as filep:
            filep.write("text/plain2 txt\n")
            filep.write("text/plain txt2\n")

    @classmethod
    def tearDownClass(cls):
        # Remove the fixture files created in setUpClass.
        os.unlink(cls.test_filename)
        os.unlink(cls.test_filename_shibboleth)

    def setUp(self):
        # One populated catalogue and one cleared catalogue for load_* tests.
        self.catalogue = Catalogue(self.test_filename)
        self.empty_catalogue = Catalogue(self.test_filename)
        self.empty_catalogue.clear()

    def test_init(self):
        """Constructing from a filename loads its mime types."""
        cat = Catalogue(self.test_filename)
        self.assertIn("message/rfc822",
                      cat._known_mimetypes)

    def test_init_with_filep(self):
        """Constructing from an open file object works too."""
        with open(self.test_filename, "r") as filep:
            cat = Catalogue(filep = filep)
        self.assertIn("message/rfc822",
                      cat._known_mimetypes)

    def test_init_with_order(self):
        """Multiple sources merge in order rather than overwriting."""
        with open(self.test_filename, "r") as filep:
            cat = Catalogue(self.test_filename_shibboleth, filep)
        # test_filename should've been used first, so text/plain2 should
        # come after text/plain in the extensions to type map
        type_list = cat._exts_to_types[".txt"]
        self.assertGreater(type_list.index("text/plain2"),
                           type_list.index("text/plain"))

    def test_init_fails(self):
        """An unreadable source raises IOError before assignment happens."""
        cat = None
        with self.assertRaises(IOError):
            cat = Catalogue(["BOGUS_FILE"])
        self.assertIsNone(cat)

    def test_clear(self):
        """clear() empties every internal mapping and index set."""
        self.catalogue.clear()
        self.assertEqual( {}, self.catalogue._types_to_exts)
        self.assertEqual( {}, self.catalogue._exts_to_types)
        self.assertEqual(set(), self.catalogue._known_mediatypes)
        self.assertEqual(set(), self.catalogue._known_mimetypes)
        self.assertEqual(set(), self.catalogue._known_extensions)

    def test_load_filenames_stops(self):
        # With the stop flag True only the first readable file is loaded:
        # the shibboleth file defines 1 media type, 2 mime types, 2 exts.
        self.empty_catalogue.load_filenames([self.test_filename_shibboleth,
                                             self.test_filename],
                                            True)
        self.assertEqual(len(self.empty_catalogue._known_mediatypes), 1)
        self.assertEqual(len(self.empty_catalogue._known_mimetypes), 2)
        self.assertEqual(len(self.empty_catalogue._known_extensions), 2)

    def test_load_filenames_does_not_stop(self):
        # With the stop flag False both files load, so counts exceed the
        # shibboleth-only numbers above.
        self.empty_catalogue.load_filenames([self.test_filename_shibboleth,
                                             self.test_filename], False)
        self.assertGreater(len(self.empty_catalogue._known_mediatypes), 1)
        self.assertGreater(len(self.empty_catalogue._known_mimetypes), 2)
        self.assertGreater(len(self.empty_catalogue._known_extensions), 2)

    def test_load_filenames_fail(self):
        with self.assertRaises(IOError):
            self.empty_catalogue.load_filenames(["BOGUS_FILE", "BOGUS_FILE2"])

    def test_load_filename(self):
        self.empty_catalogue.load_filename(self.test_filename_shibboleth)
        self.assertEqual(len(self.empty_catalogue._known_mediatypes), 1)
        self.assertEqual(len(self.empty_catalogue._known_mimetypes), 2)
        self.assertEqual(len(self.empty_catalogue._known_extensions), 2)

    def test_load_filename_fails(self):
        with self.assertRaises(IOError):
            self.empty_catalogue.load_filename("BOGUS_FILE")

    def test_load_file(self):
        with open(self.test_filename_shibboleth) as filep:
            self.empty_catalogue.load_file(filep)
        self.assertEqual(len(self.empty_catalogue._known_mediatypes), 1)
        self.assertEqual(len(self.empty_catalogue._known_mimetypes), 2)
        self.assertEqual(len(self.empty_catalogue._known_extensions), 2)

    def test_parse_file(self):
        # 13 is the number of non-comment entries in TEST_MIME_TYPES.
        with open(self.test_filename_shibboleth) as filep:
            items = [item for item in _parse_file(filep) if item is not None]
        self.assertEqual(len(items), 2)
        with open(self.test_filename) as filep:
            items = [item for item in _parse_file(filep) if item is not None]
        self.assertEqual(len(items), 13)

    def test_parse_line(self):
        """_parse_line: comment lines yield None, others (type, [.exts])."""
        result = _parse_line("#")
        self.assertIsNone(result)
        result = _parse_line("# more")
        self.assertIsNone(result)
        result = _parse_line("text/plain")
        self.assertEqual(("text/plain", []), result)
        result = _parse_line("text/plain ext1 ext2 ext3")
        self.assertEqual(("text/plain", [".ext1", ".ext2", ".ext3"]), result)
        result = _parse_line("text/plain ext1 ext2 ext3 # with comment")
        self.assertEqual(("text/plain", [".ext1", ".ext2", ".ext3"]), result)
        result = _parse_line("# text/plain ext1 ext2 ext3")
        self.assertIsNone(result)
        result = _parse_line("# text/plain ext1 ext2 ext3 # with comment")
        self.assertIsNone(result)

    def test_parse_line_fails(self):
        # A first token that is not a media/subtype pair is rejected.
        with self.assertRaises(ValueError):
            _ = _parse_line("invalid exts")

    def test_known_mediatypes(self):
        self.assertIn("application", self.catalogue.known_mediatypes)
        self.assertIn("text", self.catalogue.known_mediatypes)

    def test_known_mimetypes(self):
        self.assertIn("application/json", self.catalogue.known_mimetypes)
        self.assertIn("audio/mp4", self.catalogue.known_mimetypes)

    def test_known_extensions(self):
        self.assertIn(".ez", self.catalogue.known_extensions)
        self.assertIn(".m4a", self.catalogue.known_extensions)

    def test_get_extensions(self):
        # audio/midi maps to the 4 extensions listed in the fixture.
        exts = self.catalogue.get_extensions("audio/midi")
        self.assertEqual(len(exts), 4)

    def test_get_extensions_fails(self):
        with self.assertRaises(KeyError):
            self.catalogue.get_extensions("bad/type")

    def test_get_types(self):
        # Lookup accepts the extension with or without its leading dot.
        types = self.catalogue.get_types(".txt")
        self.assertEqual(len(types), 1)
        types = self.catalogue.get_types("txt")
        self.assertEqual(len(types), 1)

    def test_get_types_with_duplicate(self):
        self.catalogue.add_type("text/plain2", ".txt")
        types = self.catalogue.get_types("txt")
        self.assertIn("text/plain", types)
        self.assertIn("text/plain2", types)

    def test_get_types_fails(self):
        with self.assertRaises(KeyError):
            self.catalogue.get_types("asdf")

    def test_add_type(self):
        # add_type accepts a bare extension, a dotted extension, or a list.
        self.empty_catalogue.add_type("text/plain", "txt")
        self.assertIn("text", self.empty_catalogue._known_mediatypes)
        self.assertIn("text/plain", self.empty_catalogue._known_mimetypes)
        self.assertIn(".txt", self.empty_catalogue._known_extensions)
        self.empty_catalogue.clear()
        self.empty_catalogue.add_type("text/plain", ".txt")
        self.assertIn("text", self.empty_catalogue._known_mediatypes)
        self.assertIn("text/plain", self.empty_catalogue._known_mimetypes)
        self.assertIn(".txt", self.empty_catalogue._known_extensions)
        self.empty_catalogue.clear()
        self.empty_catalogue.add_type("text/plain", [".txt"])
        self.assertIn("text", self.empty_catalogue._known_mediatypes)
        self.assertIn("text/plain", self.empty_catalogue._known_mimetypes)
        self.assertIn(".txt", self.empty_catalogue._known_extensions)

    def test_add_types_with_duplicate_extensions(self):
        # Two types sharing one extension must both be retrievable each way.
        self.empty_catalogue.add_type("text/plain", "txt")
        self.empty_catalogue.add_type("text/doc", "txt")
        self.assertIn("text/plain", self.empty_catalogue._exts_to_types[".txt"])
        self.assertIn("text/doc", self.empty_catalogue._exts_to_types[".txt"])
        self.assertIn(".txt", self.empty_catalogue._types_to_exts["text/plain"])
        self.assertIn(".txt", self.empty_catalogue._types_to_exts["text/doc"])

    def test_add_type_fails(self):
        with self.assertRaises(ValueError):
            self.empty_catalogue.add_type("textplain", ".txt")

    def test_canonicalize_extension(self):
        # Canonical form carries a leading dot; "" and None pass through.
        ret = _canonicalize_extension("test")
        self.assertEqual(ret, ".test")
        ret = _canonicalize_extension(".test")
        self.assertEqual(ret, ".test")
        ret = _canonicalize_extension("")
        self.assertEqual(ret, "")
        ret = _canonicalize_extension(None)
        self.assertIsNone(ret)
| 10,084 | 3,297 |
# AUTOGENERATED BY NBDEV! DO NOT EDIT!

__all__ = ["index", "modules", "custom_doc_links", "git_url"]

# Maps each documented symbol to the notebook that defines it.
index = {
    "summary": "00_core.ipynb",
    "plot": "00_core.ipynb",
    "perf": "00_core.ipynb",
}

modules = ["ks.py"]
doc_url = "https://JiaxiangBU.github.io/pyks/"
git_url = "https://github.com/fastai/pyks/tree/master/"


def custom_doc_links(name):
    """This project defines no custom documentation links for any name."""
    return None
from __future__ import print_function
"""
Author: Emmanuel Salawu
Email: dr.emmanuel.salawu@gmail.com
Copyright 2016 Emmanuel Salawu
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys,os
import cPickle as cP, math
import subprocess as sbp
import numpy as np
import time
import argparse
# from Bio import SeqIO, Blast
# from Bio.Blast.Applications import NcbiblastnCommandline, NcbiblastpCommandline
# import Bio.Blast.NCBIXML as NCBIXML
# Amino-acid statistics keyed by one-letter residue code: (count, frequency).
# NOTE(review): presumably counts/frequencies in the helix database being
# searched — confirm against the database build scripts.
d3_ = {'A': (2649921, 0.109), 'C': (284780, 0.012), 'E': (2037420, 0.084), 'D': (1266155, 0.052), 'G': (1127806, 0.046), 'F': (927803, 0.038), 'I': (1418628, 0.058), 'H': (535711, 0.022), 'K': (1547402, 0.064), 'M': (642753, 0.026), 'L': (2794992, 0.115), 'N': (916269, 0.038), 'Q': (1098361, 0.045), 'P': (509355, 0.021), 'S': (1298566, 0.053), 'R': (1427259, 0.059), 'T': (1180051, 0.049), 'W': (337622, 0.014), 'V': (1505317, 0.062), 'Y': (795511, 0.033)}
#Source http://web.expasy.org/docs/relnotes/relstat.html
# Background residue frequencies taken from the Swiss-Prot release statistics.
rel_ab = {'A': 0.08259999999999999, 'C': 0.0137, 'E': 0.0674, 'D': 0.0546, 'G': 0.0708, 'F': 0.038599999999999995, 'I': 0.0593, 'H': 0.0227, 'K': 0.0582, 'M': 0.0241, 'L': 0.0965, 'N': 0.0406, 'Q': 0.0393, 'P': 0.0472, 'S': 0.0659, 'R': 0.0553, 'T': 0.053399999999999996, 'W': 0.0109, 'V': 0.0687, 'Y': 0.0292}
#Based on log_e math.log
# Scoring table: residue -> (count, freq, background, ratio, natural-log odds).
# The last element is what scoreMatchedSeq sums per matched residue.
d4_ = {key:(value[0], value[1], rel_ab[key], value[1]/rel_ab[key], math.log (value[1]/rel_ab[key])) for (key, value) in d3_.items()}
# Same table with the float columns rounded to 3 decimals (display use).
d4_rounded = {i: (d4_[i][0], d4_[i][1], round(d4_[i][2], 3), round(d4_[i][3], 3), round(d4_[i][4], 3),) for i in d4_}
# Lazily-loaded pickle caches (see parseLine and genHtmlTable).
needed_contacts = None
needed_dir = 'contacts'
compressed_hh_contacts = None
def scoreMatchedSeq (matchedSeq, contacts = [0], aaScore = d4_, divisor = 1.0):
    """Score a matched sequence by summed per-residue log-odds.

    Returns (normalized score, raw score, 100 - mean(contacts)), each value
    rounded to 3 decimals except the contact term.  Unknown residues score
    0.  NOTE(review): the mutable default ``contacts=[0]`` is never mutated
    here, so sharing it across calls is harmless.
    """
    raw_score = sum (aaScore.get (residue, (0., 0.))[-1] for residue in matchedSeq)
    # Guard against a zero divisor explicitly.
    scaled = raw_score / (divisor or 1.0)
    return round (scaled, 3), round (raw_score, 3), 100.0 - np.mean (contacts)
def parseLine (line, lenOfPattern, matchedPattern):
    """Parse one raw hit line into a scored, display-ready record.

    A line looks like '4xr7_J_1_544_561 TTLLTDLGYLFDMMERSH 10 208869':
    an underscore-packed identifier (pdb id, chain, a flag, full-helix
    start, full-helix end), the helix sequence, the match offset within
    that sequence, and the index of the helix in the database.

    Returns a list: [score tuple, id tuple, full sequence, offset,
    (start, stop) in PDB numbering, db index, matchedPattern, matched
    sub-sequence, HTML-highlighted sequence].
    """
    global needed_contacts
    #print matchedPattern
    split_line = line.split ()
    split_0_line = split_line [0].split ('_')
    # Full-helix boundaries are encoded in the identifier token.
    start_of_full_helix, end_of_full_helix = int (split_0_line [3]), int (split_0_line [4])
    seq = split_line [1]
    start = int (split_line [2])
    stop = start + lenOfPattern
    index_in_db = int (split_line [3])
    try:
        # Lazily load the per-helix contact profiles on first use.
        if not needed_contacts:
            needed_contacts = cP.load (open ('needed_contacts_%s.cP' % needed_dir, 'rb'))
        contacts = needed_contacts [index_in_db][1] [start : stop]
    except:
        # Best effort: a missing cache file or out-of-range index falls
        # back to a neutral contact profile.
        contacts = [0]
    start_in_pdb = start_of_full_helix + start
    stop_in_pdb = start_of_full_helix + stop
    matchedSeq = seq [start : stop]
    score = scoreMatchedSeq (matchedSeq, contacts)
    return [score,
            (split_0_line [0], split_0_line [1], 1, #int (split_0_line [2]),
             start_of_full_helix, end_of_full_helix, ),
            split_line [1], start, (start_in_pdb, stop_in_pdb - 1),
            int (split_line [3]),
            matchedPattern, matchedSeq,
            highlight1 (split_line [1], start, stop, matchedPattern),
            #, ,
            ]
def parseRawOutput (raw_output):
    """Split raw engine output into records.

    Records are delimited by '>'; each record becomes the list of its
    non-blank, stripped lines.  Records that end up empty are dropped.
    """
    records = []
    for chunk in raw_output.split ('>'):
        stripped = [line.strip () for line in chunk.splitlines ()]
        kept = [line for line in stripped if line]
        if kept:
            records.append (kept)
    return records
def parseKey (key):
    """Decode a compact pattern key such as 'E4D3E_5_D3E3H'.

    Returns (key_proper, split_key_list, effective_length):
      * split_key_list - the flat list of residues and integer gap counts;
      * key_proper     - the key segments re-joined with the overlapping
        leading characters of later segments stripped (which prefix is
        dropped depends on the gap-count arithmetic below);
      * effective_length - residue span from effectiveLenOfKeyProper.

    NOTE(review): split_key_list is deliberately published as a module
    global (read elsewhere); keep that side effect.
    """
    global split_key_list
    key_proper = ''
    split_key_1 = key.split ('_')
    split_key_list = []
    for index_1, item_1 in enumerate ( split_key_1 ):
        index_1p = index_1 - 1
        # A purely numeric segment (the '_5_' separators) stays one token;
        # a pattern segment is exploded into individual characters.
        if item_1.isdigit():
            split_key_2 = [item_1]
        else:
            split_key_2 = list (item_1)
        for item_2 in split_key_2:
            try:
                split_key_list.append (int (item_2))
            except:
                split_key_list.append (item_2)
        if len (item_1) > 1:
            if not index_1: #len (split_key_list) < 6:
                start = 0
            else:
                # Compare accumulated gap counts of the previous segments to
                # decide how many leading characters of this segment overlap
                # with what has already been emitted.
                if (split_key_list [(index_1p * 2) + 1] + split_key_list [(index_1p * 2) + 3]) < split_key_list [(index_1p * 2) + 5]:
                    start = 1
                else:
                    start = 3
            key_proper += item_1 [start :]
    return key_proper, split_key_list, effectiveLenOfKeyProper (key_proper)
#E4D3E_5_D3E3H
#04589 589
# 1 3
# 1 3 5 7 9 11
def parseRawOutputIntoNativeTypes (raw_output_seg):
    """Convert each textual record into native types.

    The first line of a record is its pattern key (decoded by parseKey);
    every following line is a hit, parsed by parseLine using the decoded
    key's effective length and proper form.
    """
    native_records = []
    for record in raw_output_seg:
        parsed = []
        for line_no, line in enumerate (record):
            if line_no == 0:
                parsed.append (parseKey (line))
            else:
                # parsed[0] is (key_proper, split_key_list, effective_len).
                parsed.append (parseLine (line, parsed [0][2], parsed [0][0]))
        native_records.append (parsed)
    return native_records
def effectiveLenOfKeyProper (key):
    """Residue span of a compact pattern key, e.g. 'F0D2E2H' -> 8.

    Each digit contributes its integer value (gap width); every other
    character counts as a single residue.
    """
    total = 0
    for ch in key:
        total += int (ch) if ch.isdigit () else 1
    return total
def simpleHighlight1 (seq, start, stop, tagBeg = '<span class="sh_1">', tagEnd = '</span>'):
    """Wrap seq[start:stop] in the given opening/closing HTML tags."""
    head, match, tail = seq [:start], seq [start : stop], seq [stop:]
    return ''.join ((head, tagBeg, match, tagEnd, tail))
def simpleHighlight2 (seq, start, stop, tagBeg = '<span class="sh_1">', tagEnd = '</span>'):
    """Wrap seq[start:stop] in the given opening/closing HTML tags.

    NOTE(review): identical contract to simpleHighlight1; the duplicate is
    kept as a separate hook point rather than merged.
    """
    return seq [:start] + tagBeg + seq [start : stop] + tagEnd + seq [stop:]
def breakSeq (seq, currentOffset=0, segLen=30):
pos = 0
lenSeq = len (seq)
if (lenSeq + currentOffset) < segLen:
return seq
output = ''
positions = range (-currentOffset, lenSeq, segLen)
#print positions
for index, pos in enumerate (positions [1:]):
output += seq [max (0, positions [index]): pos] + '<br/>'
output += seq [pos :]
return output
def detailedHighlightShortSeq (shortSeq, matchedPattern, tagBeg = '<span class="dh_1">', tagEnd = '</span>', start=0):
    '4xr7_J_1_544_561 TTLLTDLGYLFDMMERSH 10 208869'
    # Tag the pattern residues of a matched sub-sequence, inserting '<br/>'
    # every 30 visible characters (counting from `start`, the offset of the
    # match within the already-rendered line).  Example: pattern 'F0D2E2H'
    # over 'FDMMERSH' tags F+D, E and H; adjacent tagEnd+tagBeg pairs are
    # collapsed at the end so runs of tagged residues share one span.
    #global possitionsProcessed
    #print shortSeq
    #F0D2E2H FDMMERSH
    # 01234567
    neededStr = ''
    # Pattern exploded to alternating residue letters and integer gap widths.
    aaAndNumbers = [int(i) if i.isdigit () else i for i in matchedPattern]
    #['F', 0, 'D', 2, 'E', 2, 'H']
    #<span >FD</span>MM<span >E</span>RS<span >H</span>
    possitionsProcessed = 0; numOfBrAdded = (start // 30) + 1
    for index, aaOrNum in enumerate (aaAndNumbers):
        #print possitionsProcessed
        # Only odd indices (the gap widths) drive emission: each one emits
        # the residue before the gap, the gap itself, and the residue after.
        if shortSeq and (index % 2):
            if ((start + possitionsProcessed) > 30 * numOfBrAdded):
                neededStr += '<br/>'
                numOfBrAdded += 1
            if (possitionsProcessed == 0) and shortSeq:
                # NOTE(review): these look like leftover debug prints.
                print (shortSeq)
                print (possitionsProcessed)
            neededStr += tagBeg + shortSeq [possitionsProcessed] + tagEnd
            # elif ((start + possitionsProcessed) > 30 * numOfBrAdded):
            #     neededStr += '<br/>'
            #     numOfBrAdded += 1
            neededStr += shortSeq [possitionsProcessed + 1 : possitionsProcessed + 1 + aaOrNum]
            neededStr += tagBeg + shortSeq [possitionsProcessed + 1 + aaOrNum] + tagEnd
            possitionsProcessed += 1 + aaOrNum
    # Earlier alternative implementation, kept for reference:
    # if index % 2:
    #     if aaOrNum:
    #         neededStr += tagBeg + shortSeq [possitionsProcessed] + tagEnd
    #         neededStr += shortSeq [possitionsProcessed + 1 : possitionsProcessed + 1 + aaOrNum]
    #         neededStr += tagBeg + shortSeq [possitionsProcessed + 1 + aaOrNum] + tagEnd
    #         possitionsProcessed += 1 + aaOrNum
    #     else:
    #         neededStr += tagBeg + shortSeq [possitionsProcessed : possitionsProcessed + 2] + tagEnd
    #         possitionsProcessed += 2
    # Merge directly-adjacent close/open tag pairs into one continuous span.
    neededStr = neededStr.replace (tagEnd + tagBeg, '')
    return neededStr
def highlight1 (seq, start, stop, matchedPattern,
                tagBegSimple = '<span class="sh_1">', tagEndSimple = '</span>',
                tagBegDetailed = '<span class="dh_1">', tagEndDetailed = '</span>'):
    """Render a full helix sequence as HTML with the match highlighted.

    The prefix and suffix are line-broken every 30 characters via breakSeq;
    the matched region gets per-residue detail tags plus an outer simple
    span.
    """
    prefix = breakSeq (seq [:start], currentOffset=0, segLen=30)
    detailed = detailedHighlightShortSeq (seq [start : stop], matchedPattern,
                                          tagBegDetailed, tagEndDetailed, start=start)
    suffix = breakSeq (seq [stop:], currentOffset=stop % 30, segLen=30)
    return prefix + tagBegSimple + detailed + tagEndSimple + suffix
def generateSortedResults (raw_output_seg_native):
    """Flatten all hit records (dropping each key header) and sort best-first.

    Records sort on their leading score tuple, descending.
    """
    hits = []
    for record in raw_output_seg_native:
        # record[0] is the parsed key header; the rest are hits.
        hits.extend (record [1:])
    hits.sort (reverse=True)
    return hits
def process_output (raw_output, jobId):
    """Parse the worker's raw output and persist both forms to disk.

    Writes '<jobId>.raw_output_seg_native' and '<jobId>.sorted_results' as
    highest-protocol pickles, then returns
    (sorted_results, raw_output_seg_native).
    """
    #raw_output = sampleOutput
    raw_output_seg = parseRawOutput (raw_output)
    raw_output_seg_native = parseRawOutputIntoNativeTypes (raw_output_seg)
    cP.dump (raw_output_seg_native, open ('%s.raw_output_seg_native' % jobId, 'wb'), -1)
    sorted_results = generateSortedResults (raw_output_seg_native)
    cP.dump (sorted_results, open ('%s.sorted_results' % jobId, 'wb'), -1)
    return sorted_results, raw_output_seg_native
def genHtmlTable (sorted_results):
    """Render the sorted hit list as a styled HTML results table (string).

    Rows are classed 'unique'/'not_unique' by their highlighted-sequence
    HTML so the front end can filter duplicates.  Helix-helix interaction
    partners come from the lazily-loaded compressed_hh_contacts pickle when
    it is available.
    """
    global compressed_hh_contacts
    table_headers = ['#', 'U#', 'Matched<br/>Sequence (MS)', 'Matched<br/>Pattern', 'Full Helix (FH)', 'PDB ID: Chain',
                     ' Positions in PDB <br/>(MS), (FH)', 'Helical<br/>Propensity', 'Contact', 'Interacting<br>Partners',
                     ] #'View Helix in 3D', ]
    output = '<tr>%s</tr>' % (''.join (['<th>%s</th>' % i for i in table_headers]), )
    uniqueness = set ([])
    for index, entry in enumerate (sorted_results):
        # Uniqueness is judged on the highlighted HTML form of the sequence.
        seq = entry [7] # entry [2]
        if seq in uniqueness:
            unique = 'not_unique'
        else:
            uniqueness.add (seq)
            unique = 'unique'
        # entry[0] is (normalized score, raw score, 100 - mean contact);
        # a value of exactly 100.0 means "no contact data", shown blank.
        contact_ = '%.3f' % (100.0 - entry [0][2],) if (entry [0][2] != 100.0) else ''
        helical_contact = ''
        try:
            # Lazily load the helix-helix contact cache on first use.
            if not compressed_hh_contacts:
                compressed_hh_contacts = cP.load (open ('compressed_hh_contacts.cP' , 'rb'))
            # PDB ID  CHAIN  start_end_pair
            helical_contact_info = compressed_hh_contacts [entry [1][0]] [entry [1][1]] [(entry [1][3], entry [1][4])]
            for hc_info in helical_contact_info:
                helical_contact += "%(pdbid)s:%(chain)s (%(start)s, %(end)s)</br>" % \
                    {"pdbid": entry [1][0], "chain": hc_info [1], "start": hc_info [2], "end": hc_info [3]}
        except:
            # Best effort: a missing cache or absent key just leaves the
            # partners column empty.
            pass
        table_row = [str (index + 1), str (len (uniqueness)), entry [7], entry [6], entry [8],
                     '%s:%s'% (entry [1][0], entry [1][1]),
                     '%s, (%s, %s)' % (str (entry [4]), entry [1][3], entry [1][4], ),
                     '%.3f' % (entry [0][0],), contact_, helical_contact, # 'View Helix'
                     ]
        output += '<tr class="%s">%s</tr>' % (unique, ''.join (['<td>%s</td>' % i for i in table_row]), )
    table_css = '''
.sh_1 {color: blue;}
.dh_1 {font-weight: 900;}
.monospace {font-family: "Courier New", Courier, monospace; font-size: 80%; }
td {font-size: 80%; }
tr:nth-child(2n) {
background-color:#F4F4F8;
}
tr:nth-child(2n+1) {
background-color:#EFF1F1;
}
'''
    return '<style>%s</style><span class="monospace"><table id="results1">%s</table></span>' % (table_css,output)
def genViableNumbers (string):
    """Expand a numeric spec into a list of ints.

    'a,b' means the inclusive range a..b (an optional third value is the
    step), a single number means itself, and '/' separates two such specs
    whose expansions are concatenated: '1,3/7,9' -> [1, 2, 3, 7, 8, 9].

    BUG FIX: the original concatenated the results of bare ``range(...)``
    calls, which returns lists only on Python 2; on Python 3 ``range`` is a
    lazy object and ``range + range``/``range + list`` raises TypeError.
    Wrapping in ``list(...)`` preserves the Python 2 behavior on both
    versions.
    """
    def _expand (spec):
        # Parse one comma-separated spec into a concrete list of ints.
        bounds = [int (i) for i in spec.split (',')]
        if len (bounds) > 1:
            bounds [1] = bounds [1] + 1      # make the upper bound inclusive
            return list (range (*bounds))
        return bounds
    if string.find ('/') != -1:
        parts = string.split ('/')
        # Only the first two '/'-separated specs are honored (as before).
        return _expand (parts [0]) + _expand (parts [1])
    return _expand (string)
def genViableAlphabets (string):
    """Split a '/'-separated residue spec into its list of alternatives."""
    alternatives = string.split ('/')
    return alternatives
def genSubQueries (list_of_lists): # [[A, D], [1, 2, 3], [E, f]]
    """Cartesian product of the first three alternative lists, each triple
    concatenated into one string (e.g. 'A1E')."""
    first, middle, last = list_of_lists [0], list_of_lists [1], list_of_lists [2]
    return ['%s%s%s' % (a, b, c)
            for a in first
            for b in middle
            for c in last]
def genViableQueries (needed_sub_queries): # [['A0S', 'A2S', 'A4S'], ['S2L', 'S3L'], ['A3A']]
    """Join per-window sub-queries (1 to 4 levels) into full pattern keys.

    Consecutive windows overlap by one residue, so each join drops the
    first character of the later sub-query; for 3+ levels a '_<offset>_'
    separator is inserted, the offset being computed from the gap digits
    embedded in the earlier sub-queries.  NOTE(review): at most four
    levels are handled; deeper inputs are silently ignored.
    """
    num_levels = len (needed_sub_queries)
    viable_queries = []
    for itemsL0 in needed_sub_queries [0]:
        if num_levels > 1:
            for itemsL1 in needed_sub_queries [1]:
                if num_levels > 2:
                    for itemsL2 in needed_sub_queries [2]:
                        if num_levels > 3:
                            for itemsL3 in needed_sub_queries [3]:
                                #viable_queries.append (itemsL0 + '_' + itemsL1 + '_' + itemsL2 + '_' + itemsL3)
                                viable_queries.append (itemsL0 + itemsL1 [1:] + '_' + str (1 + int (itemsL0 [1:-1]) + 1 + int (itemsL1 [1:-1])) + '_' + itemsL2 [:-1] + itemsL3)
                        else:
                            #viable_queries.append (itemsL0 + '_' + itemsL1 + '_' + itemsL2)
                            #viable_queries.append (itemsL0 + itemsL1 [1:-1] + itemsL2)
                            viable_queries.append (itemsL0 + itemsL1 [1:] + '_' + str (1 + int (itemsL0 [1:-1])) + '_' + itemsL1 [:-1] + itemsL2)
                else:
                    #viable_queries.append (itemsL0 + '_' + itemsL1)
                    viable_queries.append (itemsL0 + itemsL1 [1:])
        else:
            viable_queries.append (itemsL0)
    return viable_queries
def processQuery (query):
    """Expand a whitespace-separated pattern query into all concrete keys.

    Tokens alternate residue specs (expanded via genViableAlphabets) and
    gap specs (expanded via genViableNumbers).  Overlapping 3-token
    windows become sub-queries which genViableQueries then joins.
    """
    tokens = query.split ()
    expanded = [genViableAlphabets (token) if position % 2 == 0
                else genViableNumbers (token)
                for position, token in enumerate (tokens)]
    # Slide a 3-wide window in steps of 2 so adjacent windows share a spec.
    sub_queries = [genSubQueries (expanded [position : position + 3])
                   for position in range (0, len (expanded) - 2, 2)]
    return genViableQueries (sub_queries)
def genFastaForQuery (viable_queries, outputFileName):
    """Render the expanded queries as FASTA text.

    Every header is the query with underscores turned into spaces; the
    very first header additionally carries the job/output name.
    """
    records = []
    for position, variant in enumerate (viable_queries):
        header = variant.replace ('_', ' ')
        if position == 0:
            records.append ('>%s %s\n%s\n' % (header, outputFileName, variant))
        else:
            records.append ('>%s\n%s\n' % (header, variant))
    return ''.join (records)
def mainQueryDb (query, outputFileName = 'outputFileName'):
    """Record the raw query, expand it to FASTA, and enqueue it.

    Writes '<outputFileName>.submitted' and 'query.fasta', then copies the
    FASTA into the 'pending_jobs' directory watched by the worker.
    """
    with open ('%s.submitted' % (outputFileName,), 'w') as submitted:
        submitted.write (query)
    fasta_query = genFastaForQuery (processQuery (query), outputFileName)
    with open ('query.fasta', 'w') as query_file:
        query_file.write (fasta_query)
    sbp.call ('cp query.fasta pending_jobs', shell=True)
def query(example_query,outputFileName):
    """Submit a query and block until results arrive, then render HTML.

    Deletes any stale '<outputFileName>.output', enqueues the query via
    mainQueryDb, polls for the output file (written by an external worker
    watching 'pending_jobs'), parses/pickles the results, and writes
    '<outputFileName>_table.html'.
    """
    raw_result_file = '%s.output' % outputFileName
    # Remove stale results so the polling loop below waits for fresh output.
    while os.path.exists(raw_result_file):
        os.remove(raw_result_file)
    mainQueryDb(example_query,outputFileName = outputFileName)
    i = 0
    # Busy-wait with a small animated status until the worker delivers.
    while not os.path.exists(raw_result_file):
        i +=1
        status = '.' * (i%4)
        print( 'running%-3s'% status, end='\r')
        sys.stdout.flush()
        time.sleep(0.3)
    raw_result = open (raw_result_file).read ()
    sorted_results, raw_output_seg_native = process_output(raw_result,outputFileName)
    html_table = genHtmlTable (sorted_results)
    with open('%s_table.html'%outputFileName,'w') as f:
        f.write(html_table)
    print('The output file is %s_table.html'%outputFileName)
if __name__ == "__main__":
    # Command-line entry point: run one TP-DB query end to end.
    parser = argparse.ArgumentParser()
    parser.add_argument('--query', help='The query string of TP-DB')
    parser.add_argument('--output', help='The output prefix')
    args = parser.parse_args()
    query(args.query,args.output)
    # from query import query
    # query('asdfasdfasdf','sdfsdsa')
    # query('asdfasdfasdf1','sdfsdsa1')
    # query('asdfasdfasdf2','sdfsdsa2')
    # query('asdfasdfasdf3','sdfsdsa3')
| 17,404 | 6,368 |
from tunepy2 import Genome
from tunepy2.interfaces import AbstractOptimizer, AbstractGenomeFactory, AbstractConvergenceCriterion
class BasicOptimizer(AbstractOptimizer):
    """
    A minimal greedy optimizer: each iteration asks the factory for one new
    Genome and keeps it only if it strictly beats the current candidate.
    """

    def __init__(
            self,
            initial_candidate: Genome,
            genome_factory: AbstractGenomeFactory,
            convergence_criterion: AbstractConvergenceCriterion):
        """
        Creates a new BasicOptimizer.

        :param initial_candidate: seed Genome object
        :param genome_factory: creates new Genome objects
        :param convergence_criterion: will declare convergence once criterion is satisfied
        """
        self._candidate = initial_candidate
        self._genome_factory = genome_factory
        self._convergence_criterion = convergence_criterion
        self._converged = False

    def next(self):
        """
        Performs the next iteration of optimization.
        """
        incumbent = self._candidate
        challenger = self._genome_factory.build([incumbent])
        challenger.run()
        # Greedy acceptance: only a strictly better fitness replaces the
        # incumbent (ties keep the old candidate).
        if challenger.fitness > incumbent.fitness:
            self._candidate = challenger
        # Convergence is judged on the attempted transition whether or not
        # the challenger was accepted.
        self._converged = self._convergence_criterion.converged(incumbent, challenger)

    @property
    def converged(self) -> bool:
        """
        Whether or not this algorithm has converged.

        :return: true when this algorithm has converged or false if not
        """
        return self._converged

    @property
    def best_genome(self) -> Genome:
        """
        The best genome found so far.

        :return: a Genome instance
        """
        return self._candidate
| 1,745 | 488 |
from copy import deepcopy
from .classic import ClassicGRN
import numpy as np
import tensorflow as tf
class GPUGRN(ClassicGRN):
    """ClassicGRN variant whose update dynamics run as TensorFlow ops.

    Concentrations live both as the NumPy vector managed by ClassicGRN
    (``self.concentration``) and as three TF tensors holding the input,
    output and regulatory slices.  Values are read back with short-lived
    sessions (TF 1.x style ``tf.Session`` API).
    """

    def __init__(self):
        # State is populated by setup()/clone(); deliberately does not call
        # ClassicGRN.__init__.
        pass

    def reset(self):
        """Reset every concentration to the uniform 1/N and rebuild the
        three slice tensors from it.  Returns self for chaining."""
        self.concentration = np.ones(
            len(self.identifiers)) * (1.0/len(self.identifiers))
        self.tf_input_conc = tf.convert_to_tensor(
            self.concentration[0:self.num_input], dtype=tf.float32)
        self.tf_output_conc = tf.convert_to_tensor(
            self.concentration[self.num_input:(self.num_input +
                                               self.num_output)],
            dtype=tf.float32)
        self.tf_regulatory_conc = tf.convert_to_tensor(
            self.concentration[self.num_input+self.num_output:],
            dtype=tf.float32)
        return self

    def warmup(self, nsteps):
        """Zero the inputs, advance ``nsteps`` ClassicGRN (NumPy) steps,
        then refresh the TF tensors from the resulting concentrations."""
        self.concentration[0:self.num_input] = np.zeros(self.num_input)
        for i in range(nsteps):
            super(GPUGRN, self).step()
        self.tf_input_conc = tf.convert_to_tensor(
            self.concentration[0:self.num_input], dtype=tf.float32)
        self.tf_output_conc = tf.convert_to_tensor(
            self.concentration[self.num_input:(self.num_input +
                                               self.num_output)],
            dtype=tf.float32)
        self.tf_regulatory_conc = tf.convert_to_tensor(
            self.concentration[self.num_input+self.num_output:],
            dtype=tf.float32)

    def setup(self):
        """Run ClassicGRN.setup, then build the constant tensors used by
        step(): net signatures, beta, delta/N and the input mask."""
        super(GPUGRN, self).setup()
        self.length = self.num_input + self.num_output + self.num_regulatory
        self.tf_input_conc = tf.convert_to_tensor(
            self.concentration[0:self.num_input], dtype=tf.float32)
        self.tf_output_conc = tf.convert_to_tensor(
            self.concentration[self.num_input:(self.num_input +
                                               self.num_output)],
            dtype=tf.float32)
        self.tf_regulatory_conc = tf.convert_to_tensor(
            self.concentration[self.num_input+self.num_output:],
            dtype=tf.float32)
        # Net influence per protein pair: enhancing minus inhibiting match.
        self.tf_sigs = tf.convert_to_tensor(self.enhance_match -
                                            self.inhibit_match,
                                            dtype=tf.float32)
        self.tf_beta = tf.convert_to_tensor(self.beta, dtype=tf.float32)
        self.tf_delta_n = tf.convert_to_tensor(self.delta/self.length,
                                               dtype=tf.float32)
        # Mask zeroing the output slice so outputs never influence others.
        self.tf_output_mask = tf.convert_to_tensor(
            np.concatenate((np.ones(self.num_input),
                            np.zeros(self.num_output),
                            np.ones(self.num_regulatory))),
            dtype=tf.float32)

    def get_signatures(self):
        # Evaluate and return the signature matrix as a NumPy array.
        with tf.Session() as s:
            return s.run(self.tf_sigs)

    def get_concentrations(self):
        # Evaluate the full concatenated concentration vector.
        with tf.Session() as s:
            return s.run(tf.concat([self.tf_input_conc,
                                    self.tf_output_conc,
                                    self.tf_regulatory_conc], 0))

    def set_input(self, input_t):
        # Replace the input slice with externally supplied concentrations.
        inp_concs = tf.convert_to_tensor(input_t, dtype=tf.float32)
        self.tf_input_conc = inp_concs

    def step(self):
        """One dynamics step, expressed as TF ops.

        Masked concentrations are multiplied through the signature matrix,
        scaled by delta/N, added, clipped at zero, then the whole vector is
        normalized by the output+regulatory mass (when positive).  Only the
        output and regulatory slices are written back; inputs stay fixed.
        """
        concs = tf.concat([self.tf_input_conc, self.tf_output_conc,
                           self.tf_regulatory_conc], 0)
        conc_diff = tf.multiply(concs, self.tf_output_mask)
        conc_diff = tf.reshape(conc_diff, [1, self.length])
        conc_diff = tf.matmul(conc_diff, self.tf_sigs)
        conc_diff = tf.multiply(self.tf_delta_n, conc_diff)
        concs = tf.add(concs, conc_diff)
        concs = tf.maximum(0.0, concs)
        concs = tf.reshape(concs, [self.length])
        _, regs = tf.split(concs, [self.num_input,
                                   self.num_regulatory+self.num_output])
        sumconcs = tf.reduce_sum(regs)
        # Guard against division by zero when all non-input mass vanishes.
        concs = tf.cond(tf.greater(sumconcs, 0),
                        lambda: tf.div(concs, sumconcs), lambda: concs)
        _, self.tf_output_conc, self.tf_regulatory_conc = tf.split(
            concs, [self.num_input, self.num_output, self.num_regulatory])

    def get_output_tensor(self):
        # The symbolic output slice (no session evaluation).
        return self.tf_output_conc

    def get_output(self):
        # Evaluate and return the output concentrations as NumPy values.
        with tf.Session() as s:
            return s.run(self.get_output_tensor())

    def clone(self):
        """Deep-copy genome-level fields into a fresh, not-yet-setup GPUGRN."""
        g = GPUGRN()
        g.identifiers = deepcopy(self.identifiers)
        g.enhancers = deepcopy(self.enhancers)
        g.inhibitors = deepcopy(self.inhibitors)
        g.beta = deepcopy(self.beta)
        g.delta = deepcopy(self.delta)
        g.num_input = deepcopy(self.num_input)
        g.num_output = deepcopy(self.num_output)
        g.num_regulatory = deepcopy(self.num_regulatory)
        return g
| 4,759 | 1,594 |
# modified for custom training and testing on GPU by Utkarsh Patel
from classifiers import AbstractTokenizedDocumentClassifier
from embeddings import WordEmbeddings
from nnclassifiers import StackedLSTMTokenizedDocumentClassifier, CNNTokenizedDocumentClassifier
from nnclassifiers_experimental import StructuredSelfAttentiveSentenceEmbedding
from readers import JSONPerLineDocumentReader, AHVersusDeltaThreadReader
from tcframework import LabeledTokenizedDocumentReader, AbstractEvaluator, Fold, TokenizedDocumentReader, \
TokenizedDocument, ClassificationEvaluator
from comment import Comment
from vocabulary import Vocabulary
import argparse, os
import numpy as np
import pickle
class ClassificationExperiment:
    """Ties a labeled-document reader, a classifier and an evaluator together.

    run() drives fold-wise cross-validation; label_external() trains on the
    reader's full training split and labels documents from another reader.
    """

    def __init__(self, labeled_document_reader: LabeledTokenizedDocumentReader,
                 classifier: AbstractTokenizedDocumentClassifier, evaluator: AbstractEvaluator):
        self.reader = labeled_document_reader
        self.classifier = classifier
        self.evaluator = evaluator

    def run(self) -> None:
        """Train and test every fold, evaluating cumulatively after each."""
        __folds = self.reader.get_folds()
        for i, fold in enumerate(__folds, start=1):
            assert isinstance(fold, Fold)
            assert fold.train and fold.test
            print("Running fold %d/%d" % (i, len(__folds)))
            self.classifier.train(fold.train)
            predicted_labels = self.classifier.test(fold.test, fold_no=i)
            self.evaluate_fold(fold.test, predicted_labels)
            print("Evaluating after %d folds" % i)
            self.evaluator.evaluate()
        print("Final evaluation; reader.input_path_train was %s" % self.reader.input_path_train)
        self.evaluator.evaluate()

    def evaluate_fold(self, labeled_document_instances: list, predicted_labels: list):
        """Feed one fold's gold labels, predictions and doc ids to the evaluator."""
        assert labeled_document_instances
        assert len(predicted_labels)
        assert len(labeled_document_instances) == len(predicted_labels), "Prediction size mismatch"
        # Gold and predicted labels must share a type (e.g. both strings).
        assert isinstance(labeled_document_instances[0].label, type(predicted_labels[0]))
        # convert string labels int
        all_gold_labels = [doc.label for doc in labeled_document_instances]
        # collect IDs
        ids = [doc.id for doc in labeled_document_instances]
        self.evaluator.add_single_fold_results(all_gold_labels, predicted_labels, ids)

    def label_external(self, document_reader: TokenizedDocumentReader) -> dict:
        """Train on the full training split, then label the given documents.

        :return: dict mapping document id -> (predicted label, probabilities)
        """
        self.classifier.train(self.reader.train, validation=False)
        instances = document_reader.instances
        predictions, probs = self.classifier.test(instances)
        probs = list(probs)
        result = dict()
        for instance, prediction, prob in zip(instances, predictions, probs):
            assert isinstance(instance, TokenizedDocument)
            # assert isinstance(prediction, float)
            # get id and put the label to the resulting dictionary
            cur_text = ' '.join(instance.tokens)  # NOTE(review): unused
            result[instance.id] = (prediction, prob)
        return result
def cross_validation_ah(model_type):
    """Cross-validate an ad-hominem classifier on the no-context dataset.

    :param model_type: 'cnn' selects the CNN classifier; any other value
        selects the stacked-LSTM classifier.
    """
    # classification without context
    import random
    random.seed(1234567)
    import tensorflow as tf
    # GPU is required; running on CPU is rejected outright.
    if tf.test.is_gpu_available():
        strategy = tf.distribute.MirroredStrategy()
        print('Using GPU')
    else:
        raise ValueError('CPU not recommended.')
    with strategy.scope():
        vocabulary = Vocabulary.deserialize('en-top100k.vocabulary.pkl.gz')
        embeddings = WordEmbeddings.deserialize('en-top100k.embeddings.pkl.gz')
        reader = JSONPerLineDocumentReader('data/experiments/ah-classification1/exported-3621-sampled-positive-negative-ah-no-context.json', True)
        e = None
        if model_type == 'cnn':
            e = ClassificationExperiment(reader, CNNTokenizedDocumentClassifier(vocabulary, embeddings), ClassificationEvaluator())
        else:
            e = ClassificationExperiment(reader, StackedLSTMTokenizedDocumentClassifier(vocabulary, embeddings), ClassificationEvaluator())
        e.run()
def cross_validation_thread_ah_delta_context3():
    """Cross-validate the self-attentive (SSAE) classifier on the
    AH-versus-delta thread dataset with context window 3."""
    # classification with context
    import random
    random.seed(1234567)
    import tensorflow as tf
    # GPU is required; running on CPU is rejected outright.
    if tf.test.is_gpu_available():
        strategy = tf.distribute.MirroredStrategy()
        print('Using GPU')
    else:
        raise ValueError('CPU not recommended.')
    with strategy.scope():
        vocabulary = Vocabulary.deserialize('en-top100k.vocabulary.pkl.gz')
        embeddings = WordEmbeddings.deserialize('en-top100k.embeddings.pkl.gz')
        reader = AHVersusDeltaThreadReader('data/sampled-threads-ah-delta-context3', True)
        e = ClassificationExperiment(reader, StructuredSelfAttentiveSentenceEmbedding(vocabulary, embeddings, '/tmp/visualization-context3'), ClassificationEvaluator())
        e.run()
def train_test_model_with_context(train_dir, indir, outdir):
    """Custom training and testing of the SSAE (context) model.

    :param train_dir: path to JSON file containing training examples
    :param indir: path to LOG (pickle) file containing examples as Comment()
        objects (which have already been classified by Bert)
    :param outdir: path to LOG (pickle) file to be created, with this
        model's prediction added to each comment
    """
    import random
    random.seed(1234567)
    import tensorflow as tf
    if tf.test.is_gpu_available():
        strategy = tf.distribute.MirroredStrategy()
        print('Using GPU')
    else:
        raise ValueError('CPU not recommended.')
    with strategy.scope():
        vocabulary = Vocabulary.deserialize('en-top100k.vocabulary.pkl.gz')
        embeddings = WordEmbeddings.deserialize('en-top100k.embeddings.pkl.gz')
        reader = JSONPerLineDocumentReader(train_dir, True)
        e = ClassificationExperiment(
            reader,
            StructuredSelfAttentiveSentenceEmbedding(vocabulary, embeddings),
            ClassificationEvaluator())
        test_comments = TokenizedDocumentReader(indir)
        result = e.label_external(test_comments)
        for k in result.keys():
            print(f'{k}: {result[k]}')
        # Load the previously classified comments, keyed by their string id.
        instances = dict()
        with open(indir, 'rb') as f:
            try:
                while True:
                    comment = pickle.load(f)
                    print(comment)
                    instances[str(comment.id)] = comment
            except EOFError:
                pass
        # BUG FIX: the original referenced an undefined name `model_type`
        # below (NameError at runtime). This function always uses the SSAE
        # context model, so record it under that name.
        model_type = 'ssae'
        with open(outdir, 'wb') as f:
            for key in result.keys():
                model_label, model_score = result[key]
                model_label = model_label.lower()
                # Index 1 holds the positive-class probability; index 0 is
                # used when the predicted label is 'none'.
                score = model_score[1]
                if model_label == 'none':
                    score = model_score[0]
                instances[key].add_model(model_type, model_label, score, None)
                comment = instances[key]
                print(comment)
                print(comment.labels)
                print(comment.scores)
                print('=' * 20)
                pickle.dump(instances[key], f)
def train_test_model_no_context(model_type, train_dir, indir, outdir):
    """Custom training and testing of the no-context CNN / BiLSTM models.

    :param model_type: 'cnn' selects the CNN classifier; any other value
        selects the stacked-LSTM classifier. Also recorded on each comment.
    :param train_dir: path to JSON file containing training examples
    :param indir: path to LOG (pickle) file containing examples as Comment()
        objects (which have already been classified by Bert)
    :param outdir: path to LOG (pickle) file to be created, with this
        model's prediction added to each comment
    """
    import random
    random.seed(1234567)
    import tensorflow as tf
    if tf.test.is_gpu_available():
        strategy = tf.distribute.MirroredStrategy()
        print('Using GPU')
    else:
        raise ValueError('CPU not recommended.')
    with strategy.scope():
        vocabulary = Vocabulary.deserialize('en-top100k.vocabulary.pkl.gz')
        embeddings = WordEmbeddings.deserialize('en-top100k.embeddings.pkl.gz')
        reader = JSONPerLineDocumentReader(train_dir, True)
        if model_type == 'cnn':
            classifier = CNNTokenizedDocumentClassifier(vocabulary, embeddings)
        else:
            classifier = StackedLSTMTokenizedDocumentClassifier(vocabulary, embeddings)
        e = ClassificationExperiment(reader, classifier, ClassificationEvaluator())
        # e.run()  -- full cross-validation run intentionally left disabled
        test_comments = TokenizedDocumentReader(indir)
        result = e.label_external(test_comments)
        for k in result.keys():
            print(f'{k}: {result[k]}')
        # Load the previously classified comments, keyed by their string id.
        # Context managers replace the original open()/close() pairs, which
        # leaked the handles on any exception other than EOFError.
        instances = dict()
        with open(indir, 'rb') as f:
            try:
                while True:
                    comment = pickle.load(f)
                    print(comment)
                    instances[str(comment.id)] = comment
            except EOFError:
                pass
        with open(outdir, 'wb') as f:
            for key in result.keys():
                model_label, model_score = result[key]
                model_label = model_label.lower()
                # Index 1 holds the positive-class probability; index 0 is
                # used when the predicted label is 'none'.
                score = model_score[1]
                if model_label == 'none':
                    score = model_score[0]
                instances[key].add_model(model_type, model_label, score, None)
                comment = instances[key]
                print(comment)
                print(comment.labels)
                print(comment.scores)
                print('=' * 20)
                pickle.dump(instances[key], f)
def main3():
    """CLI entry point: custom training/testing of the context (SSAE) model."""
    arg_parser = argparse.ArgumentParser()
    for flag, help_text in (
            ("--train_dir", "Path to JSON file containing training examples"),
            ("--indir", "Path to LOG file containing examples as Comment() object (which has already been classified by Bert)"),
            ("--outdir", "Path to LOG file to be created by adding prediction of this model as well")):
        arg_parser.add_argument(flag, default=None, type=str, required=True, help=help_text)
    parsed = arg_parser.parse_args()
    train_test_model_with_context(parsed.train_dir, parsed.indir, parsed.outdir)
def main2():
    """CLI entry point: custom training/testing of the no-context models."""
    arg_parser = argparse.ArgumentParser()
    for flag, help_text in (
            ("--model", "Model used for classification"),
            ("--train_dir", "Path to JSON file containing training examples"),
            ("--indir", "Path to LOG file containing examples as Comment() object (which has already been classified by Bert)"),
            ("--outdir", "Path to LOG file to be created by adding prediction of this model as well")):
        arg_parser.add_argument(flag, default=None, type=str, required=True, help=help_text)
    parsed = arg_parser.parse_args()
    train_test_model_no_context(parsed.model, parsed.train_dir, parsed.indir, parsed.outdir)
def main():
    """CLI entry point for the supervised-learning tasks from the paper,
    with or without thread context."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--model", default=None, type=str, required=True,
                            help="Model used for classification")
    parsed = arg_parser.parse_args()
    # NOTE(review): the CLI token 'ssase' looks like a typo for 'ssae' —
    # confirm before changing, callers may rely on it.
    if parsed.model == 'ssase':
        cross_validation_thread_ah_delta_context3()
    else:
        cross_validation_ah(parsed.model)
if __name__ == '__main__':
    # main() runs the paper's cross-validation experiments; main2() runs the
    # custom train/test pipeline for the no-context models.
    # main()
    main2()
| 10,892 | 3,250 |
import pytest
import numpy as np
from ardent.utilities import _validate_scalar_to_multi
from ardent.utilities import _validate_ndarray
from ardent.utilities import _validate_xyz_resolution
from ardent.utilities import _compute_axes
from ardent.utilities import _compute_coords
from ardent.utilities import _multiply_by_affine # TODO: write test for this function.
"""
Test _validate_scalar_to_multi.
"""
def test__validate_scalar_to_multi():
    """Verify _validate_scalar_to_multi on valid and invalid inputs."""
    # Proper use: (kwargs, expected output) pairs, checked in order.
    good_cases = [
        (dict(value=1, size=1, dtype=float), np.array([1], float)),
        (dict(value=1, size=0, dtype=int), np.array([], int)),
        (dict(value=9.5, size=4, dtype=int), np.full(4, 9, int)),
        (dict(value=[1, 2, 3.5], size=3, dtype=float), np.array([1, 2, 3.5], float)),
        (dict(value=[1, 2, 3.5], size=3, dtype=int), np.array([1, 2, 3], int)),
        (dict(value=(1, 2, 3), size=3, dtype=int), np.array([1, 2, 3], int)),
        (dict(value=np.array([1, 2, 3], float), size=3, dtype=int), np.array([1, 2, 3], int)),
    ]
    for kwargs, correct_output in good_cases:
        assert np.array_equal(_validate_scalar_to_multi(**kwargs), correct_output)
    # Improper use: (kwargs, expected exception, match pattern) triples.
    bad_cases = [
        (dict(value=[1, 2, 3, 4], size='size: not an int', dtype=float),
         TypeError, "size must be interpretable as an integer."),
        (dict(value=[], size=-1, dtype=float),
         ValueError, "size must be non-negative."),
        (dict(value=[1, 2, 3, 4], size=3, dtype=int),
         ValueError, "The length of value must either be 1 or it must match size."),
        (dict(value=np.arange(3*4, dtype=int).reshape(3,4), size=3, dtype=float),
         ValueError, "value must not have more than 1 dimension."),
        (dict(value=[1, 2, 'c'], size=3, dtype=int),
         ValueError, "value and dtype are incompatible with one another."),
        (dict(value='c', size=3, dtype=int),
         ValueError, "value and dtype are incompatible with one another."),
    ]
    for kwargs, expected_exception, match in bad_cases:
        with pytest.raises(expected_exception, match=match):
            _validate_scalar_to_multi(**kwargs)
"""
Test _validate_ndarray.
"""
def test__validate_ndarray():
    """Verify _validate_ndarray on valid inputs, bad arguments, and bad arrays."""
    # Proper use: (kwargs, expected output) pairs, checked in order.
    good_cases = [
        (dict(array=np.arange(3, dtype=int), dtype=float), np.arange(3, dtype=float)),
        (dict(array=[[0,1,2], [3,4,5]], dtype=float), np.arange(2*3, dtype=float).reshape(2,3)),
        (dict(array=np.array([0,1,2]), broadcast_to_shape=(2,3)), np.array([[0,1,2], [0,1,2]])),
        (dict(array=np.array(7), required_ndim=1), np.array([7])),
    ]
    for kwargs, correct_output in good_cases:
        assert np.array_equal(_validate_ndarray(**kwargs), correct_output)
    # Improper use: argument-validation failures first, then array-validation
    # failures, as (kwargs, expected exception, match pattern) triples.
    bad_cases = [
        (dict(array=np.arange(3), minimum_ndim=1.5),
         TypeError, "minimum_ndim must be of type int."),
        (dict(array=np.arange(3), minimum_ndim=-1),
         ValueError, "minimum_ndim must be non-negative."),
        (dict(array=np.arange(3), required_ndim=1.5),
         TypeError, "required_ndim must be either None or of type int."),
        (dict(array=np.arange(3), required_ndim=-1),
         ValueError, "required_ndim must be non-negative."),
        (dict(array=np.arange(3), dtype="not of type type"),
         TypeError, "dtype must be either None or a valid type."),
        (dict(array=np.array(print), dtype=int),
         TypeError, "array is of a type that is incompatible with dtype."),
        (dict(array=np.array('string that is not an int'), dtype=int),
         ValueError, "array has a value that is incompatible with dtype."),
        (dict(array=np.array([[], 1]), dtype=None, forbid_object_dtype=True),
         TypeError, "Casting array to a np.ndarray produces an array of dtype object \nwhile forbid_object_dtype == True and dtype != object."),
        (dict(array=np.arange(3), required_ndim=2),
         ValueError, "If required_ndim is not None, array.ndim must equal it unless array.ndim == 0 and required_ndin == 1."),
        (dict(array=np.arange(3), minimum_ndim=2),
         ValueError, "array.ndim must be at least equal to minimum_ndim."),
    ]
    for kwargs, expected_exception, match in bad_cases:
        with pytest.raises(expected_exception, match=match):
            _validate_ndarray(**kwargs)
"""
Test _validate_xyz_resolution.
"""
def test__validate_xyz_resolution():
    """Verify _validate_xyz_resolution on valid and invalid resolutions."""
    # Proper use: scalars broadcast to ndim; sequences pass through as float.
    good_cases = [
        (dict(ndim=1, xyz_resolution=2), np.full(1, 2, float)),
        (dict(ndim=4, xyz_resolution=1.5), np.full(4, 1.5, float)),
        (dict(ndim=3, xyz_resolution=np.ones(3, int)), np.ones(3, float)),
        (dict(ndim=2, xyz_resolution=[3, 4]), np.array([3, 4], float)),
    ]
    for kwargs, correct_output in good_cases:
        assert np.array_equal(_validate_xyz_resolution(**kwargs), correct_output)
    # Improper use: negative and zero elements are both rejected.
    for bad_resolution in ([3, -4], [3, 0]):
        with pytest.raises(ValueError,
                           match="All elements of xyz_resolution must be positive."):
            _validate_xyz_resolution(ndim=2, xyz_resolution=bad_resolution)
"""
Test _compute_axes.
"""
def test__compute_axes():
    """Verify _compute_axes against manually constructed per-dimension axes."""
    # _compute_axes produces a list with a np.ndarray for each element in shape.
    # Centered cases: each axis is shifted so that its mean lies at zero.
    centered_cases = [
        (dict(shape=(0, 1, 2), xyz_resolution=1, origin='center'),
         (0, 1, 2), (1, 1, 1)),
        (dict(shape=(1, 2, 3, 4), xyz_resolution=1.5, origin='center'),
         (1, 2, 3, 4), (1.5, 1.5, 1.5, 1.5)),
        (dict(shape=(2, 3, 4), xyz_resolution=[1, 1.5, 2], origin='center'),
         (2, 3, 4), (1, 1.5, 2)),
        (dict(shape=5, xyz_resolution=1, origin='center'),
         (5,), (1,)),
    ]
    for kwargs, dim_sizes, dim_resolutions in centered_cases:
        correct_output = [np.arange(dim_size) * dim_res - np.mean(np.arange(dim_size) * dim_res)
                          for dim_size, dim_res in zip(dim_sizes, dim_resolutions)]
        for dim, coord in enumerate(_compute_axes(**kwargs)):
            assert np.array_equal(coord, correct_output[dim])
    # Zero-origin case: axes start at zero instead of being centered.
    kwargs = dict(shape=5, xyz_resolution=1, origin='zero')
    correct_output = [np.arange(dim_size) * dim_res
                      for dim_size, dim_res in zip((5,), (1,))]
    for dim, coord in enumerate(_compute_axes(**kwargs)):
        assert np.array_equal(coord, correct_output[dim])
"""
Test _compute_coords.
"""
def test__compute_coords():
    """Verify _compute_coords stacks per-dimension axes into coordinate arrays."""
    # 1-D, centered: coordinates are offsets from the midpoint.
    centered = _compute_coords(shape=5, xyz_resolution=1, origin='center')
    assert np.array_equal(centered, np.array([[-2], [-1], [0], [1], [2]]))
    # 2-D, zero-origin: each entry is its own (row, col) index.
    zeroed = _compute_coords(shape=(3,4), xyz_resolution=1, origin='zero')
    expected = np.array([[[0,0], [0,1], [0,2], [0,3]],
                         [[1,0], [1,1], [1,2], [1,3]],
                         [[2,0], [2,1], [2,2], [2,3]]])
    assert np.array_equal(zeroed, expected)
"""
Perform tests.
"""
if __name__ == "__main__":
test__validate_scalar_to_multi()
test__validate_ndarray()
| 10,594 | 3,708 |
import requests, libvoikko, json, collections, sys

# Fetch a text file by URL (first CLI argument) and morphologically analyze
# every whitespace-separated token with Voikko (Finnish).
if len(sys.argv) > 1:
    filename = sys.argv[1]
else:
    print("Need a url that points to a text file")
    # Exit non-zero: missing argument is a usage error (original exited 0).
    sys.exit(1)

r = requests.get(filename)
normalized = r.text.split()
v = libvoikko.Voikko("fi")

# Analyze each token exactly once and keep only recognized words
# (the original called v.analyze() twice per token).
analyses = [(word, v.analyze(word)) for word in normalized]
word_forms = [(word, word_analyses) for word, word_analyses in analyses
              if len(word_analyses) > 0]

# Flatten to one dict per analysis, tagging each with the lowercased
# surface form from the book.
flat_words = []
for word, word_analyses in word_forms:
    for analysis in word_analyses:
        flat_words.append({"BOOKWORD": word.lower(), **analysis})

# Load the KOTUS word list and index it by base form.
with open("kotus_all.json") as f:
    kotus = json.loads(f.read())

book_bw = set([w["BASEFORM"] for w in flat_words])
kotus_w = set([w["word"] for w in kotus])
kotus_dict = dict([(x["word"], x) for x in kotus])

# Join: every analyzed word whose base form is in KOTUS, merged with its
# KOTUS record (declension type tn, gradation av, ...).
results = []
for bw in flat_words:
    baseform = bw["BASEFORM"]
    if baseform in kotus_dict:
        results.append({**kotus_dict[baseform], **bw})

# Frequency of (declension type, gradation, case, number) combinations.
summary = collections.Counter(
    [
        (w["tn"], w["av"], w["SIJAMUOTO"], w["NUMBER"])
        for w in results
        if "SIJAMUOTO" in w and "NUMBER" in w
    ]
)
# utils
def query(tn, av, list_):
    """Return the sorted, de-duplicated book words whose KOTUS declension
    type (tn) and gradation class (av) match the given values."""
    matches = {entry["BOOKWORD"]
               for entry in list_
               if entry["tn"] == tn and entry["av"] == av}
    return sorted(matches)
def queryform(form, list_):
    """Return the sorted, de-duplicated book words whose SIJAMUOTO
    (grammatical case) equals *form*; entries lacking the key are skipped."""
    matches = set()
    for entry in list_:
        if "SIJAMUOTO" in entry and entry["SIJAMUOTO"] == form:
            matches.add(entry["BOOKWORD"])
    return sorted(matches)
f = open("output.json", "w+")
f.write(json.dumps(results))
f.close()
print(summary) | 1,550 | 587 |
from re import compile
# Regexes flagging Portuguese first-person-plural ("impersonal") usage.
# BUG FIX: the originals placed '|', '(', ')' and '$' inside character
# classes, where they are literal characters rather than alternation or
# anchors, so the intended alternatives never matched (e.g. "nós" at end of
# string). Groups now express the alternation; raw strings avoid escape
# warnings.
IMPERSONAL_EXPRESSIONS = [
    # "nós" at the start (optionally after leading dots), followed by
    # whitespace, a period, or end of string.
    compile(r'^(\.*)nós(\s|\.|$)'),
    # "Nos ", " nos" or "-nos" followed by whitespace or end of string.
    compile(r'Nos\s| nos(\s|$)|-nos(\s|$)'),
    # "Noss(a|o)": capitalized, or lowercase preceded by a period/whitespace.
    compile(r'(N|[\.\s]n)oss[ao]'),
    # Verbs in "-emos"/"-amos" before ',', '.', whitespace, or end of string.
    compile(r'\w*[ea]mos(,|\.|\s|$)')
]
| 208 | 103 |
from .utils.compatibility import *
from .utils.der import parse, encodeConstructed, encodePrimitive, DerFieldType
from .utils.binary import hexFromByteString, byteStringFromHex, base64FromByteString, byteStringFromBase64
class Signature:
    """ECDSA signature holder (r, s) with an optional recovery id,
    convertible to and from DER byte strings and base64."""

    def __init__(self, r, s, recoveryId=None):
        self.r = r
        self.s = s
        self.recoveryId = recoveryId

    def toDer(self, withRecoveryId=False):
        """Return the DER encoding; optionally prefixed with the
        27+recoveryId recovery byte."""
        sequence = byteStringFromHex(self._toString())
        if withRecoveryId:
            return toBytes(chr(27 + self.recoveryId)) + sequence
        return sequence

    def toBase64(self, withRecoveryId=False):
        """Return the base64-encoded DER signature."""
        return base64FromByteString(self.toDer(withRecoveryId))

    @classmethod
    def fromDer(cls, string, recoveryByte=False):
        """Parse a DER byte string; if recoveryByte is set, the first byte
        carries 27 + recoveryId and is stripped before parsing."""
        recoveryId = None
        if recoveryByte:
            first = string[0]
            recoveryId = (first if isinstance(first, intTypes) else ord(first)) - 27
            string = string[1:]
        return cls._fromString(string=hexFromByteString(string), recoveryId=recoveryId)

    @classmethod
    def fromBase64(cls, string, recoveryByte=False):
        """Parse a base64-encoded DER signature."""
        return cls.fromDer(byteStringFromBase64(string), recoveryByte)

    def _toString(self):
        # DER SEQUENCE of the two INTEGERs r and s, as a hex string.
        return encodeConstructed(
            encodePrimitive(DerFieldType.integer, self.r),
            encodePrimitive(DerFieldType.integer, self.s),
        )

    @classmethod
    def _fromString(cls, string, recoveryId=None):
        # First parsed element is the (r, s) pair from the DER SEQUENCE.
        r, s = parse(string)[0]
        return Signature(r=r, s=s, recoveryId=recoveryId)
| 1,648 | 497 |
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
##
# This is a base file that is not intended to be overridden.
##
#######################################################################
# Hazard_WCN.py
#
##
##########################################################################
##
#
# SOFTWARE HISTORY
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# Oct 20, 2014 #3685 randerso Changed to support mixed case
# Jul 15, 2016 #5749 randerso Replaced ellipses with commas
#
##
import GenericHazards
import string, time, re, os, types, copy, sets
import ModuleAccessor, LogStream
import VTECTable
class TextProduct(GenericHazards.TextProduct):
Definition = copy.deepcopy(GenericHazards.TextProduct.Definition)
Definition['displayName'] = None
Definition['displayName'] = "BaselineHazard_WCN_<MultiPil> (Convective Watch)"
Definition["defaultEditAreas"] = "EditAreas_FIPS_<site>_<MultiPil>"
Definition["mapNameForCombinations"] = ["FIPS_<site>", "Marine_Zones_<site>"]
# Map background for creating Combinations
# Header configuration items
Definition["productName"] = "Watch County Notification" # name of product
Definition["fullStationID"] = "<fullStationID>" # full station identifier (4letter)
Definition["wmoID"] = "<wmoID>" # WMO ID
Definition["pil"] = "<pil>" # product pil
#Definition["areaName"] = "Statename" # Name of state, such as "Georgia"
Definition["wfoCityState"] = "<wfoCityState>" # Location of WFO - city state
Definition["wfoCity"] = "<wfoCity>" # WFO Name as it should appear in a text product
Definition["textdbPil"] = "<textdbPil>" # Product ID for storing to AWIPS text database.
Definition["awipsWANPil"] = "<awipsWANPil>" # Product ID for transmitting to AWIPS WAN.
Definition["outputFile"] = "{prddir}/TEXT/WCN_<MultiPil>.txt"
# OPTIONAL CONFIGURATION ITEMS
#Definition["database"] = "Official" # Source database. "Official", "Fcst", or "ISC"
#Definition["displayOutputDialog"] = 0 # If 1 will display results when finished
#Definition["debug"] = 1
#Definition["headlineEditAreaGroup"] = "Zones" # Name of EditAreaGroup for sampling headlines
Definition["purgeTime"] = 15 # Maximum hours for expireTime from issueTime
Definition["includeCities"] = 0 # Cities included in area header
Definition["cityDescriptor"] = "Including the cities of"
Definition["includeZoneNames"] = 0 # Zone names will be included in the area header
Definition["includeIssueTime"] = 0 # This should be set to zero for products
# that do not include a time lime below the UGC
#Definition["easPhrase"] = "" # Optional EAS phrase to be include in product header
Definition["lineLength"] = 66
#Definition["hazardSamplingThreshold"] = (10, None) #(%cov, #points)
Definition["statePartMode"] = "byState" #"byState" or "byPart" formatting
#options. byState summarizes
#count by state. "byPart"
#counts by part of state.
    def __init__(self):
        # Delegate all setup to the GenericHazards base product.
        GenericHazards.TextProduct.__init__(self)
def _preProcessProduct(self, fcst, argDict):
#
# The code below determines the set of ETNs for the header
#
self._hazards = argDict['hazards']
hazards = self._hazards.rawAnalyzedTable()
allWatchList = []
for hazard in hazards:
if hazard['etn'] not in allWatchList:
allWatchList.append(hazard['etn'])
if len(allWatchList) == 1:
watchPhrase = " for Watch " + str(allWatchList[0])
else:
watchPhrase = " for Watches "
allWatchList.sort()
for x in xrange(len(allWatchList)):
watchPhrase = watchPhrase + str(allWatchList[x])
if x != len(allWatchList) - 1:
watchPhrase = watchPhrase + "/"
#
# Special Product header code to add watch number determined above
#
if self._areaName != "":
self._areaName = " for " + self._areaName
issuedByString = self.getIssuedByString()
productName = self.checkTestMode(argDict,
self._productName + watchPhrase)
s = self._wmoID + " " + self._fullStationID + " " + \
self._ddhhmmTime + "\n" + self._pil + "\n\n"
fcst = fcst + s.upper()
s = productName + "\n" +\
"National Weather Service " + self._wfoCityState + \
"\n" + issuedByString + self._timeLabel + "\n" + self._easPhrase + "\n"
fcst = fcst + s
return fcst
def _preProcessArea(self, fcst, segmentAreas, expireTime, argDict):
#
# This is the header for an edit area combination
#
editArea = segmentAreas[0]
areaLabel = editArea
areaHeader = self.makeAreaHeader(
argDict, "", self._issueTime, expireTime,
self._areaDictionary, None, cityDescriptor=self._cityDescriptor,
areaList=segmentAreas, includeCities=self._includeCities,
includeZoneNames = self._includeZoneNames, includeIssueTime = self._includeIssueTime)
fcst = fcst + areaHeader + "\n"
return fcst
    def _makeProduct(self, fcst, segmentAreas, argDict):
        """Append the watch phrasing and area listings for one segment.

        Determines the lead-in phrase (NEW / CANCEL / EXPIRE / REPLACE
        wording) from the segment's VTEC hazard records, appends it to
        fcst, then appends the county, independent-city, marine-zone and
        city listings, and finally line-wraps the result.
        """
        argDict["language"] = self._language
        #issuance time
        issuanceTime = self._issueTime.unixTime()
        #
        # Set up the edit areas being dealt with
        #
        editArea = segmentAreas[0]
        areaLabel = editArea
        #
        # Build a list of the merged hazards being returned
        #
        listOfHazards = self._hazards.getHazardList(segmentAreas)
        # Ensure hdln is defined, since other products can reset this
        for h in listOfHazards:
            if len(h['hdln']) == 0:
                phensig = h['phen'] + '.' + h['sig']
                if VTECTable.VTECTable.has_key(phensig):
                    h['hdln'] = VTECTable.VTECTable[phensig]['hdln']
        #
        # Prepare to build phrases
        #
        attrPhrase = ""
        actionTest = []
        hazardListLength = len(listOfHazards)
        #
        # Start building phrases
        #
        phraseType = ""   #CANCEL, NEW, REPLACE, EXPIRE (zone listing wording)
        #
        # First check to see if this segment contains a CAN and a NEW
        #
        # A two-record segment is a replacement (active record paired with
        # a CAN/UPG) or an expiration (active record paired with an EXP).
        if hazardListLength == 2:
            phraseType = "REPLACE"
            activeActions = ['NEW','EXB','EXA','EXT','CON']
            #
            # Element 0 is is CAN, UPG.  Element 1 is active actions
            #
            if listOfHazards[1]['act'] in activeActions and \
              listOfHazards[0]['act'] in ['CAN', 'UPG']:
                #change forces next block to execute, code savings
                listOfHazards.reverse()
            #
            # Element 0 is active actions, Element 1 is CAN, UPG
            #
            if listOfHazards[1]['act'] in ['CAN','UPG'] and \
              listOfHazards[0]['act'] in activeActions:
                newWatch = listOfHazards[0]
                oldWatch = listOfHazards[1]
                newWatchName = self.hazardName(newWatch['hdln'], argDict,
                  False) + " " + str(newWatch['etn'])
                oldWatchName = self.hazardName(oldWatch['hdln'], argDict,
                  False) + " " + str(oldWatch['etn'])
                validTime = self.getTimingPhrase(newWatch, issuanceTime)
                attrPhrase = "The National Weather Service has issued " + \
                  newWatchName + " " + validTime + \
                  " which replaces a portion of " + oldWatchName + '. ' + \
                  "The new watch is valid for the following areas"
            #
            # Element 0 is EXP, Element 1 is active actions
            #
            if listOfHazards[1]['act'] in activeActions and \
              listOfHazards[0]['act'] == 'EXP':
                #change forces next block to execute, code savings
                listOfHazards.reverse()
            #
            # Element 0 is is active actions.  Element 1 is EXP
            #
            if listOfHazards[1]['act'] == 'EXP' and \
              listOfHazards[0]['act'] in activeActions:
                newWatch = listOfHazards[0]
                oldWatch = listOfHazards[1]
                newWatchName = self.hazardName(newWatch['hdln'], argDict,
                  False) + " " + str(newWatch['etn'])
                oldWatchName = self.hazardName(oldWatch['hdln'], argDict,
                  False) + " " + str(oldWatch['etn'])
                validTime = self.getTimingPhrase(newWatch, issuanceTime)
                # Wording depends on whether the old watch has already ended.
                if oldWatch['endTime'] > argDict['creationTime']:
                    expirePhrase = "will be allowed to expire."
                else:
                    expirePhrase = "has expired."
                attrPhrase = "The National Weather Service has issued " + \
                  newWatchName + ' ' + validTime + ". " + \
                  oldWatchName + " " + expirePhrase + \
                  " The new watch is valid for the following areas"
        #
        # Else if the hazardListLength isn't 2
        #
        else:
            # One phrase per hazard record, keyed off its VTEC action code.
            for eachHazard in listOfHazards:
                etnString = str(eachHazard['etn'])
                watchName = self.hazardName(eachHazard['hdln'], argDict,
                  False) + " " + etnString #complete name and etn
                validTime = self.getTimingPhrase(eachHazard, issuanceTime)
                #
                # Phrase for NEW
                #
                if eachHazard['act'] == 'NEW':
                    attrPhrase = "The National Weather Service has issued " +\
                      watchName + " in effect " +\
                      validTime + " for the following areas"
                    phraseType = "NEW"
                #
                # Phrase for CON
                #
                elif eachHazard['act'] == 'CON':
                    attrPhrase = watchName + " remains valid " + validTime + \
                       " for the following areas"
                    phraseType = "NEW"
                #
                # Phrase for EXP
                #
                elif eachHazard['act'] == 'EXP':
                    if eachHazard['endTime'] > argDict['creationTime']:
                        attrPhrase = "The National Weather Service" + \
                          " will allow " + watchName + " to expire " +\
                          validTime + " for the following areas"
                    else:
                        attrPhrase = "The National Weather Service" + \
                          " has allowed " + watchName + " to expire" +\
                          " for the following areas"
                    phraseType = "EXPIRE"
                #
                # Phrase for CAN
                #
                elif eachHazard['act'] == 'CAN':
                    attrPhrase = "The National Weather Service" +\
                      " has cancelled " + watchName + \
                      " for the following areas"
                    phraseType = "CANCEL"
                #
                # Phrase for EXA and EXB
                #
                elif eachHazard['act'] in ['EXA', 'EXB']:
                    attrPhrase="The National Weather Service has extended " +\
                      watchName + " to include the following areas " + \
                      validTime
                    phraseType = "NEW"
                #
                # Phrase for EXT
                #
                elif eachHazard['act'] == 'EXT':
                    phraseType = "NEW"
                    #prevExpPhrase = self.getHourAMPMTimeZone(\
                    #  eachHazard['previousEnd'], eachHazard['id'])
                    # Build the old expiration wording from a copy of the
                    # record with its previous end time substituted in.
                    prevRec = copy.deepcopy(eachHazard)
                    prevRec['endTime'] = eachHazard['previousEnd']
                    prevExpPhrase = self.getTimingPhrase(prevRec, issuanceTime)
                    attrPhrase = watchName + ", previously in effect " +\
                      prevExpPhrase + ", is now in effect " + \
                      validTime + " for the following areas"
                #
                # Generic Phrase...should never reach this point
                #
                else:
                    startingPhrase = "The National Weather Service" + \
                      " has issued |* watch type *| |* watch number *|" + \
                      " until |* watch end time *| for the following areas"
                    attrPhrase = startingPhrase
                    phraseType = "NEW"
        #
        # Add phrase to forecast
        #
        fcst = fcst + attrPhrase + '\n\n'
        # Get the phrasing set up for the type of event
        # Templates below use {area}/{number}/{placeType(s)} placeholders
        # filled in by _makeTextFromCountyTuple.
        if phraseType == "NEW":
            county1 = "In {area} this watch includes {number} {placeType}"
            county2 = "In {area} this watch includes {number} {placeTypes}"
            indepCity1 = "In {area} this watch includes {number} " + \
              "independent city"
            indepCity2 = "In {area} this watch includes {number} " + \
              "independent cities"
            marine = "This watch includes the following adjacent coastal waters"
        elif phraseType == "CANCEL":
            county1 = "In {area} this cancels {number} {placeType}"
            county2 = "In {area} this cancels {number} {placeTypes}"
            indepCity1 = "In {area} this cancels {number} INDEPENDENT CITY"
            indepCity2 = "In {area} this cancels {number} INDEPENDENT CITIES"
            marine = "This cancels the following adjacent coastal waters"
        elif phraseType == "EXPIRE":
            county1 = "In {area} this allows to expire {number} {placeType}"
            county2 = "In {area} this allows to expire {number} {placeTypes}"
            indepCity1 = "In {area} this allows to expire {number} " +\
              "independent city"
            indepCity2 = "In {area} this allows to expire {number} " +\
              "independent cities"
            marine = "This allows to expire the following adjacent coastal waters"
        elif phraseType == "REPLACE":
            county1 = "In {area} the new watch includes {number} {placeType}"
            county2 = "In {area} the new watch includes {number} {placeTypes}"
            indepCity1 = "In {area} the new watch includes {number} " + \
              "independent city"
            indepCity2 = "In {area} the new watch includes {number} " + \
              "independent cities"
            marine = "The new watch includes the following adjacent coastal waters"
        else:
            raise Exception, "Illegal phraseType in WCN formatter. " +\
              "Expected NEW, CANCEL, EXPIRE, or REPLACE.  Got " + phraseType
        # Add the list of counties
        countyTuple = self._getFilteredAreaList(
          segmentAreas, mode="COUNTY", areaDictName=self._areaDictionary)
        fcst = fcst + self._makeTextFromCountyTuple(countyTuple,
          mainFormatSingular = county1, mainFormatPlural = county2,
          mode=self._statePartMode)
        # Add the lists of independent cities
        countyTuple = self._getFilteredAreaList(
          segmentAreas, mode="CITY", areaDictName=self._areaDictionary)
        fcst = fcst + self._makeTextFromCountyTuple(countyTuple,
          mainFormatSingular = indepCity1, mainFormatPlural = indepCity2,
          mode=self._statePartMode)
        # Add the lists of marine zones
        countyTuple = self._getFilteredAreaList(
          segmentAreas, mode="ZONE", areaDictName=self._areaDictionary)
        fcst = fcst + self._makeTextFromMarineTuple(countyTuple,
          mainFormat = marine)
        # Add the lists of cities
        fcst = fcst + "\n\n" + self.getCityList(
          segmentAreas, areaDictName = self._areaDictionary, addPeriod=True,
          forceAlphaSort=True)
        #
        # Line Wrap
        #
        fcst = self.endline(fcst, linelength=self._lineLength, breakStr=[" ", "...", "-"])
        #
        # Finished
        #
        return fcst
def _postProcessArea(self, fcst, segmentAreas, argDict):
fcst = fcst + "$$\n\n"
return fcst
def _countFilteredAreaList(self, countyTuples, index):
#Returns a dictionary. dictionary is based on the 'index' element
# of the tuple (key) and is a count of the number of those
# records found.
dict = {}
for values in countyTuples:
key = values[index]
count = dict.get(key, 0)
count = count + 1
dict[key] = count
return dict
def _getFilteredAreaList(self, areaList, areaDictName="AreaDictionary",
                         mode="COUNTY"):
    """Return sorted (state, partOfState State, name) tuples for areaList.

    mode selects which UGC codes are kept:
      'COUNTY' -- ssCnnn codes that are not independent cities
      'CITY'   -- ssCnnn codes flagged independentCity in the dictionary
      'ZONE'   -- ssZnnn (marine zone) codes

    Missing AreaDictionary entries are logged as problems and filled
    with placeholder text rather than dropped.  Replaced Python 2-only
    dict.has_key() and raise-statement syntax with 'in' tests and
    Exception(...) calls (equivalent behavior).
    """
    # Access the UGC information for the area(s) if available.
    areaDict = ModuleAccessor.ModuleAccessor().variable(areaDictName,
                                                        "AreaDictionary")
    if areaDict is None:
        return []

    # Marine zones are sorted by zone code up front; the state/part sort
    # at the bottom is skipped for them.
    if mode == "ZONE":
        areaList.sort()

    # Build a list of (state, partOfStateAndState, name) tuples.
    countyList = []
    for areaName in areaList:
        if areaName in areaDict:
            entry = areaDict[areaName]
        else:
            entry = {}
            LogStream.logProblem(
                "AreaDictionary missing definition for [" + areaName + "].")

        # Drop codes that do not belong to the requested mode.
        if mode == "COUNTY":
            if len(areaName) == 6 and areaName[2] != "C":  # not ssCnnn
                continue  # not a county fips
            if entry.get("independentCity") == 1:
                continue  # independent city, when in county mode
        elif mode == "CITY":
            if len(areaName) == 6 and areaName[2] != "C":  # not ssCnnn
                continue  # not a county/city fips
            if entry.get("independentCity", 0) == 0:
                continue  # not independent city, when in city mode
        elif mode == "ZONE":
            if len(areaName) == 6 and areaName[2] != "Z":  # not ssZnnn
                continue  # not a zone code
        else:
            raise Exception("Illegal mode specified " + mode)

        if "ugcName" in entry and len(entry['ugcName']):
            # Full state name, with a placeholder if undefined.
            state = areaName[0:2]
            if "fullStateName" in entry and len(entry['fullStateName']):
                state = entry["fullStateName"]
            else:
                state = "<fullStateName for " + state + " missing>"
                LogStream.logProblem("AreaDictionary does not contain " +
                                     'fullStateName definition for ', areaName)
            # Part-of-state information with state (not used for zones).
            if mode == "ZONE":  # marine
                partOfState = ""
            else:
                if "partOfState" in entry and len(entry['partOfState']):
                    partOfState = entry["partOfState"] + ' ' + state
                else:
                    partOfState = "<partOfState> " + state
                    LogStream.logProblem(
                        "AreaDictionary does not contain " +
                        'partOfState definition for ', areaName)
            # County name; strip place-type words in COUNTY mode.
            county = entry["ugcName"]
            if mode == "COUNTY":
                val = ['County', 'Counties', 'Parish', 'Parishes']
                for v in val:
                    county = county.replace(" " + v, "")
            countyList.append((state, partOfState, county))
        else:
            # Missing ugcName: keep the raw code with placeholder fields.
            countyList.append(("<ugcName>", "<ugcName>", areaName))
            LogStream.logProblem("AreaDictionary does not contain " +
                                 'ugcName definition for ', areaName)

    # Sort by state, part of state, then county (zones keep code order).
    if mode != "ZONE":
        countyList.sort()
    return countyList
def _makeTextFromMarineTuple(self, countyTuple, lineLength=66, colWidth=22,
mainFormat="This watch includes the following adjacent coastal waters"):
#countyTuple: (state, partOfStateAndState, name)
#extract out the marine zones
mzones = []
for state, partOfState, name in countyTuple:
mzones.append(name)
if len(mzones) == 0:
return ""
return mainFormat + "\n\n" + \
self.formatCountyColumns(mzones, colWidth, lineLength) + '\n\n'
def _makeTextFromCountyTuple(self, countyTuple, lineLength=66, colWidth=22,
mainFormatSingular="In {area} this watch includes {number} {placeType}",
mainFormatPlural="In {area} this watch includes {number} {placeTypes}",
subFormat="In {area}", mode="byState"):
#countyTuple: (state, partOfStateAndState, name)
#The type of text depends upon the mode: "byState" or "byPart"
# "byState" formatting:
# mainFormatSingular/mainFormatPlural (for each state)
# subFormat (for each partOfState)
# column formatting of names
#
# "byPart" formatting:
# (subFormat not used):
# mainFormatSingular/mainFormatPlural (for each partOfState State)
# column formatting of names
# Format
if mode == "byState":
return self._byStateTextFromCountyTuple(countyTuple, lineLength,
colWidth, mainFormatSingular, mainFormatPlural, subFormat)
elif mode == "byPart":
return self._byPartTextFromCountyTuple(countyTuple, lineLength,
colWidth, mainFormatSingular, mainFormatPlural)
else:
raise Exception, "Illegal mode in makeTextFromCountyTuple(): " +\
`mode`
def _byStateTextFromCountyTuple(self, countyTuple, lineLength,
        colWidth, mainFormatSingular, mainFormatPlural, subFormat):
    """Format county text grouped by state, then by part of state.

    countyTuple holds (state, partOfStateAndState, name) entries, assumed
    sorted so equal states and parts are adjacent.  Per state a
    singular/plural header is emitted, then per part-of-state a subheader
    and the names in columns.  District of Columbia is special-cased;
    Louisiana uses parish/parishes instead of county/counties.
    Replaced the Python 2-only string.replace(s, a, b) helper with the
    equivalent str.replace method.
    """
    # County count per state drives singular/plural header choice.
    counts = self._countFilteredAreaList(countyTuple, 0)

    # Regroup countyTuple as [(state, [(partOfStateAndState, [names])])].
    geoList = []
    geoPList = []
    names = []
    curState = None
    curPart = None
    for state, partState, name in countyTuple:
        if curState == state:
            if curPart == partState:
                names.append(name)
            else:
                if len(names):
                    geoPList.append((curPart, names))
                names = [name]
                curPart = partState
        else:
            if len(names):
                geoPList.append((curPart, names))
            if len(geoPList):
                geoList.append((curState, geoPList))
            geoPList = []
            names = [name]
            curPart = partState
            curState = state
    if len(names):
        geoPList.append((curPart, names))
    geoList.append((curState, geoPList))

    # Now format the text.
    result = ''
    for state, partStateNames in geoList:
        # Special case: District of Columbia, no part-of-state descriptors.
        if state == "District of Columbia":
            result = result + "The District of Columbia\n\n"
            continue
        ccount = counts.get(state, 0)
        if ccount > 1:
            header = mainFormatPlural
        else:
            header = mainFormatSingular
        header = header.replace('{area}', state)
        header = header.replace('{number}', str(ccount))
        if state == "Louisiana":
            header = header.replace('{placeType}', "parish")
            header = header.replace('{placeTypes}', "parishes")
        else:
            header = header.replace('{placeType}', "county")
            header = header.replace('{placeTypes}', "counties")
        result = result + header + '\n\n'
        for partState, names in partStateNames:
            subheader = subFormat.replace('{area}', partState)
            result = result + subheader + '\n\n'
            counties = self.formatCountyColumns(names, colWidth,
                                                lineLength)
            result = result + counties + '\n\n'
    return result
def _byPartTextFromCountyTuple(self, countyTuple, lineLength,
        colWidth, mainFormatSingular, mainFormatPlural):
    """Format county text grouped by part-of-state ("byPart" mode).

    countyTuple holds (state, partOfStateAndState, name) entries, assumed
    sorted so equal part-of-state values are adjacent.  Per part a
    singular/plural header is emitted followed by the names in columns.
    District of Columbia is special-cased; Louisiana uses
    parish/parishes instead of county/counties.  Replaced the Python
    2-only string.replace(s, a, b) helper with the equivalent
    str.replace method.
    """
    # County count per part-of-state drives singular/plural choice.
    counts = self._countFilteredAreaList(countyTuple, 1)

    # Regroup countyTuple as [(partOfStateAndState, [names])].
    geoList = []
    names = []
    curSection = None  # current partOfStateAndState
    for state, partState, name in countyTuple:
        if partState == curSection:
            names.append(name)
        else:
            if len(names):
                geoList.append((curSection, names))
            names = [name]
            curSection = partState
    if len(names):
        geoList.append((curSection, names))

    # Now format the text.
    result = ''
    for partState, names in geoList:
        # Special case: District of Columbia.
        if partState.find("District of Columbia") != -1:
            result = result + "The District of Columbia\n\n"
            continue
        ccount = counts.get(partState, 0)
        if ccount > 1:
            header = mainFormatPlural
        else:
            header = mainFormatSingular
        header = header.replace('{area}', partState)
        header = header.replace('{number}', str(ccount))
        if partState.find("Louisiana") != -1:
            header = header.replace('{placeType}', "parish")
            header = header.replace('{placeTypes}', "parishes")
        else:
            header = header.replace('{placeType}', "county")
            header = header.replace('{placeTypes}', "counties")
        counties = self.formatCountyColumns(names, colWidth, lineLength)
        result = result + header + '\n\n' + counties + '\n\n'
    return result
def allowedHazards(self):
    """List the (phen.sig, actions, category) hazards this product handles.

    Tornado and Severe Thunderstorm watches, each allowing the full set
    of VTEC actions.
    """
    all_actions = ["NEW", "EXA", "EXB", "EXT", "CAN", "CON", "EXP"]
    return [(phen_sig, all_actions, 'Convective')
            for phen_sig in ('TO.A', 'SV.A')]
| 28,636 | 7,869 |
"""Main module."""
import asyncio
import logging
from pathlib import Path
from typing import Optional

from asynccpu import ProcessTaskPoolExecutor

from showroompodcast import CONFIG
from showroompodcast.archiving_task_manager import ArchivingTaskManager
from showroompodcast.showroom_archiver import TIME_TO_FORCE_TARMINATION, ShowroomArchiver
from showroompodcast.showroom_poller import ShowroomPoller
from showroompodcast.slack.slack_client import SlackNotification
class ShowroomPodcast:
    """Main class: polls configured rooms and archives streams in a process pool."""

    def __init__(
        self, *, path_to_configuraion: Optional[Path] = None, time_to_force_termination: int = TIME_TO_FORCE_TARMINATION
    ) -> None:
        # PEP 484: a default of None requires an explicit Optional[...]
        # annotation (implicit Optional is deprecated).
        # NOTE(review): basicConfig here reconfigures the root logger for
        # the whole process -- confirm this is intentional.
        logging.basicConfig(level=logging.DEBUG)
        CONFIG.load(path_to_configuraion)
        self.showroom_archiver = ShowroomArchiver(time_to_force_termination=time_to_force_termination)
        self.archiving_task_manager = ArchivingTaskManager(CONFIG.list_room_id)
        self.logger = logging.getLogger(__name__)

    def run(self):
        """Run the archive loop; log and (if configured) report fatal errors to Slack."""
        try:
            asyncio.run(self.archive_repeatedly())
        except Exception as error:
            self.logger.exception(error)
            if CONFIG.slack.bot_token is not None and CONFIG.slack.channel is not None:
                SlackNotification(CONFIG.slack.bot_token, CONFIG.slack.channel).post_error(error)
            # Bare raise re-raises with the original traceback intact.
            raise

    async def archive_repeatedly(self):
        """Poll all configured rooms forever, scheduling archive tasks on the executor."""
        with ProcessTaskPoolExecutor(max_workers=CONFIG.number_process, cancel_tasks_when_shutdown=True) as executor:
            showroom_poller = ShowroomPoller(self.showroom_archiver, executor)
            while True:
                await self.archiving_task_manager.poll_all_rooms(showroom_poller)
# coding=utf-8
import requests
from PyQt5.QtCore import QThread, pyqtSignal
from urllib import request, parse
from utils import LogTools
# Module-wide system-log helper instance used for error reporting below.
sysLog = LogTools.SysLogs()
class Http:
    """Thin HTTP helper with request/response interceptor registries.

    The interceptor lists themselves are attached to the class *after*
    this definition (Http.requestInterceptor / Http.responseInterceptor),
    so they are shared by all instances.
    """

    # NOTE(review): pyqtSignal normally only works when declared as a
    # class attribute on a QObject subclass; assigning one to an instance
    # attribute in __init__ looks non-functional -- confirm intent.
    signal = None  # the pyqtSignal(...) call lists the emitted parameter types

    def __init__(self):
        self.signal = pyqtSignal(object)

    # ============================ interceptor management ============================ #

    def __addInterceptor(self, interceptor, fn):
        '''
        Private helper: append an interceptor callback.
        :param interceptor: the interceptor list to operate on
        :param fn: the interceptor callback to add
        :return: index of the added callback
        '''
        interceptor.append(fn)
        return len(interceptor) - 1

    def __removeInterceptor(self, interceptor, index):
        '''
        Remove an existing interceptor.
        :param interceptor: the interceptor list to operate on
        :param index: either the index returned when the interceptor was
                      added, or the callback object itself
        :return: None
        '''
        try:
            if isinstance(index, int):
                if index < len(interceptor):
                    # NOTE(review): list.remove deletes the first *equal*
                    # element; with duplicate callbacks this may not be
                    # the one at ``index``.
                    interceptor.remove(interceptor[index])
            else:
                interceptor.remove(index)
        except Exception:
            print('remove interceptor failed,because can not find it')

    def addRequestInterceptor(self, fn):
        '''
        Register a request interceptor, invoked before a request is sent.
        :param fn: request interceptor callback
        :return: index of the registered callback
        '''
        return self.__addInterceptor(self.requestInterceptor, fn)

    def removeRequestInterceptor(self, index):
        '''
        Remove a request interceptor.
        :param index: index or the callback itself
        :return: None
        '''
        self.__removeInterceptor(self.requestInterceptor, index)

    def addResponseInterceptor(self, fn):
        '''
        Register a response interceptor, invoked after a response arrives.
        :param fn: response interceptor callback
        :return: index of the registered callback
        '''
        return self.__addInterceptor(self.responseInterceptor, fn)

    def removeResponseInterceptor(self, index):
        '''
        Remove a response interceptor.
        :param index: index or the callback itself
        :return: None
        '''
        self.__removeInterceptor(self.responseInterceptor, index)

    # ================================================================================ #

    def http_get(self, path, callback, params=None):
        '''
        Perform a blocking HTTPS GET and return the response body text.

        :param path: host + path WITHOUT scheme ('https://' is prepended)
        :param callback: unused here; kept for interface compatibility
        :param params: optional query parameters (default: none)
        :return: decoded response text, or None on failure
        '''
        # Default of None avoids the shared-mutable-default-argument pitfall.
        if params is None:
            params = {}
        queryStr = '?%s' % parse.urlencode(params)
        # Drop the '?' when there are no query parameters.
        if queryStr == '?':
            queryStr = ''
        try:
            response = requests.get('https://' + path + queryStr)
            response.encoding = 'utf-8'
            return response.text
        except Exception as e:
            sysLog.warn('获取数据失败:' + e.__str__())
# Request interceptors -- class-level (static) registry shared by all instances.
Http.requestInterceptor = []
# Response interceptors -- class-level (static) registry shared by all instances.
Http.responseInterceptor = []
def http_get(path, params=None):
    '''
    Perform a blocking HTTP GET and return the response body text.

    :param path: host + path WITHOUT scheme ('http://' is prepended)
    :param params: optional query parameters (default: none)
    :return: decoded response text, or None on failure
    '''
    # Default of None avoids the shared-mutable-default-argument pitfall.
    if params is None:
        params = {}
    queryStr = '?%s' % parse.urlencode(params)
    # Drop the '?' when there are no query parameters.
    if queryStr == '?':
        queryStr = ''
    try:
        response = requests.get('http://' + path + queryStr)
        response.encoding = 'utf-8'
        return response.text
    except Exception as e:
        sysLog.warn('请求失败:' + str(e))
if __name__ == '__main__':
    # http_get() prepends 'http://' itself; the original passed a full
    # URL, producing the broken 'http://https://www.baidu.com'.
    data = http_get('www.baidu.com')
    print(data)
| 3,239 | 1,265 |
import re
import typing
import xml.etree.ElementTree as ET
from unicodedata import name
from typing import *
from jinja2 import Template
# Base name of the keylayout input; derived file names share this stem.
keylayout_file = 'keylayout_file'
# Input XML (a Mac .keylayout file is XML) and the generated HTML report.
keylayout_xml = keylayout_file + '.xml'
keylayout_html = keylayout_file + '.html'
# U+FFFD REPLACEMENT CHARACTER, used below to mark unnameable code points.
TOFU = '\ufffd'
def mapindex_by_modifier(map_to_modifier: Dict[int, str], modifier: str) -> int:
    '''Return the index (in the XML list) of the keyboard map produced by the given modifiers.'''
    matching = [idx for idx, mods in map_to_modifier.items() if mods == modifier]
    # IndexError here means no map is produced by that modifier string.
    return matching[0]
def map_by_index(tree: ET.Element) -> Dict[int, Dict[int, str]]:
    '''Return {map index: {key code: output char}} for the file's single keyMapSet.'''
    def decode_output(raw: str) -> str:
        # Outputs rewritten by tweaked_xml look like '0x00E9' (6 chars);
        # decode those to the character, pass everything else through.
        return chr(int(raw, 16)) if len(raw) == 6 else raw

    keyMapSets = tree.findall('./keyMapSet')
    assert len(keyMapSets) == 1, 'For now, only supports a single KeyMapSet in the file, found %d' % len(keyMapSets)
    the_only_set = keyMapSets[0]

    key_maps: Dict[int, Dict[int, str]] = {}
    for key_map in the_only_set:
        outputs_by_code = {}
        for one_key in key_map:
            outputs_by_code[int(one_key.attrib['code'])] = decode_output(one_key.attrib['output'])
        key_maps[int(key_map.attrib['index'])] = outputs_by_code
    return key_maps
def modifier_by_mapindex(tree: ET.Element) -> Dict[int, str]:
    '''Get the modifiers that produce a keyboard map, given its index in the XML.'''
    def shorten_modifier_descriptions(s: str) -> str:
        '''Abbreviate the names of the modifiers, using Mac modifier icons. Separate with semicolons.'''
        # BUG FIX: 'Command' previously mapped to '⇧' (the Shift icon);
        # the Mac Command icon is '⌘' (U+2318).
        conversions = {'Shift': '⇧', 'Option': '⌥', 'Command': '⌘', 'Control': '⌃',
                       ' ': '; '}
        for in_, out in conversions.items():
            s = re.sub(in_, out, s, flags=re.IGNORECASE)
        return s

    keyMapSelects = tree.find("./modifierMap").findall('./keyMapSelect')
    return {
        int(keyMapSelect.attrib['mapIndex']):
            shorten_modifier_descriptions(keyMapSelect.find('./modifier').attrib['keys'])
        for keyMapSelect in keyMapSelects}
def tweaked_xml() -> str:
    '''Read the XML and fix entity markers.'''
    with open(keylayout_xml, 'r') as handle:
        raw = handle.read()
    # Rewrite entities like &#xABCD; as 0xABCD.  This is necessary
    # because the XML parser can choke on inability to resolve entities,
    # which we do not want to do anyway.
    return re.sub(r'&#(x[\dA-F]{4});', r'0\g<1>', raw)
def build_table(ascii_keyboard, unmodified_nonasciii_keyboard, map_by_index_,
                modifier_by_mapindex_: Dict[int, str]) -> List[Dict[str, str]]:
    # Build one table row per (modifier combination, key) that produces a
    # named, printable character: modifier description, the key's ASCII
    # output, its unmodified non-ASCII output, the produced character and
    # that character's official Unicode name.
    def sort_by_asciifirst_and_moddescription_length(modifier_by_mapindex_, ascii_keyboard_):
        modifier_by_map_index_items = list(modifier_by_mapindex_.items())
        '''
        We are using length of modifiers to sort the keyboards, since a single
        modifier is more "common" than multiple modifiers, and NO modifiers
        is most common of all. However, we put the ASCII keyboard first.
        '''
        # Sort by number of ';'-separated modifiers, then alphabetically.
        modifier_by_map_index_items.sort(key=lambda item: (item[1].count(';'), item[1]))
        # NOTE(review): the value stored under key 0 here is the ASCII
        # *keyboard dict*, not a modifier string.  The merge below relies
        # on index 0 also existing in modifier_by_mapindex_ so the real
        # modifier string overwrites it -- the literal only forces index 0
        # to come first in iteration order.  If index 0 were ever absent,
        # the loop in build_table would call .strip() on a dict; confirm
        # index 0 is always present.
        ascii_keyboard_dict = {0: ascii_keyboard_}  # Put it first
        modifier_by_mapindex_ = dict(modifier_by_map_index_items)
        modifier_by_mapindex_ = {**ascii_keyboard_dict, **modifier_by_mapindex_}
        return modifier_by_mapindex_

    def unicode_name(s: str) -> str:
        '''Get the official unicode name for a character.'''
        if not s:
            return ""
        names = []
        for ch in s:
            try:
                names.append(name(ch))
            except ValueError:  # codepoints like 4,12,16,127
                # Code points including 1...31 and 127 have no name.
                names.append(TOFU)  # tofu
        return ' & '.join(names)

    modifier_by_mapindex_ = sort_by_asciifirst_and_moddescription_length(modifier_by_mapindex_, ascii_keyboard)
    rows = []
    for idx, modifier in modifier_by_mapindex_.items():
        modified_keyboard = map_by_index_[idx]
        for key_idx in modified_keyboard:
            modified_key = modified_keyboard[key_idx]
            # Skip outputs that are empty or entirely unnameable (tofu).
            if unicode_name(modified_key) not in [TOFU, '']:
                # A blank modifier string means "no modifiers held".
                if not modifier.strip():
                    modifier = '<NONE>'
                rows.append({
                    'modifier': modifier,
                    'ascii': ascii_keyboard[key_idx],
                    'unmodified_non_ascii_key': unmodified_nonasciii_keyboard[key_idx],
                    'modified_key': modified_key,
                    'unicode_name': unicode_name(modified_key)})
    return rows
def render(title, rows: List[Dict[str, str]]):
    """Render rows as an HTML table and write the result to keylayout_html.

    Header cells come from the keys of the first row (assumes rows is
    non-empty and all rows share the same keys -- TODO confirm); data
    cells come from each row's values in insertion order.
    """
    template = """
<!DOCTYPE html>
<html>
<head>
<title>{{ title|escape }}</title>
<meta charset="UTF-8">
<style>
th {
background: #ACA
}
tr:nth-child(even) {background: #CAC}
tr:nth-child(odd) {background: #EEE}
table {
border: 1px solid black;
}
</style>
</head>
<body>
<table>
<tr><th>{%- for k in item_list[0]%}{{k.replace('_',' ').title()|escape}}{%- if not loop.last %}</th><th>{%- endif %}{%- endfor %}</th></tr>
{%- for item in item_list %}
<tr><td>{%- for v in item.values() %}{{v|escape}}{%- if not loop.last %}</td><td>{%- endif %}{%- endfor %}</td></tr>
{%- endfor %}
</table>
</body>
</html>"""
    rendered = Template(template).render(title=title, item_list=rows)
    with open(keylayout_html, 'w') as f:
        f.write(rendered)
def main():
    """Parse the keylayout XML and render its keyboard maps as an HTML table."""
    tree = ET.fromstring(tweaked_xml())
    index_to_map = map_by_index(tree)
    index_to_modifier = modifier_by_mapindex(tree)
    # The map reached with no modifiers held, and the base (ASCII) map.
    plain_index = mapindex_by_modifier(index_to_modifier, '')
    unmodified_nonasciii_keyboard = index_to_map[plain_index]
    ascii_keyboard = index_to_map[0]
    rows = build_table(ascii_keyboard, unmodified_nonasciii_keyboard,
                       index_to_map, index_to_modifier)
    render(tree.attrib['name'], rows)
if __name__ == '__main__':
    # Removed a dead trailing `pass` statement.
    main()
| 6,293 | 2,063 |