index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
7,500 | f87c036c1eb5026e088bed62fbc330cfd2ea1952 | # coding=utf8
from __future__ import print_function
from application.controllers import *
from application.models import board
def __return__():
    """Render the board overview page with every lecturer and discipline."""
    context = {
        'lecturers': board.Lecturer.query.all(),
        'disciplines': board.Discipline.query.all(),
    }
    return render_template('board/board.html', **context)
def __return_modal__(id):
lecturer = board.Lecturer.query.get(id)
print("esdasd"+lecturer.description)
return render_template("board/modal.html", lecturer = lecturer) |
7,501 | 3d55a5b4e332523025f65e5f5859f4633f4ee9a3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created with YooLiang Technology (侑良科技).
# Author: Qi-Liang Wen (温啓良)
# Web: http://www.yooliang.com/
# Date: 2015/7/12.
from monkey import BasicModel
from monkey import Fields
class WebInformationModel(BasicModel):
    """Per-site record: domain registration, hosting rental, manager and contact info."""
    class Meta:
        # Display labels (Traditional Chinese) used by the backend/admin UI.
        label_name = {
            "title": u"通用名稱",
            "name": u"識別碼",
            "domain_registration": u"網域註冊地",
            "domain_registration_price": u"網域註冊費用",
            "domain_registration_date": u"網域註冊日",
            "domain_expiration_date": u"網域到期日",
            "space_rental_level": u"伺服器等級",
            "space_rental_price": u"空間費用",
            "space_rental_date": u"空間租借日",
            "space_expiration_date": u"空間到期日",
            "manager_company": u"管理公司",
            "manager_website": u"公司網址",
            "manager_person": u"管理人姓名",
            "manager_telephone": u"管理人電話",
            "manager_mobile": u"管理人手機",
            "manager_email": u"管理人信箱",
            "contact_person": u"聯絡人",
            "contact_telephone": u"聯絡電話",
            "contact_mobile": u"聯絡手機",
            "contact_email": u"聯絡信箱",
            "contact_address": u"聯絡地址",
            "is_enable": u"顯示於前台",
        }
    # Identity
    title = Fields.StringProperty(required=True)  # the only required field
    name = Fields.StringProperty()  # identifier code used by get_by_name()
    # Domain registration details
    domain_registration = Fields.StringProperty()
    domain_registration_price = Fields.StringProperty()
    domain_registration_date = Fields.DateProperty()
    domain_expiration_date = Fields.DateProperty()
    # Hosting (server space) rental details
    space_rental_level = Fields.StringProperty()
    space_rental_price = Fields.StringProperty()
    space_rental_date = Fields.DateProperty()
    space_expiration_date = Fields.DateProperty()
    # Managing company / manager contact details
    manager_company = Fields.StringProperty(default=u"侑良科技")
    manager_website = Fields.StringProperty(default="http://")
    manager_person = Fields.StringProperty()
    manager_telephone = Fields.StringProperty()
    manager_mobile = Fields.StringProperty()
    manager_email = Fields.StringProperty()
    # Customer-side contact details
    contact_person = Fields.StringProperty()
    contact_telephone = Fields.StringProperty()
    contact_mobile = Fields.StringProperty()
    contact_email = Fields.StringProperty()
    contact_address = Fields.StringProperty()
    is_enable = Fields.BooleanProperty(default=True)  # per label: show on the public site
    @classmethod
    def get_by_name(cls, name):
        # Fetch the first record whose `name` (identifier code) matches.
        return cls.query(cls.name==name).get()
|
7,502 | 11d96a8a400afb0861b92d8900e003826614c99a | # Generated by Django 3.1.3 on 2020-11-19 06:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename dg.sn -> dg.id, add two CharFields, and move the table to dg_gen."""
    dependencies = [
        ('myems', '0004_auto_20201118_1446'),
    ]
    operations = [
        migrations.RenameField(
            model_name='dg',
            old_name='sn',
            new_name='id',
        ),
        migrations.AddField(
            model_name='dg',
            name='code_ean13',
            # default=0 only backfills existing rows; preserve_default=False drops it afterwards
            field=models.CharField(default=0, max_length=50),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='dg',
            name='commercial_designation_in_english',
            field=models.CharField(default=0, max_length=100),
            preserve_default=False,
        ),
        migrations.AlterModelTable(
            name='dg',
            table='dg_gen',
        ),
    ]
|
7,503 | c6d6fcc242e1b63104a3f3eb788880635257ff4c | api_id = "2168275"
api_hash = "e011a9cb95b7e7e153aa5840985fc883"
|
7,504 | bdd9ebfa9a2f14d57efd527ca88032bfb0160a5e | from tkinter import *
import mathcalc as c
root= Tk()
root.title("CALCULATOR")
ent=Entry(root,width=35)
ent.grid(row=0,column=0,columnspan=3,padx=10,pady=10)
#ent.grid(row=0,column=0)
ch=''
num=ent.get()
def clicked(num):
    """Append *num* to whatever is currently shown in the entry widget."""
    existing = ent.get()
    ent.delete(0, END)
    ent.insert(0, str(existing) + str(num))
# Wipe the entry widget entirely (the "C" button).
def click_clear():
    ent.delete(0,END)
# Remember '+' as the pending operation and echo it into the entry.
def add():
    global ch
    ch='+'
    clicked('+')
# Remember '-' as the pending operation and echo it into the entry.
def subtract():
    global ch
    ch='-'
    clicked('-')
# Remember '*' as the pending operation and echo it into the entry.
def multiply():
    global ch
    ch='*'
    clicked('*')
# Remember '/' as the pending operation and echo it into the entry.
def divide():
    global ch
    ch='/'
    clicked('/')
def equals():
    """Split the entry text on the pending operator, evaluate, and show the result."""
    if not ch:
        # Bug fix: str.split('') raises "ValueError: empty separator" when '='
        # is pressed before any operator; bail out quietly instead.
        return
    f_num, s_num = ent.get().split(ch)
    res = c.calculate(float(f_num), float(s_num), ch)
    ent.delete(0, END)
    ent.insert(0, res)
#buttons
but1=Button(root,text="1",padx=40,pady=20,command=lambda: clicked(1))
but2=Button(root,text="2",padx=40,pady=20,command=lambda: clicked(2))
but3=Button(root,text="3",padx=40,pady=20,command=lambda: clicked(3))
but4=Button(root,text="4",padx=40,pady=20,command=lambda: clicked(4))
but5=Button(root,text="5",padx=40,pady=20,command=lambda: clicked(5))
but6=Button(root,text="6",padx=40,pady=20,command=lambda: clicked(6))
but7=Button(root,text="7",padx=40,pady=20,command=lambda: clicked(7))
but8=Button(root,text="8",padx=40,pady=20,command=lambda: clicked(8))
but9=Button(root,text="9",padx=40,pady=20,command=lambda: clicked(9))
but0=Button(root,text="0",padx=40,pady=20,command=lambda: clicked(0))
but_plus=Button(root,text="+",padx=39,pady=20,command=add)
but_sub=Button(root,text="-",padx=40,pady=20,command=subtract)
but_mul=Button(root,text="*",padx=40,pady=20,command=multiply)
but_div=Button(root,text="/",padx=40,pady=20,command=divide)
but_eq=Button(root,text="=",padx=89,pady=20,command=equals)
but_clr=Button(root,text="C",padx=89,pady=20,command=click_clear)
#button place
but7.grid(row=1,column=0)
but8.grid(row=1,column=1)
but9.grid(row=1,column=2)
but4.grid(row=2,column=0)
but5.grid(row=2,column=1)
but6.grid(row=2,column=2)
but1.grid(row=3,column=0)
but2.grid(row=3,column=1)
but3.grid(row=3,column=2)
but0.grid(row=4,column=0)
but_plus.grid(row=5,column=0)
but_sub.grid(row=6,column=0)
but_mul.grid(row=6,column=1)
but_div.grid(row=6,column=2)
but_eq.grid(row=4,column=1,columnspan=2)
but_clr.grid(row=5,column=1,columnspan=2)
root.mainloop()
|
7,505 | 0edca9893d62eea6513543a1d3dd960e9e95d573 | import math
def normal(data, mean, variance):
    """Log of the normal density at *data* (pi approximated as 3.1415, as in the original)."""
    residual = data - mean
    log_norm_const = 0.5 * math.log(2 * 3.1415 * variance)
    return -(residual ** 2) / (2 * variance) - log_norm_const
a = math.log(0.33333) + normal(67.7854,6.0998,13.5408)
b = math.log(0.33333) + normal(67.7854,119.3287,9.4803)
c = math.log(0.33333) + normal(67.7854,65.7801,12.6203)
d = math.exp(a) + math.exp(b) + math.exp(c)
print math.exp(a)
print math.exp(b)
print math.exp(c)
print math.exp(a)/d |
7,506 | 2d3ab575b18144f714f06167f54cd069af4e5895 | num=int(input("enter no"))
def factorial(no):
    """Return no! for non-negative no; print a warning (and return 1) for negatives."""
    result = 1
    if no < 0:
        print("-ve no factorial not exist")
        return result
    for k in range(2, no + 1):
        result *= k
    return result
print(factorial(num)) |
7,507 | f7a493ab8e9845d0e9da33a0ee45d7c3ef66deb5 | from django.http import HttpResponse
from django.shortcuts import render
def home(request):
    """Render the home page template."""
    return render(request, 'home.html')
def people(request):
    """Render the people page template."""
    return render(request, 'people.html')
def docs(request):
    """Render the docs page template."""
    return render(request, 'docs.html')
def gallery(request, page=None):
    """Render gallery_<page>.html when a (truthy) page is given, else the gallery index."""
    if not page:
        return render(request, 'gallery.html')
    return render(request, 'gallery_' + str(page) + '.html')
def publications(request):
    """Render the publications page template."""
    return render(request, 'publications.html')
def access(request):
    """Render the access page template."""
    return render(request, 'access.html')
|
7,508 | 5cc325758d5bd99ebe49c40af4d2e339bbf64044 | import time
import click
from contextlib import contextmanager
from pathlib import Path
from model.data import CellsDataset
from model.model import build_model, train_transform, test_transform
from model.vis import plot_cells
@contextmanager
def timer(name):
    """Context manager that prints the wall-clock duration of its body in yellow."""
    started = time.time()
    yield
    elapsed = time.time() - started
    print("{color}[{name}] done in {et:.0f} s{nocolor}".format(
        name=name, et=elapsed,
        color='\033[1;33m', nocolor='\033[0m'))
@click.group()
def main():
    # Root click command group; subcommands (e.g. `train`) attach to it.
    pass
def infer(model, dataset, title):
    """Predict on *dataset* with *model* and plot images/masks before and after."""
    print(f"Infering for {title} set")
    plot_cells(*zip(*dataset))
    with timer("Predict the labels"):
        preds = model.predict(dataset)
    # Each dataset item is an (image, mask) pair.
    imgs, masks = zip(*dataset)
    plot_cells(imgs, masks, preds)
@main.command()
@click.option("--path", type=click.Path(exists=True), default="data/cells")
def train(path):
    """Fit the model on the first five data dirs, then run inference twice."""
    dirs = [p for p in Path(path).iterdir() if p.is_dir()]
    dataset = CellsDataset(dirs[:5], transform=train_transform())
    plot_cells(*zip(*dataset))
    model = build_model(max_epochs=2)
    with timer("Train the model"):
        model.fit(dataset)
    infer(model, dataset, "train")
    # Infer for all types of images
    model.set_params(batch_size=1)
    # NOTE(review): the "test" set reuses dirs[:2], a subset of the training dirs —
    # confirm this overlap is intended.
    test = CellsDataset(dirs[:2], transform=test_transform())
    infer(model, test, "test")
if __name__ == '__main__':
main()
|
7,509 | aefb49410e077180a660d17c4c646265a75969a7 | offset = input()
# NOTE(review): maps `offset` onto a clock-like scale (base 1030, 100 per step);
# the day-name mapping below is domain logic not derivable from this snippet.
cal = 1030 + int(offset) * 100
if 0 < cal < 2400:
    print('Tuesday')
elif cal < 0:
    print('Monday')
else:
    print('Wednesday')
|
7,510 | 7112eb52aea9be6f8e682b4dacc6b615365c8cea | import numpy as np
from cost_functions import trajectory_cost_fn
import time
class Controller():
    """Abstract base: subclasses map an environment state to an action."""
    def __init__(self):
        pass
    # Get the appropriate action(s) for this state(s)
    def get_action(self, state):
        pass
class RandomController(Controller):
    """Coursework stub: intended to sample actions uniformly from the action space."""
    def __init__(self, env):
        """ YOUR CODE HERE """
        pass
    def get_action(self, state):
        """ YOUR CODE HERE """
        """ Your code should randomly sample an action uniformly from the action space """
        pass
class MPCcontroller(Controller):
    """ Controller built using the MPC method outlined in https://arxiv.org/abs/1708.02596 """
    def __init__(self,
                 env,
                 dyn_model,
                 horizon=5,
                 cost_fn=None,
                 num_simulated_paths=10,
                 ):
        # Store the rollout configuration: dyn_model predicts next states,
        # cost_fn scores the num_simulated_paths simulated trajectories.
        self.env = env
        self.dyn_model = dyn_model
        self.horizon = horizon
        self.cost_fn = cost_fn
        self.num_simulated_paths = num_simulated_paths
    def get_action(self, state):
        """ YOUR CODE HERE """
        """ Note: be careful to batch your simulations through the model for speed """
|
7,511 | 8ec981bf8746e09d3865bc20dcfbf2fbd797c145 | import xl2dict
myxlobject= XlToDict()
myxlobject.convert_sheet_to_dict(file_path="Soul Breaks.xlsx", sheet="First Sheet",
filter_variables_dict={"User Type" : "Admin", "Environment" : "Dev"}) |
7,512 | fb787e688da975d37f9fcc39bf5e02957b186982 | # (c) Copyright 2015-2016 Hewlett Packard Enterprise Development LP
# (c) Copyright 2017 SUSE LLC
import json
from bll.plugins import service
import logging
import pecan
import pymysql.cursors
LOG = logging.getLogger(__name__)
class PreferencesSvc(service.SvcBase):
    """
    Simple service to manage user preferences. User preferences are stored as
    JSON in a mysql database.
    The ``target`` value for this plugin is ``preferences``. See
    :ref:`rest-api` for a full description of the request and response formats.
    """
    def __init__(self, *args, **kwargs):
        # Connect to MySQL with pecan's configured settings; rows come back as dicts.
        super(PreferencesSvc, self).__init__(*args, **kwargs)
        config = pecan.conf.db.to_dict()
        config['cursorclass'] = pymysql.cursors.DictCursor
        self.connection = pymysql.connect(**config)
    @service.expose(action='GET')
    def _get(self):
        # Return the stored preference blob for the requesting user.
        return self._get_mysql(self.data.get("user"))
    @service.expose(action='POST')
    def _post(self):
        # Create a preferences row for a new user.
        self._post_mysql(self.data.get("user"),
                         self.data.get("prefs"))
    @service.expose(action='PUT')
    def _put(self):
        # Replace the preferences of an existing user.
        self._put_mysql(self.data.get("user"),
                        self.data.get("prefs"))
    @service.expose(action='DELETE')
    def _delete(self):
        # Remove the user's preferences row.
        self._delete_mysql(self.data.get("user"))
    # Functions for writing
    def _get_mysql(self, user):
        """Return the prefs for *user* as a dict, or record an error response."""
        with self.connection.cursor() as cursor:
            sql = "SELECT `prefs` from `preferences` WHERE `username`=%s"
            cursor.execute(sql, user)
            row = cursor.fetchone()
            cursor.close()
            if row is None:
                message = self._("User {} does not exist").format(user)
                LOG.warn(message)  # NOTE(review): logger.warn is deprecated; warning() preferred
                self.response.error(message)
                return
            prefs = row.get("prefs")
            # The column may already be deserialized by the driver; only parse when needed.
            if isinstance(prefs, dict):
                return prefs
            return json.loads(prefs)
    def _post_mysql(self, user, prefs):
        """Insert a new (username, prefs-as-JSON) row and commit."""
        with self.connection.cursor() as cursor:
            sql = "INSERT INTO `preferences` (`username`, `prefs`) " + \
                  "VALUES (%s,%s)"
            cursor.execute(sql, [user, json.dumps(prefs)])
            cursor.close()
        self.connection.commit()
    def _put_mysql(self, user, prefs):
        """Update an existing user's prefs; record an error if the user is absent."""
        with self.connection.cursor() as cursor:
            sql = "select count(*) from preferences where username=%s"
            cursor.execute(sql, user)
            user_found = (cursor.fetchone()['count(*)'] == 1)
            if user_found:
                sql = "UPDATE `preferences` SET `prefs`=%s WHERE `username`=%s"
                cursor.execute(sql, [json.dumps(prefs), user])
                cursor.close()
        self.connection.commit()
        if not user_found:
            message = self._(
                "Cannot update non-existent user {}").format(user)
            LOG.warn(message)
            self.response.error(message)
    def _delete_mysql(self, user):
        """Delete the user's preferences row and commit."""
        with self.connection.cursor() as cursor:
            sql = "DELETE FROM `preferences` WHERE `username`=%s"
            cursor.execute(sql, user)
            cursor.close()
        self.connection.commit()
|
7,513 | 6cdaf89d97be8f5ef37ab35f2916a36b4c75ddbe | def pantip(k, n, arr, path,len):
if len == 0:
if sum(path)==k:
path.reverse()
print(path)
return
path.append(arr[len-1])
pantip(k,n,arr,path,len-1)
path.pop()
#backtrack
pantip(k,n,arr,path,len-1)
inp = input('Enter Input (Money, Product) : ').split('/')
arr = [int(i) for i in inp[1].split()]
len = len(arr)
pattern = pantip(int(inp[0]), 0, arr, [],len)
print("Krisada can purchase Product: {0} with: {1} Baht | {2} Pattern".format(arr, inp[0], pattern)) |
7,514 | 9f01483aaa744972fae358577e6f093bd491f357 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
CELERY_TIMEZONE = 'Asia/Shanghai'
# CELERY_RESULT_BACKEND='redis://localhost:6379/1'
# BROKER_URL='redis://localhost:6379/2'
BROKER_BACKEND = 'mongodb'  # use MongoDB as the task queue (broker)
# Bug fix (NOTE(review)): removed leftover git merge conflict markers
# (<<<<<<< / ======= / >>>>>>>) that made this module a SyntaxError. Kept the
# 127.0.0.1 "for_celery" side for consistency with the 'host' value in
# CELERY_MONGODB_BACKEND_SETTINGS below — confirm against the deployment env
# (the discarded side pointed at mongodb://10.6.0.149:27017/).
BROKER_URL = 'mongodb://127.0.0.1:27017/for_celery'  # broker (queue) address
CELERY_RESULT_BACKEND = 'mongodb://127.0.0.1:27017/for_celery'  # result store address
CELERY_MONGODB_BACKEND_SETTINGS = { # result-backend (message result store) configuration
    'host': '127.0.0.1',
    'port': 27017,
    'database': 'celery',
    # 'user':'root',
    # 'password':'root1234',
    'taskmeta_collection': 'task_meta', # collection that stores task results
}
CELERY_ROUTES = { # 配置任务的先后顺序
'celery_task.tasks.add': {'queue': 'for_add', 'router_key': 'for_add'},
'celery_task.tasks.subtract': {'queue': 'for_subtract', 'router_key': 'for_subtract'}
} |
7,515 | 07d2da14d0122ad2c8407bb13b8567ca62356bef | def build_shift_dict(self, shift):
'''
Creates a dictionary that can be used to apply a cipher to a letter.
The dictionary maps every uppercase and lowercase letter to a
character shifted down the alphabet by the input shift. The dictionary
should have 52 keys of all the uppercase letters and all the lowercase
letters only.
shift (integer): the amount by which to shift every letter of the
alphabet. 0 <= shift < 26
Returns: a dictionary mapping a letter (string) to
another letter (string).
'''
# create a new list of letters based on the shift
shifted_lowercase = list(string.ascii_lowercase[shift:]) + list(string.ascii_lowercase[:shift])
shifted_uppercase = list(string.ascii_uppercase[shift:]) + list(string.ascii_uppercase[:shift])
# empty dict
d = {}
# populate dict for lowercase
for l in range(len(string.ascii_lowercase)):
d[string.ascii_lowercase[l]] = shifted_lowercase[l]
# populate dict for uppercase
for l in range(len(string.ascii_uppercase)):
d[string.ascii_uppercase[l]] = shifted_uppercase[l]
return d |
7,516 | f1c32fe7a29cddf4f881b46f4feab06390a76a44 | # -*- coding: utf-8 -*-
import hashlib
import time
from datetime import datetime, timedelta
# Access a dict's entries like object attributes.
class ObjectLikeDict(dict):
    """dict subclass whose keys can be read as attributes; missing keys yield ''."""
    def __getattr__(self, name):
        try:
            return self[name]
        # Bug fix: the original bare `except:` swallowed every exception type;
        # only a missing key should fall back to the empty string.
        except KeyError:
            return ''
# Merge two dicts into a new one (dict2 wins on key clashes); None inputs act as {}.
def merge_dict(dict1, dict2):
    merged = (dict1 or {}).copy()
    merged.update(dict2 or {})
    return merged
# Coerce any value into a list; lists pass through unchanged.
def to_list(obj):
    if isinstance(obj, list):
        return obj
    return [obj]
# Format a Unix timestamp; falls back to the current time, rendered at UTC+8.
def fmt_time(fmt='%Y-%m-%d %H:%M:%S', seconds=None):
    if not seconds:
        seconds = now()
    moment = datetime.utcfromtimestamp(seconds) + timedelta(hours=8)  # fixed +8h offset
    return moment.strftime(fmt)
# Current Unix timestamp, truncated to whole seconds.
def now():
    return int(time.time())
# Hex MD5 digest of s.
def md5(s):
    # (The original also called .digest() and discarded the result.)
    return hashlib.md5(s).hexdigest()
# Test
if __name__ == "__main__":
dict1 = {'a': 1, 'b': 2}
dict2 = {'c': 3, 'd': 4}
print merge_dict(dict1, dict2)
print md5('')
print now()
print fmt_time() |
7,517 | 7a1a9d2e773fb783d8522f1ea51e753d5d3782e9 | import config
import math
import pygame
import utils
class Rocket:
    """Drawable rocket sprite: screen-space pose plus left/right motor power levels."""
    def __init__(self):
        # World -> screen transform: scale and recenter x; y is flipped so "up"
        # in world coordinates moves toward the top of the window.
        self.x = config.initialPosition['x']*config.game['scale'] + config.game['width']/2;
        self.y = config.game['height'] - config.game['floorHeight'] - config.initialPosition['y']*config.game['scale'];
        self.angle = config.initialPosition['angle'];
        self.angle = utils.wrapToPi(self.angle);
        self.dh = config.game['scale']*config.rocket['height']/2; #half display height
        self.dw = config.game['scale']*config.rocket['width']/2; # half display width (original comment said "height")
        self.pl = 0 #left motor power
        self.pr = 0 #right motor power
    def draw(self, display):
        """Draw the rotated body (green) and two motor triangles (red) onto *display*."""
        pSin = math.sin(self.angle); # precalculated sin
        pCos = math.cos(self.angle); # precalculated cos
        #main body: the four rotated corners of a dw x dh rectangle around (x, y)
        pygame.draw.polygon(
            display,
            config.colors['green'],
            [
                [
                    self.x+self.dw*pSin+self.dh*pCos,
                    self.y+self.dw*pCos-self.dh*pSin,
                ], [
                    self.x-self.dw*pSin+self.dh*pCos,
                    self.y-self.dw*pCos-self.dh*pSin,
                ], [
                    self.x-self.dw*pSin-self.dh*pCos,
                    self.y-self.dw*pCos+self.dh*pSin,
                ], [
                    self.x+self.dw*pSin-self.dh*pCos,
                    self.y+self.dw*pCos+self.dh*pSin,
                ]
            ]
        );
        #left motor: triangle whose length grows with power pl
        pygame.draw.polygon(
            display,
            config.colors['red'],
            [
                [
                    self.x
                    +(-self.dh-self.dw*self.pl)*pCos
                    +(-self.dw/2)*pSin,
                    self.y
                    -(-self.dh-self.dw*self.pl)*pSin
                    +(-self.dw/2)*pCos,
                ],[
                    self.x
                    +(-self.dh)*pCos
                    +(-self.dw/6)*pSin,
                    self.y
                    -(-self.dh)*pSin
                    +(-self.dw/6)*pCos,
                ],[
                    self.x
                    +(-self.dh)*pCos
                    +(-5*self.dw/6)*pSin,
                    self.y
                    -(-self.dh)*pSin
                    +(-5*self.dw/6)*pCos,
                ]
            ]
        )
        #right motor: mirror of the left one (positive dw offsets, power pr)
        pygame.draw.polygon(
            display,
            config.colors['red'],
            [
                [
                    self.x
                    +(-self.dh-self.dw*self.pr)*pCos
                    +(self.dw/2)*pSin,
                    self.y
                    -(-self.dh-self.dw*self.pr)*pSin
                    +(self.dw/2)*pCos,
                ],[
                    self.x
                    +(-self.dh)*pCos
                    +(self.dw/6)*pSin,
                    self.y
                    -(-self.dh)*pSin
                    +(self.dw/6)*pCos,
                ],[
                    self.x
                    +(-self.dh)*pCos
                    +(5*self.dw/6)*pSin,
                    self.y
                    -(-self.dh)*pSin
                    +(5*self.dw/6)*pCos,
                ]
            ]
        )
    def update(self, x, y, angle, leftPower, rightPower):
        """Move the sprite to world pose (x, y, angle); motor powers are clamped to [0, 1]."""
        self.x = x*config.game['scale'] + config.game['width']/2;
        self.y = config.game['height'] - config.game['floorHeight'] - y*config.game['scale'];
        self.angle = angle
        self.angle = utils.wrapToPi(self.angle);
        self.pl = leftPower;
        if(self.pl<0):
            self.pl = 0
        elif self.pl>1:
            self.pl = 1
        self.pr = rightPower;
        if(self.pr<0):
            self.pr = 0
        elif self.pr>1:
            self.pr = 1
|
7,518 | 6a400419c26c62471dfc6893cc2d1ff6d88e49f4 | import whoosh.index as index
from whoosh.fields import *
from whoosh.qparser import MultifieldParser
from whoosh import scoring
w = scoring.BM25F(B=0.75, content_B=1.0, K1=1.5)
fieldnames = ["bill_text", "bill_title", "year", "sponsor_name", "subject"]
boosts = {"bill_text": 1, "bill_title": 2.5, "year": 0, "sponsor_name": 0, "subject": 2.0}
#load index:
ix = index.open_dir("final_index")
writer = ix.writer()
#search:
def results(q):
    """Search the index for query string *q*; return the top-10 hits as dicts.

    Each hit carries bill_title, year, sponsor_name and a govtrack.us document URL.
    (Python 2 code: print statements, raw_input at call site.)
    """
    hits = []
    with ix.searcher(weighting=w) as searcher:
        query = MultifieldParser(fieldnames, ix.schema, fieldboosts=boosts).parse(q)
        results = searcher.search_page(query, 1, pagelen=10)
        print "\n" + str(len(results)) + " results found!"
        print "Displaying top ten results:"
        for result in results:
            if result["house_or_senate"] == "h":
                hs = "hr"
                billnum = "hr" + str(result["bill_number"])  # NOTE(review): billnum is never used
                isih = "ih"
            elif result["house_or_senate"] == "s":
                hs = "s"
                billnum = "s" + str(result["bill_number"])
                isih = "is"
            # NOTE(review): hs/isih are unbound if house_or_senate is neither "h" nor "s"
            url = "https://www.govtrack.us/data/congress/" + str(result["congress_number"]) + "/bills/" + hs + "/" + hs + str(result["bill_number"]) + "/text-versions/" + isih + "/document.txt"
            hits.append({"bill_title":result["bill_title"], "year":result["year"], "url":url, "sponsor_name":result["sponsor_name"]})
    return hits
query = raw_input("\nSearch for a term in bill text: ")
query = query.lstrip()
print results(query) |
7,519 | 9033ba0a19d765a83737d59289735a9ffd02abb1 | distance = float(input("Введите начальную дистанцию: "))
target = int(input("Введите целевую дистанцию: "))
day = 1
print("{:>3}-й день: {:.3}".format(day, distance)) # formatting is not pretty (original note)
# Distance grows by 10% per day until the target is reached.
while target > distance:
    day += 1
    distance += distance / 10
    print("{:>3}-й день: {:.3}".format(day, distance))
print("Ответ: на {}-й день спортсмен достиг результата — не менее {} км.".format(day, target)) |
7,520 | 3bc6091d822fa197dcce3cd75fa9755dc9f93592 | """Scans all files in this project for FIXME and TODO comments and writes them to todos.txt
has to be invoked while being in myLambda/ and not in e.g. myLambda/src"""
import sys
import os
import re
files = []
searchFiles = []
# get all subdirs and its files
for root, dirs, f in os.walk('./'):
    files.append((root, f))
# build filepaths out of dir and filename
for f in files[0][1]:
    searchFiles.append(files[0][0] + str(f)) # we're in ./ so we can just concat directory's name with filename
for i in range(1,len(files)): # we're in subdirs so we have to add '/' to get real paths
    for f in files[i][1]:
        searchFiles.append(files[i][0] + '/'+ f)
files = searchFiles
#remove unwanted files
# NOTE(review): list.remove raises ValueError if an entry is absent — e.g.
# './todos.txt' on the very first run, before it has been generated; confirm.
blacklist = ['./todos.txt', './todoGen.py'] + \
    [f for f in files if '__' in f or f[-3:] =='pyc' or '.git' in f]
for b in blacklist:
    files.remove(b)
print 'searching:'
map(lambda x: sys.stdout.write(x + '\n'), files)
TODO = re.compile('TODO.*') # everything after TODO in one line
FIXME = re.compile('FIXME.*') # everything after FIXME in one line
# gather todos and fixmes
todos = []
fixmes = []
for f in files:
    with open(f) as fi:
        lineNumber = 0
        for line in fi:
            lineNumber +=1
            todo = re.search(TODO, line)
            fixme = re.search(FIXME, line)
            # A line matching both counts only as a TODO (elif).
            if todo:
                todos.append((todo.group(0), f, lineNumber))
            elif fixme:
                fixmes.append((fixme.group(0), f, lineNumber))
f = open('todos.txt', 'w') #write fixmes and todos to todos.txt
f.write('#TODO#\n')
if todos == []:
    f.write('All todos are done')
else:
    for i in range(len(todos)):
        f.write('\t' + todos[i][0] + ' in file ' + todos[i][1] + ' in line ' + str(todos[i][2]) + '\n')
f.write('#FIXME#\n')
if fixmes == []:
    f.write('Nothing to fix')
else:
    for i in range(len(fixmes)):
        f.write('\t'+ fixmes[i][0] + ' in file ' + fixmes[i][1] + ' in line ' + str(fixmes[i][2]) + '\n')
print "Done"
|
7,521 | 5a7b68648898818e0db47f225f3d4b0972cd5b99 | _all__ = ["minning_algo"]
|
7,522 | 1d72a9882aea1e0f808969828ed2e69ecd79ac71 | from typing import Dict, Any
from urllib import request
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from .models import Product
from cart.forms import CartAddProductForm
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from .forms import UserForm, UserLogInForm
from django.views import generic
from django.views.generic import View
def product_list(request):
    """List every product flagged available, plus the current user, on the shop page."""
    products = Product.objects.filter(available=True)
    context = {'products': products,
               'user': request.user}
    return render(request, 'shop/product/list.html', context)
def product_detail(request, id, slug):
    """Show one available product (404 otherwise) with an add-to-cart form."""
    product = get_object_or_404(Product, id=id, slug=slug, available=True)
    cart_product_form = CartAddProductForm()
    context = {'product': product,
               'cart_product_form': cart_product_form}
    return render(request, 'shop/product/detail.html', context)
class UserFormView(View):
    """Sign-up view: GET shows a blank UserForm, POST creates and logs in the user."""
    form_class = UserForm
    template_name = 'shop/signup.html'
    # display blank form
    def get(self, request):
        form = self.form_class(None)
        return render(request, self.template_name, {'form': form})
    # process form data
    def post(self, request):
        form = self.form_class(request.POST)
        if form.is_valid():
            user = form.save(commit=False)
            # Hash the raw password before persisting the new user.
            username = form.cleaned_data['username']
            password = form.cleaned_data['password']
            user.set_password(password)
            user.save()
            # Re-authenticate with the raw credentials and start a session.
            user = authenticate(username=username, password=password)
            if user is not None:
                if user.is_active:
                    login(request, user)
                    #print(request.user.is_authenticated())
                    return redirect('/shop/')
        # Invalid form (or failed login): redisplay the sign-up page.
        return render(request, self.template_name, {'form': form})
def user_login(request):
    """Render the login form; on POST, authenticate and redirect to /shop/."""
    context = {
        'form': UserLogInForm
    }
    if request.method == "POST":
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(request, username=username, password=password)
        if user:
            login(request, user)
            return redirect('/shop/')
        else:
            # Bad credentials: redisplay the form with an error message.
            context['error'] = "Provide valid credentials"
            return render(request, 'shop/login.html', context)
    else:
        return render(request, 'shop/login.html', context)
def user_logout(request):
    """Log the user out on POST, then show the login page.

    Bug fix: the original returned None for non-POST requests, which is an
    error in a Django view; now every method lands on the login template.
    """
    if request.method == 'POST':
        logout(request)
    return render(request, "shop/login.html")
|
7,523 | 40471bfcf05ef45fbb070bbb5bfd4c425fe59b1c | # Differences between Python 2 and Python 3
print "hello world"
# become
print("hello world") # in Python 3
raw_input('What is your name?')
# become
input('What is your name?') # in Python 3
# the language of Python
# Reserved words
and
as
assert
break
class
continue
def
del
elif
else
except
finally
for
from
global
if
import
in
is
lambda
nonlocal
not
or
pass
raise
return
try
while
with
yield
# Section 1.2 |
7,524 | dfcfa4fa036fe8c058d66fc0b9ea73ddb9d4446e | from aiogram import Dispatcher
from create_bot import bot
from data_base import sqlite_db
# new user in group
async def new_member(message):
    """Greet a member who joined the chat and store them in the DB.

    Picks the first non-empty display name in order: username, first name,
    last name, then a generic placeholder.
    """
    new_user = message.new_chat_members[0]
    user_id = new_user['id']
    if new_user['username']:
        user_name = new_user['username']
    elif new_user['first_name']:
        user_name = new_user['first_name']
    elif new_user['last_name']:
        user_name = new_user['last_name']
    else:
        user_name = 'Пользователь без имени'
    await sqlite_db.sql_add_user_to_db(user_id, user_name)
    await bot.send_message(message.chat.id, f'Добро пожаловать, {user_name}!\nКоманда - /start переход'
                           f' в пользовательское меню.\nКоманда - /help помощь по командам бота.')
# left user from group
async def left_member(message):
    """Say goodbye to a departing member and delete them from the DB."""
    left_user = message.left_chat_member
    user_name = await sqlite_db.sql_get_user_name(left_user['id'])
    user_name = user_name[0][0]  # unwrap the single-row, single-column query result
    await sqlite_db.sql_del_user_from_db(left_user['id'])
    await bot.send_message(message.chat.id, f'Будем рады Вас видеть, {user_name}! Возвращайтесь!')
def register_handlers_for_other(dp: Dispatcher):
    """Hook the join/leave handlers into the aiogram dispatcher."""
    dp.register_message_handler(new_member, content_types=["new_chat_members"])
    dp.register_message_handler(left_member, content_types=["left_chat_member"])
|
7,525 | c35ecad842477fc8501a763f7eb972f6e7fc13e1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 1 11:14:13 2019
@author: dobri
"""
import numpy as np
from astropy.stats import circmean
# Circular mean (phikbar) and resultant length (rhok) for each row of phase angles in x.
x = np.multiply(np.pi,[(0,1/4,2/4,3/4,4/4),(1,5/4,6/4,7/4,8/4),(5/4,5/4,5/4,5/4,5/4),(0/5,2/5,4/5,6/5,8/5)])
s = np.shape(x)
phikprime = np.array(x*0, dtype=complex);
# NOTE(review): (s[1],1) looks like it should be (s[0],1) to match phikbar/rhok;
# it happens to work here because s[0] <= s[1] — confirm before changing.
phikprimebar = np.zeros((s[1],1), dtype=complex)
phikbar = np.zeros((s[0],1))
rhok = np.zeros((s[0],1))
for j in range(0,len(x)):
    for k in range(0,len(x[j,:])):
        # Bug fix: np.complex was removed in NumPy 1.24 (it was merely an alias
        # of the builtin complex), so use complex() directly.
        phikprime[j,k]=complex(np.cos(x[j,k]),np.sin(x[j,k]))
    phikprimebar[j] = np.sum(phikprime[j,:])/s[1]
    phikbar[j] = np.angle(phikprimebar[j])
    rhok[j] = np.absolute(phikprimebar[j])
print(phikbar[j],circmean(x[j,:]),rhok[j]) |
7,526 | dd71feda1ed5ff7ef9dee1573ad63939a3e09691 | # Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.ops.pack import PackOp
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
class StackFrontExtractor(FrontExtractorOp):
    """Maps the MXNet 'stack' layer onto the internal Pack operation."""
    op = 'stack'
    enabled = True
    @classmethod
    def extract(cls, node):
        attrs = get_mxnet_layer_attrs(node.symbol_dict)
        update_attrs = {
            'axis': attrs.int('axis', 0)  # stacking axis defaults to 0
        }
        # update the attributes of the node
        PackOp.update_node_stat(node, update_attrs)
        return cls.enabled
|
7,527 | 4a3611ecd70d80575f9f68bf45d67532a17b9c93 | import itertools
from typing import Tuple, List, Dict, Optional, Hashable, Collection
class Hypergraph:
    """
    Represents a hypergraph, consisting of nodes, directed edges,
    hypernodes (each of which is a set of nodes) and hyperedges (directed edges
    from hypernodes to hypernodes). Contains functionality to extract motifs
    from hypergraphs (Fig 2 of
    http://www.cs.cornell.edu/~cristian/Patterns_of_participant_interactions.html)
    """
    def __init__(self):
        # public
        self.nodes = dict()
        self.hypernodes = dict()
        # private
        self.adj_out = dict()  # out edges for each (hyper)node
        self.adj_in = dict()   # in edges for each (hyper)node

    def add_node(self, u: Hashable, info: Optional[Dict]=None) -> None:
        """Register node *u* with optional metadata *info*."""
        self.nodes[u] = info if info is not None else dict()
        self.adj_out[u] = dict()
        self.adj_in[u] = dict()

    def add_hypernode(self, name: Hashable,
                      nodes: Collection[Hashable],
                      info: Optional[dict]=None) -> None:
        """Register hypernode *name* covering *nodes*.

        NOTE(review): *info* is accepted but never stored (pre-existing
        behavior, preserved for compatibility).
        """
        self.hypernodes[name] = set(nodes)
        self.adj_out[name] = dict()
        self.adj_in[name] = dict()

    # edge or hyperedge
    def add_edge(self, u: Hashable, v: Hashable, info: Optional[dict]=None) -> None:
        """Add a directed (hyper)edge u -> v carrying metadata *info*.

        Both endpoints must already exist. Edges between two hypernodes are
        required to carry non-empty metadata.
        """
        assert u in self.nodes or u in self.hypernodes
        assert v in self.nodes or v in self.hypernodes
        # Bug fix: normalize a None info BEFORE the hypernode check. The
        # original evaluated info.keys() first, which raised AttributeError
        # (instead of the intended AssertionError) when info was None.
        if info is None:
            info = dict()
        if u in self.hypernodes and v in self.hypernodes:
            assert len(info) > 0
        if v not in self.adj_out[u]:
            self.adj_out[u][v] = []
        if u not in self.adj_in[v]:
            self.adj_in[v][u] = []
        # The same info dict object is intentionally shared by both adjacency maps.
        self.adj_out[u][v].append(info)
        self.adj_in[v][u].append(info)

    def edges(self) -> Dict[Tuple[Hashable, Hashable], List]:
        """Every (source, target) pair mapped to its list of edge metadata."""
        return dict(((u, v), lst) for u, d in self.adj_out.items()
                    for v, lst in d.items())

    def outgoing_nodes(self, u: Hashable) -> Dict[Hashable, List]:
        """Plain-node successors of *u* with their edge metadata."""
        assert u in self.adj_out
        return dict((v, lst) for v, lst in self.adj_out[u].items()
                    if v in self.nodes)

    def outgoing_hypernodes(self, u) -> Dict[Hashable, List]:
        """Hypernode successors of *u* with their edge metadata."""
        assert u in self.adj_out
        return dict((v, lst) for v, lst in self.adj_out[u].items()
                    if v in self.hypernodes)

    def incoming_nodes(self, v: Hashable) -> Dict[Hashable, List]:
        """Plain-node predecessors of *v* with their edge metadata."""
        assert v in self.adj_in
        return dict((u, lst) for u, lst in self.adj_in[v].items() if u in
                    self.nodes)

    def incoming_hypernodes(self, v: Hashable) -> Dict[Hashable, List]:
        """Hypernode predecessors of *v* with their edge metadata."""
        assert v in self.adj_in
        return dict((u, lst) for u, lst in self.adj_in[v].items() if u in
                    self.hypernodes)

    def outdegrees(self, from_hyper: bool=False, to_hyper: bool=False) -> List[int]:
        """Out-degree of each source (hyper)node, counting only edges into the chosen class."""
        return [sum([len(l) for v, l in self.adj_out[u].items() if v in
                     (self.hypernodes if to_hyper else self.nodes)]) for u in
                (self.hypernodes if from_hyper else self.nodes)]

    def indegrees(self, from_hyper: bool=False, to_hyper: bool=False) -> List[int]:
        """In-degree of each target (hyper)node, counting only edges from the chosen class."""
        return [sum([len(l) for u, l in self.adj_in[v].items() if u in
                     (self.hypernodes if from_hyper else self.nodes)]) for v in
                (self.hypernodes if to_hyper else self.nodes)]

    def reciprocity_motifs(self) -> List[Tuple]:
        """
        :return: List of tuples of form (C1, c1, c2, C1->c2, c2->c1) as in paper
        """
        motifs = []
        for C1, c1_nodes in self.hypernodes.items():
            for c1 in c1_nodes:
                motifs += [(C1, c1, c2, e1, e2) for c2 in self.adj_in[c1] if
                           c2 in self.nodes and c2 in self.adj_out[C1]
                           for e1 in self.adj_out[C1][c2]
                           for e2 in self.adj_out[c2][c1]]
        return motifs

    def external_reciprocity_motifs(self) -> List[Tuple]:
        """
        :return: List of tuples of form (C3, c2, c1, C3->c2, c2->c1) as in paper
        """
        motifs = []
        for C3 in self.hypernodes:
            for c2 in self.adj_out[C3]:
                if c2 in self.nodes:
                    motifs += [(C3, c2, c1, e1, e2) for c1 in
                               set(self.adj_out[c2].keys()) - self.hypernodes[C3]
                               if c1 in self.nodes
                               for e1 in self.adj_out[C3][c2]
                               for e2 in self.adj_out[c2][c1]]
        return motifs

    def dyadic_interaction_motifs(self) -> List[Tuple]:
        """
        :return: List of tuples of form (C1, C2, C1->C2, C2->C1) as in paper
        """
        motifs = []
        for C1 in self.hypernodes:
            motifs += [(C1, C2, e1, e2) for C2 in self.adj_out[C1] if C2 in
                       self.hypernodes and C1 in self.adj_out[C2]
                       for e1 in self.adj_out[C1][C2]
                       for e2 in self.adj_out[C2][C1]]
        return motifs

    def incoming_triad_motifs(self) -> List[Tuple]:
        """
        :return: List of tuples of form (C1, C2, C3, C2->C1, C3->C1) as in paper
        """
        motifs = []
        for C1 in self.hypernodes:
            incoming = list(self.adj_in[C1].keys())
            motifs += [(C1, C2, C3, e1, e2) for C2, C3 in
                       itertools.combinations(incoming, 2)
                       for e1 in self.adj_out[C2][C1]
                       for e2 in self.adj_out[C3][C1]]
        return motifs

    def outgoing_triad_motifs(self) -> List[Tuple]:
        """
        :return: List of tuples of form (C1, C2, C3, C1->C2, C1->C3) as in paper
        """
        motifs = []
        for C1 in self.hypernodes:
            outgoing = list(self.adj_out[C1].keys())
            motifs += [(C1, C2, C3, e1, e2) for C2, C3 in
                       itertools.combinations(outgoing, 2)
                       for e1 in self.adj_out[C1][C2]
                       for e2 in self.adj_out[C1][C3]]
        return motifs
|
7,528 | ffe10ee8b2ebaad565e9aef5047440a067d4e239 | import server.wsgi as flask
import server.grunner as gunicorn
from utils.cfgreader import EnvReader, BoolVar
def use_flask() -> bool:
    """Return True when the USE_FLASK environment variable opts into the Flask dev server."""
    flag = BoolVar('USE_FLASK', False)
    return EnvReader().safe_read(flag)
if __name__ == '__main__':
if use_flask(): # dev mode, run the WSGI app in Flask dev server
flask.run()
else: # prod mode, run the WSGI app in Gunicorn
gunicorn.run()
|
7,529 | 480787d7bc0e87df7c59c4deb402eea76643680c | from unidecode import unidecode
import pdb
import os, manage
import re
from datetime import *
import codecs
import csv
import smtplib
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.contrib.auth.models import User
from django.db.models import Q
from django.contrib import messages
from django.http import Http404
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, render
from django.contrib.auth.models import User
from django.template import RequestContext
from django.utils import simplejson
from django.core.mail import send_mail
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib import messages
from pytz import timezone
import pytz
from mailsnake import *
from .feeds import EventFeed
import mijnhercules.settings as settings
from .models import Match, Location
from .forms import MatchPresence
from members.models import Team, Player, MembershipHercules, Pass
# from mijnhercules.forms import *
from members.forms import EditPlayerForm, ArrangeSubstitutesForm, importMatchesForm, importPlayersForm
SITE_ROOT = os.path.dirname(os.path.realpath(manage.__file__))
eu = pytz.utc
#count amount of teams
# @login_required
# def TeamCount():
# t = Team.objects.all()
# return len(t)
def createMatchFeed(request, teamwedstrijd = None):
    """Build and return the event feed response, optionally scoped to one team's matches."""
    feed = EventFeed(teamwedstrijd)
    return feed(request)
@login_required
def viewMatch(request, match):
    """Read-only match page: shows the participating Hercules teams and any
    substitute offers per team.

    Raises Http404 when no Match with this id exists.
    """
    try:
        m = Match.objects.get(id=match)
    except Match.DoesNotExist:
        raise Http404
    teams = m.getHercules()
    substituteoptions = False
    substitutes = {}
    for t in teams:
        # A non-zero result means this team has substitute offers to show.
        # NOTE(review): getSubstitutes is called twice per team — cache it if
        # the call is confirmed to be side-effect free.
        if m.getSubstitutes(t.pk) != 0:
            substituteoptions = True
            substitutes[t] = m.getSubstitutes(t.pk)
    # raise ValueError
    return render(request, 'viewmatch.html', {'match':m, 'hercules':teams, 'substitutes':substitutes, 'substituteoptions':substituteoptions})
def editMatch(request, match):
    """Let a member of a playing team set how many substitutes the team needs.

    GET: show the form, plus substitute-willing players of the user's gender
    and the players already marked present.
    POST: persist the requested number of substitutes.
    Raises Http404 when the match is unknown or the user's team isn't playing.

    NOTE(review): unlike viewMatch there is no @login_required decorator,
    yet request.user is dereferenced — confirm access control upstream.
    """
    u1 = User.objects.get(username=request.user.username)
    teampk = u1.get_profile().team_member.pk
    try:
        m = Match.objects.get(id=match)
    except Match.DoesNotExist:
        raise Http404
    if request.method == 'POST' and m.isTeam(teampk):
        form = ArrangeSubstitutesForm(request.POST)
        if form.is_valid():
            cd = form.cleaned_data
            # m.substitutesneeded = cd['substitutesneeded']
            m.setSubstitutes(team = teampk, amountsubsneeded = cd['substitutesneeded'])
            m.save()
            return render(request, 'player/editplayer_complete.html')
    else:
        if m.isTeam(teampk):
            form = ArrangeSubstitutesForm(initial={'substitutesneeded': m.getSubstitutesNeeded(teampk)})
            u1 = User.objects.get(username=request.user.username)
            player = u1.get_profile()
            # Offer only substitutes matching the requesting player's gender.
            # NOTE(review): if gender is neither 'V' nor 'M', substituteWilling
            # is never assigned and the render call raises NameError.
            if player.gender == 'V':
                substituteWilling = Player.women.filter(substitutewilling=True)
            elif player.gender == 'M':
                substituteWilling = Player.men.filter(substitutewilling=True)
            presentplayers = m.getPresentPlayers(player.team_member.pk)
            return render(request, 'match.html', {'match':m, 'form': form, 'substitutes':substituteWilling, 'presentplayers':presentplayers})
        else:
            raise Http404
def _parse_match_date(row):
    """Return the tz-aware (UTC-localized) kickoff datetime of one CSV row.

    The export uses either a 2-digit or a 4-digit year, so try both formats.
    """
    raw = row['Wedstrijddatum (niet geformatteerd)'] + " " + row['Aanvangstijd']
    try:
        return eu.localize(datetime.strptime(raw, '%d-%m-%y %H:%M'))
    except ValueError:
        return eu.localize(datetime.strptime(raw, '%d-%m-%Y %H:%M'))


def _get_or_create_location(veld):
    """Map the CSV 'Veld' value ("<hall> veld ...") onto a Location row, creating it if missing."""
    hall = re.match(r'(.*)\sveld', veld).group(1)
    try:
        return Location.objects.get(name=hall)
    except Location.DoesNotExist:
        loc = Location.objects.create(name=hall)
        loc.save()
        return loc


def _get_or_create_team(number):
    """Return the Team with this number, creating a placeholder (level '99') if unknown."""
    try:
        return Team.objects.get(number=number)
    except Team.DoesNotExist:
        team = Team.objects.create(number=number, level='99')
        team.save()
        return team


def readMatch(f):
    """Parse an uploaded KNVB-style CSV of matches and sync them into the database.

    Creates missing Location/Team rows on the fly, creates or updates a Match
    per indoor ("Zaal") row, and finally deletes previously stored matches in
    the uploaded date range that no longer appear in the upload (cancelled).

    Returns (savedmatches, error): error is None on success, otherwise a
    user-facing message and savedmatches is [].
    """
    # --- 1. sanity-check the header row ------------------------------------
    required_columns = ('Wedstrijdnummer', 'Wedstrijddatum (niet geformatteerd)',
                        'Aanvangstijd', 'Aanduiding', 'Thuis team', 'Uit team',
                        'Sport omschrijving', 'Veld', 'Accommodatie naam', 'Plaats')
    f.open()
    reader = csv.reader(codecs.EncodedFile(f, "latin-1"), delimiter=';', dialect=csv.excel_tab)
    try:
        header = reader.next()
        # Bug fix: the old `assert 'A' and 'B' and ... in header` only tested
        # the LAST column name; check every required column explicitly.
        valid = all(column in header for column in required_columns)
    except Exception:
        valid = False
    f.close()  # also closed on the error path now (the old code leaked it)
    if not valid:
        return [], "Foutje: het lijkt geen csv bestand te zijn."
    # --- 2. date range covered by the upload --------------------------------
    f.open()
    data = csv.DictReader(codecs.EncodedFile(f, "latin-1"), delimiter=';', dialect=csv.excel_tab)
    dates = [_parse_match_date(row) for row in data]
    f.close()
    mindate = min(dates)
    maxdate = max(dates)
    # Stored matches inside the uploaded range; anything absent from the
    # upload is considered cancelled and removed in step 4.
    existingmatches = Match.objects.filter(date__lte=maxdate).filter(date__gte=mindate)
    # --- 3. create/update matches -------------------------------------------
    savedmatches = []
    f.open()
    data = csv.DictReader(codecs.EncodedFile(f, "latin-1"), delimiter=';', dialect=csv.excel_tab)
    for row in data:
        if "Zaal" not in row['Aanduiding']:
            continue  # only indoor ("Zaal") rows are imported
        loc = _get_or_create_location(row['Veld'])
        t1 = _get_or_create_team(row['Thuis team'])
        t2 = _get_or_create_team(row['Uit team'])
        date = _parse_match_date(row)
        try:
            # Update an existing match, matched on the KNVB match number.
            m = Match.objects.get(nrid=row['Wedstrijdnummer'])
            m.date = date
            m.teamhome = t1
            m.teamaway = t2
            m.location = loc
        except Match.DoesNotExist:
            m = Match(nrid=row['Wedstrijdnummer'], date=date,
                      teamhome=t1, teamaway=t2, location=loc)
        m.save()
        savedmatches.append(m)
    f.close()
    # --- 4. drop matches that disappeared from the upload (cancelled) -------
    for e in existingmatches:
        if e not in savedmatches:
            e.delete()
    return savedmatches, None
def importMatch(request):
    """Upload view for match CSV files.

    GET: show the upload form plus all upcoming matches.
    POST: run the uploaded file through readMatch() and show the result.

    Bug fix: an invalid POST previously fell off the end of the function and
    the view returned None (a Django server error); it now re-renders the
    page with the bound form so validation errors are shown.
    """
    matches = Match.objects.exclude(date__lte=date.today()).order_by('date')
    if request.method == 'POST':
        form = importMatchesForm(request.POST, request.FILES)
        if form.is_valid():
            savedmatches, fail = readMatch(request.FILES['matches'])
            return render(request, 'savematch_success.html',
                          {'savedmatches': savedmatches, 'fail': fail})
    else:
        form = importMatchesForm()
    return render(request, 'savematch.html', {'form': form, 'matches': matches})
def viewMyMatches(request):
    """List the logged-in player's team matches with a presence form per match.

    NOTE(review): no @login_required decorator although request.user is used —
    anonymous visitors will crash on the User lookup; confirm intended access.
    """
    u1 = User.objects.get(username=request.user.username)
    teampk = u1.get_profile().team_member.pk
    matches = Match.objects.get_my_matches(teampk)
    presentmatches = {}
    for m in matches:
        # Pre-set each presence form to the player's current status.
        if m.playerPresent(teampk, u1):
            status = 'Aanwezig'
        else:
            status = 'Afwezig'
        presentmatches[m] = MatchPresence(initial = status)
    # raise ValueError
    return render(request, 'mymatches.html', {'mymatches': matches, 'presentmatches':presentmatches})
def offerSubstitute(request, matchpk, teampk, substitutepk):
    """Sign a player up as a potential substitute for a match, then return to the match page."""
    selected_match = Match.objects.get(pk=matchpk)
    volunteer = Player.objects.get(pk=substitutepk)
    selected_match.addSubstitute(teampk=teampk, player=volunteer)
    messages.add_message(request, messages.SUCCESS,
                         'Je hebt jezelf aangemeld als mogelijke invaller. Goed bezig!!')
    return redirect(reverse(viewMatch, args=(matchpk,)))
def cancelSubstituteOffer(request, matchpk, teampk, substitutepk):
    """Withdraw a player's substitute offer for a match, then return to the match page."""
    selected_match = Match.objects.get(pk=matchpk)
    volunteer = Player.objects.get(pk=substitutepk)
    selected_match.removeSubstitute(teampk=teampk, player=volunteer)
    messages.add_message(request, messages.SUCCESS,
                         'Je afmelding als mogelijke invaller is doorgegeven.')
    return redirect(reverse(viewMatch, args=(matchpk,)))
def addMatchPresence(request, matchpk, teampk, playerpk):
    """Mark a player as present for a match, then return to the editable match page."""
    selected_match = Match.objects.get(pk=matchpk)
    attendee = Player.objects.get(pk=playerpk)
    selected_match.addMatchPresence(teampk=teampk, player=attendee)
    messages.add_message(request, messages.SUCCESS,
                         'Je hebt jezelf aangemeld voor deze wedstrijd!!')
    return redirect(reverse(editMatch, args=(matchpk,)))
def removeMatchPresence(request, matchpk, teampk, playerpk):
    """Mark a player as absent for a match, then return to the editable match page."""
    selected_match = Match.objects.get(pk=matchpk)
    attendee = Player.objects.get(pk=playerpk)
    selected_match.removeMatchPresence(teampk=teampk, player=attendee)
    messages.add_message(request, messages.SUCCESS,
                         'Je afmelding voor deze wedstrijd is doorgegeven.')
    return redirect(reverse(editMatch, args=(matchpk,)))
7,530 | 2236591b3a30f51442beb20c6c43cc9e6cd921d2 | # import sys
# sys.stdin = open("농작물input.txt")
T = int(input())
for n in range(1, T+1):
N = int(input())
arr = [list(map(int, list(input()))) for _ in range(N)]
# print(arr)
a = N//2
b = N//2
result = 0
for i in range(N):
for j in range(a, b+1):
result += arr[i][j]
print(result)
if i < N//2:
a += -1
b += 1
else:
a += 1
b += -1
print("#{0} {1}".format(n, result)) |
7,531 | 2023e0b749338488e63cbbb475b7a915bccccce0 | from subprocess import check_output
import json
import datetime
# --- Snapshot naming ---------------------------------------------------------
date = datetime.datetime.now()
mo = date.month
day = date.day
year = date.year
# Renamed from `str`, which shadowed the builtin of the same name.
# NOTE: month/day are not zero-padded, matching the original naming scheme.
snapshot_suffix = '{0}-{1}-{2}'.format(mo, day, year)

# --- Collect all Lightsail instances and bucket their snapshots --------------
instances = json.loads(check_output("aws lightsail get-instances", shell=True))
inst_names = []
inst_dict = {}
for instance in instances['instances']:
    inst_names.append(instance['name'])
    inst_dict[instance['name']] = []
print(inst_names)
snapshots = json.loads(check_output("aws lightsail get-instance-snapshots", shell=True))
for snapshot in snapshots['instanceSnapshots']:
    # NOTE(review): raises KeyError for snapshots whose source instance no
    # longer exists — confirm whether such orphans can occur in this account.
    inst_dict[snapshot['fromInstanceName']].append(snapshot)

# --- Take a fresh snapshot per instance, then prune the oldest one -----------
for name, snaps in inst_dict.items():
    # SECURITY NOTE: shell=True with string concatenation; names come from the
    # AWS account itself, but a list argv (shell=False) would be safer.
    print(json.dumps(json.loads(
        check_output("aws lightsail create-instance-snapshot --instance-name " + name
                     + " --instance-snapshot-name " + name + "-" + snapshot_suffix,
                     shell=True))))
    if len(snaps) > 1:
        # Keep only the newest pre-existing snapshot: delete the oldest.
        sorted_snapshots = sorted(snaps, key=lambda k: k['createdAt'])
        print(json.dumps(json.loads(check_output(
            "aws lightsail delete-instance-snapshot --instance-snapshot-name "
            + sorted_snapshots[0]['name'], shell=True))))
|
7,532 | 290811317ddb49a7d2a9f44ab7e0b6d201db12e1 | from recipes.almahelpers import fixsyscaltimes # SACM/JAO - Fixes
# CASA/ALMA single-dish calibration + imaging pipeline driver.
# The h_* / hsd_* task functions are injected into the namespace by the CASA
# pipeline environment; this script cannot run outside a CASA session.
__rethrow_casa_exceptions = True
context = h_init()
# Project/OUS bookkeeping recorded in the pipeline context (used in the weblog).
context.set_state('ProjectSummary', 'proposal_code', '2017.1.01355.L')
context.set_state('ProjectSummary', 'piname', 'unknown')
context.set_state('ProjectSummary', 'proposal_title', 'unknown')
context.set_state('ProjectStructure', 'ous_part_id', 'X113490217')
context.set_state('ProjectStructure', 'ous_title', 'Undefined')
context.set_state('ProjectStructure', 'ppr_file', '/opt/dared/opt/qa56.1712.1/mnt/dataproc/2017.1.01355.L_2018_07_02T11_45_04.304/SOUS_uid___A001_X1296_X1f5/GOUS_uid___A001_X1296_X1f6/MOUS_uid___A001_X1296_X1fd/working/PPR_uid___A001_X1296_X1fe.xml')
context.set_state('ProjectStructure', 'ps_entity_id', 'uid://A001/X1220/Xddd')
context.set_state('ProjectStructure', 'recipe_name', 'hsd_calimage')
context.set_state('ProjectStructure', 'ous_entity_id', 'uid://A001/X1220/Xdd9')
context.set_state('ProjectStructure', 'ousstatus_entity_id', 'uid://A001/X1296/X1fd')
try:
    # hsd_calimage recipe: import, flag, Tsys/sky/Jy-per-K calibration, apply,
    # then two baseline-subtract + baseline-flag passes, and finally imaging.
    hsd_importdata(vis=['uid___A002_Xcf3a9c_X2efb', 'uid___A002_Xcf4672_X1a17'], session=['session_1', 'session_2'])
    hsd_flagdata(pipelinemode="automatic")
    h_tsyscal(pipelinemode="automatic")
    hsd_tsysflag(pipelinemode="automatic")
    hsd_skycal(pipelinemode="automatic")
    hsd_k2jycal(pipelinemode="automatic")
    hsd_applycal(pipelinemode="automatic")
    hsd_baseline(pipelinemode="automatic")
    hsd_blflag(pipelinemode="automatic")
    hsd_baseline(pipelinemode="automatic")
    hsd_blflag(pipelinemode="automatic")
    hsd_imaging(pipelinemode="automatic")
finally:
    # Persist the pipeline context even if a task fails.
    h_save()
7,533 | 3185b6b1902099caed66ce6f97cd1b9940261fc1 | import torch.nn as nn
from layers import maskAConv, MaskBConvBlock
class PixelCNN(nn.Module):
    """PixelCNN: autoregressive model producing per-pixel logits over
    `discrete_channel` intensity levels for each image channel."""

    def __init__(self, n_channel=3, h=128, discrete_channel=256):
        super(PixelCNN, self).__init__()
        self.discrete_channel = discrete_channel
        # First layer uses a type-A mask (7x7, padding keeps spatial size).
        self.MaskAConv = maskAConv(n_channel, 2 * h, k_size=7, stride=1, pad=3)
        # Stack of 15 type-B masked conv blocks.
        self.MaskBConv = nn.Sequential(
            *[MaskBConvBlock(h, k_size=3, stride=1, pad=1) for _ in range(15)])
        # 1x1 conv head expanding to n_channel * discrete_channel logits.
        self.out = nn.Sequential(
            nn.ReLU(),
            nn.Conv2d(2 * h, 1024, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(1024),
            nn.ReLU(),
            nn.Conv2d(1024, n_channel * discrete_channel, kernel_size=1, stride=1, padding=0))

    def forward(self, x):
        """Map images [batch, channel, height, width] to logits
        [batch, channel, height, width, discrete_channel]."""
        batch, channels, height, width = x.size()
        features = self.MaskBConv(self.MaskAConv(x))
        logits = self.out(features)
        # Split the flat channel axis into (image channel, intensity level)...
        logits = logits.view(batch, channels, self.discrete_channel, height, width)
        # ...and move the intensity axis last.
        return logits.permute(0, 1, 3, 4, 2)
|
7,534 | 04097e63de5cd94ca8921be5cb6c2155c1e7bc20 | import pathlib
import sys
import yaml
from google.protobuf.json_format import ParseError
sys.path = [p for p in sys.path if not p.endswith('bazel_tools')]
from tools.config_validation.validate_fragment import validate_fragment
def main():
    """Validate each CLI argument as a v3 Bootstrap YAML fragment; exit non-zero if any fail."""
    failed = []
    for path in sys.argv[1:]:
        try:
            content = yaml.safe_load(pathlib.Path(path).read_text())
            validate_fragment("envoy.config.bootstrap.v3.Bootstrap", content)
        except (ParseError, KeyError) as e:
            failed.append(path)
            print(f"\nERROR (validation failed): {path}\n{e}\n\n")
    if failed:
        raise SystemExit(f"ERROR: some configuration files ({len(failed)}) failed to validate")
if __name__ == "__main__":
main()
|
7,535 | ad622ff2e1d9286246b2175694a9ae796f8d2557 | ''' This module creates the models/tables in the database
catalog using sqlalchemy '''
from catalog import db
class Items(db.Model):
    ''' SQLAlchemy model holding all the information about a catalog item. '''
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # E-mail of the item's owner/creator (optional).
    email = db.Column(db.String)
    item = db.Column(db.String, nullable=False)
    description = db.Column(db.String, nullable=False)
    # Image URL/path (optional).
    image = db.Column(db.String)
    category = db.Column(db.String, nullable=False)
    # Price is stored as free-form text, not a numeric type.
    price = db.Column(db.String, nullable=False)
    def __init__(self, email, item, description, image, category, price):
        self.email = email
        self.item = item
        self.description = description
        self.image = image
        self.category = category
        self.price = price
    @property
    def serialize(self):
        ''' Return a JSON-serializable dict for this item.

        NOTE(review): `email` is omitted — confirm this is intentional
        (it keeps owner addresses out of the public API). '''
        return { 'id': self.id,
            'item': self.item,
            'description': self.description,
            'image': self.image,
            'category': self.category,
            'price': self.price }
    def __repr__(self):
        ''' Printable representation used in logs and the interactive shell. '''
        return '<item {}>'.format(self.item)
|
7,536 | 608c116cd42132bd63be5056f0aaf5c78933886e | # SPDX-FileCopyrightText: 2021 John Park for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
import random
import board
import audiomp3
import audiopwmio
from adafruit_crickit import crickit
ss = crickit.seesaw # Crickit seesaw setup
button = crickit.SIGNAL1 # momentary switch to trigger animation
ss.pin_mode(button, ss.INPUT_PULLUP)
LED = crickit.SIGNAL4 # standard LED for eyeball lighting
ss.pin_mode(LED, ss.OUTPUT)
attract_switch = crickit.SIGNAL8 # attract mode switch or jumper
ss.pin_mode(attract_switch, ss.INPUT_PULLUP)
audio = audiopwmio.PWMAudioOut(board.A0) # Feather outputs this pin to Crickit amplifier
audio_files = [ # use your own mono .mp3 files
"phrase_01.mp3",
"phrase_02.mp3",
"phrase_03.mp3"
]
current_audio_file = 0
# two motors
motor_eye = crickit.dc_motor_1
motor_lid = crickit.dc_motor_2
def open_lid():
    """Run the lid motor forward for 0.25 s to open the lid, then stop to hold."""
    motor_lid.throttle = 1 # full speed open
    time.sleep(0.25)
    motor_lid.throttle = 0 # hold
def close_lid():
    """Run the lid motor in reverse for 0.25 s to close the lid, then stop."""
    motor_lid.throttle = -1 # full speed closed
    time.sleep(0.25)
    motor_lid.throttle = 0
def blink(times):
    """Flash the eye LED `times` times (0.1 s on, 0.1 s off per flash)."""
    for _ in range(times):
        ss.digital_write(LED, True)
        time.sleep(0.1)
        ss.digital_write(LED, False)
        time.sleep(0.1)
def eye_look():
    """Twitch the eye motor one way and then back, with random speed and timing."""
    motor_eye.throttle = random.uniform(0.6, 1.0) # sweep one direction
    time.sleep(random.random()) # 0 to 1.0 seconds
    motor_eye.throttle = 0
    time.sleep(random.random())
    motor_eye.throttle = random.uniform(-1.0, -0.6) # sweep back
    time.sleep(random.random())
    motor_eye.throttle = 0
    time.sleep(random.random())
# Main loop: triggered animation when the attract switch is open,
# otherwise a continuous attract-mode show.
while True:
    if ss.digital_read(attract_switch): # regular mode: attract switch not closed/shorted
        if not ss.digital_read(button): # button has been pressed
            # Doorbell chime first, blocking until it finishes.
            decoder = audiomp3.MP3Decoder(open("ring.mp3", "rb"))
            audio.play(decoder)
            while audio.playing:
                pass
            open_lid()
            blink(3)
            ss.digital_write(LED, True) # light the eye
            # Speak the next phrase while the eye darts around.
            decoder = audiomp3.MP3Decoder(open(audio_files[current_audio_file], "rb"))
            audio.play(decoder)
            while audio.playing:
                eye_look()
            motor_eye.throttle = 0 # audio is finished, pause the eye
            blink(5)
            close_lid()
            current_audio_file = ((current_audio_file + 1) % (len(audio_files))) # go to next file
    else: # attract mode
        open_lid()
        blink(3)
        ss.digital_write(LED, True)
        for _ in range(4):
            eye_look()
        time.sleep(1)
        blink(5)
        close_lid()
        # Random pause before the next attract cycle.
        time.sleep(random.randint(2, 8))
|
7,537 | 8adf8cfc72d5af955bf7509d3573a9bcc7c0845e | import inspect
import re
import openquake.hazardlib.source as oqsrc
# List of valid attributes for an area source
AREAS_ATTRIBUTES = set(['source_id',
'name',
'tectonic_region_type',
'mfd',
'rupture_mesh_spacing',
'magnitude_scaling_relationship',
'rupture_aspect_ratio',
'temporal_occurrence_model',
'upper_seismogenic_depth',
'lower_seismogenic_depth',
'nodal_plane_distribution',
'hypocenter_distribution',
'polygon',
'area_discretization'])
AREAS_ATTRIBUTES |= set(['gr_aval',
'gr_bval',
'source_type'])
# List of valid attributes for a simple source
SIMPLE_FAULT_ATTRIBUTES = set(['source_id',
'name',
'tectonic_region_type',
'mfd',
'rupture_mesh_spacing',
'magnitude_scaling_relationship',
'rupture_aspect_ratio',
'temporal_occurrence_model',
'upper_seismogenic_depth',
'lower_seismogenic_depth',
'fault_trace',
'dip',
'rake',
'hypo_list',
'sliprate'])
SIMPLE_FAULT_ATTRIBUTES |= set(['gr_aval',
'gr_bval',
'dip',
'rake',
'hypo_list',
'slip_list'])
SIMPLE_FAULT_ATTRIBUTES |= set(['gr_aval',
'gr_bval',
'source_type'])
# This adds support for shapefiles created by the OpenQuake-engine
SIMPLE_FAULT_ATTRIBUTES |= set([''])
# Create the set of valid source types
# Every class name exported by openquake.hazardlib.source, minus rupture classes.
SOURCE_TYPES = {name for name, obj in inspect.getmembers(oqsrc)
                if inspect.isclass(obj) and not re.search('Rupture', name)}
class OQtSource(object):
    """
    A container for information necessary to build and/or characterise an
    earthquake source
    :parameter str source_id:
        The ID of the source
    :parameter str source_type:
        Source type i.e. Object name amongst the ones admitted in the
        OpenQuake Hazardlib.
    """
    def __init__(self, *args, **kwargs):
        # Checks: positional args are (source_id[, source_type]); any keyword
        # arguments are copied straight onto the instance.
        if len(args):
            self.source_id = args[0]
        if len(args) > 1:
            self.source_type = args[1]
        if len(kwargs):
            self.__dict__.update(kwargs)
        # Check mandatory attributes: ID
        if 'source_id' not in self.__dict__:
            raise ValueError('Source must have an ID')
        elif not isinstance(self.source_id, str):
            raise ValueError('ID must be a string')
        # Check mandatory fields: SOURCE TYPE
        if 'source_type' not in self.__dict__:
            raise ValueError('Source must have a type')
        if self.source_type not in SOURCE_TYPES:
            raise ValueError('Unrecognized source type: %s' % self.source_type)
        # NOTE(review): both branches below test the same condition
        # ('source_type' in self.__dict__, always true at this point), so the
        # elif/else are unreachable and AREAS_ATTRIBUTES is always used.
        # The intent was probably to dispatch on the *value* of source_type
        # (e.g. 'AreaSource' vs 'SimpleFaultSource') — confirm before fixing,
        # since a real dispatch would reject inputs that currently pass.
        if 'source_type' in self.__dict__:
            attribute_set = AREAS_ATTRIBUTES
        elif 'source_type' in self.__dict__:
            attribute_set = SIMPLE_FAULT_ATTRIBUTES
        else:
            raise ValueError('Unsupported source type')
        # Check attributes: every instance attribute must be in the valid set.
        for key in self.__dict__:
            if key not in attribute_set:
                print ('Attribute set', attribute_set)
                msg = 'Parameter %s not compatible with this source' % (key)
                raise ValueError(msg)
    def get_info(self):
        # Dump every attribute of the source, one per line, right-aligned.
        for key in self.__dict__:
            print ('%30s:' % (key), getattr(self, key))
|
7,538 | 6f271e6cfb03977d52c50562c3c394b962c9af83 | # vim:sw=4 ts=4 et:
# Copyright (c) 2015 Torchbox Ltd.
# tomasz.knapik@torchbox.com 2017-12-07
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely. This software is provided 'as-is', without any express or implied
# warranty.
#
from django import forms
from .utils import render_markdown
from .widgets import MarkdownTextarea
try:
from wagtail.core.blocks import TextBlock
except ImportError:
from wagtail.wagtailcore.blocks import TextBlock
class MarkdownBlock(TextBlock):
    """StreamField block whose editor is a Markdown textarea and whose basic
    rendering is the HTML produced from the stored Markdown source."""
    def __init__(self, required=True, help_text=None, **kwargs):
        # Install a CharField backed by the Markdown-aware widget. Assigned
        # before super().__init__ — presumably so TextBlock's own setup does
        # not replace it with a plain textarea (TODO confirm against Wagtail).
        self.field = forms.CharField(
            required=required, help_text=help_text, widget=MarkdownTextarea()
        )
        super(MarkdownBlock, self).__init__(**kwargs)
    def render_basic(self, value, context=None):
        # Render the stored Markdown source to HTML for front-end display.
        return render_markdown(value, context)
|
7,539 | 1e02d584cde0cdf251aa36abd27b683219ef87ed | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 01:03:35 2020
@author: Jordan
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import date
## from COVID19_Simple import *
from COVID19_Diff import calc_diff_country
### Dash Stuff ###
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objects as go
import math
### Initial Code Block; Set Up Data ###
urls = ['https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv',
'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv',
'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv']
### Base Country Data (and Transformations)
# NOTE(review): hard-coded absolute Windows path — this breaks on any other
# machine; consider a relative path or a config/env variable.
final_df = pd.read_csv('C:/Users/Jordan/Documents/COVID19/final_df.csv')
final_df = calc_diff_country(final_df)
# Normalize column types used throughout the dashboard.
final_df['Date'] = pd.to_datetime(final_df['Date'])
final_df['Country_Region'] = final_df['Country_Region'].astype(str)
### 1000 Cases, 10 Deaths, 10 Recovered ### (Global)
## 1000 Cases ##
cases_1000_start = final_df.loc[(final_df['Confirmed'] >= 1000) & (final_df['Country_Region'] != 'Cruise Ship')].groupby(['Country_Region']).min()['Date']
cases_1000_start = cases_1000_start.reset_index()
cases_1000_start = cases_1000_start.rename(columns={"Date":"Start_Date"})
final_df['Country_Region'] = final_df['Country_Region'].str.strip()
cases_1000_start = pd.merge(cases_1000_start,final_df, on = ['Country_Region'],how='right')
cases_1000_start['Start_Date'] = pd.to_datetime(cases_1000_start['Start_Date'])
cases_1000_start['Date'] = pd.to_datetime(cases_1000_start['Date'])
cases_1000_start = cases_1000_start[cases_1000_start['Start_Date'].notna()]
cases_1000_start['Days Since 1000 Cases'] = (cases_1000_start['Date'] - cases_1000_start['Start_Date']).dt.days
## 100 Deaths ##
deaths_100_start = final_df.loc[(final_df['Deaths'] >= 100) & (final_df['Country_Region'] != 'Cruise Ship')].groupby(['Country_Region']).min()['Date']
deaths_100_start = deaths_100_start.reset_index()
deaths_100_start = deaths_100_start.rename(columns={"Date":"Start_Date"})
final_df['Country_Region'] = final_df['Country_Region'].str.strip()
deaths_100_start = pd.merge(deaths_100_start,final_df, on = ['Country_Region'],how='right')
deaths_100_start['Start_Date'] = pd.to_datetime(deaths_100_start['Start_Date'])
deaths_100_start['Date'] = pd.to_datetime(deaths_100_start['Date'])
deaths_100_start = deaths_100_start[deaths_100_start['Start_Date'].notna()]
deaths_100_start['Days Since 100 Deaths'] = (deaths_100_start['Date'] - deaths_100_start['Start_Date']).dt.days
## Mortality Ratios ##
mort = final_df.groupby(['Country_Region'])['Date'].max().reset_index()
mort = pd.merge(mort, final_df, on=['Country_Region', 'Date'], how='left')
mort['Mortality_Percent'] = (mort['Deaths'] / mort['Confirmed'])*100.00
colors_dict_global = {'Europe':'#1D6996','Asia':'#CC503E','Africa':'#94346E', 'North America':'#38A6A5', 'Middle East': '#EDAD08', 'South America':'#E17C05', 'Caribbean & Central America':'#0F8554', 'Oceania':'#73AF48'}
### Dash Portion of the Script ###
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server=app.server
app.layout = html.Div(children=[
html.H2(children='COVID-19 Dashboard'),
html.H4(children='A Basic Dashboard to Help Track the COVID-19 Pandemic'),
html.Br(),
html.H5(children='Global View'),
html.P(children='The Global View highlights how Covid-19 is affecting countries across the world, and how the pandemic is expanding on a country by country basis. The Global View includes the following:'),
html.Div([html.Ul([html.Li([html.B('Cumulative Cases by Country Since First 1000 Cases: '),'This allows us to see how cases are spreading since the first 1000 Cases on a country by country basis']),
html.Li([html.B('Cumulative Cases by Country Since First 100 Deaths: '),'This allows us to see COVID-19 fatalities since the first 100 Deaths on a country by country basis']),
html.Li([html.B('Observed Case - Mortality Ratio (Top 20 Countries by Confirmed Cases): '), 'This allows us to see the percentage of COVID19 fatalities based on reported cases and deaths. (Note that reporting standards vary from country to country, so this is for illustrative purposes only)']),
html.Li([html.B('Recoveries vs. Deaths By Country (Countries with over 100 deaths and 100 recoveries: '), 'This plots Recoveries against Deaths on a country by country basis. (Note that reporting standards vary from country to country, so this is for illustrative purposes only)'])])], style={'font-size': 12}),
html.Br(),
dcc.Dropdown(id='global-dropdown', options=[{'label':y, 'value':y} for y in ['Global Cases Trend', 'Global Deaths Trend', '% Mortality by Confirmed Cases (Top 20 Countries)','Recoveries vs. Deaths By Country']], placeholder = 'Pick Graphs From Here...'),
dcc.Graph(id='global-box-1'),
html.Br(),
html.H5(children='Country View'),
html.P('The Country view allows us to see a closer look on how the COVID-19 Pandemic has expanded. As opposed to a high level aggregation, the Country View provides a day by day time series analysis of the effects of COVID-19. The Country View includes the following:'),
html.Div(style={'font-size': 12}, children=[html.Ul([html.Li([html.B('Confirmed: '), 'Cumulative Confirmed Cases of COVID-19 since January 22nd, 2020']),
html.Li([html.B('Recovered: '), 'Cumulative Recovered Cases of COVID-19 since January 22nd, 2020']),
html.Li([html.B('Deaths: '),'Cumulative Deaths from COVID-19 since January 22nd, 2020']),
html.Li([html.B('Total and Daily Confirmed Cases: '), 'Cumulative and Daily Cases Since January 22nd, 2020. This illustrates the curve of daily cases in relation to the total cases for a country'])])]),
dcc.Dropdown(id='main-dropdown', options=[{'label': x, 'value': x} for x in list(final_df.Country_Region.unique())], placeholder = 'Pick a Country From Here...'),
dcc.Dropdown(id='main-dropdown-2', placeholder = 'Pick Graphs From Here...'),
dcc.Graph(id='box-1'),
html.Div([html.Div([html.H6(children='Most Recent New Cases'), html.H1(id='btext1'), dcc.Graph(id='subplot1')], className = 'four columns', style={'color': '#648FFF'}),
html.Div([html.H6(children='Most Recent Daily Deaths'), html.H1(id='btext2'), dcc.Graph(id='subplot2')], className = 'four columns', style={'color': '#DC267F'}),
html.Div([html.H6(children='Most Recent Daily Recovered'), html.H1(id='btext3'), dcc.Graph(id='subplot3')], className = 'four columns', style={'color': '#009E73', 'layout':'right'})], className="row")
])
## Callback Functionality ##
@app.callback(
Output(component_id='global-box-1', component_property='figure'),
[Input(component_id='global-dropdown', component_property='value')])
def global_update(select_global):
if select_global == 'Global Cases Trend' or select_global is None:
fig1000 = []
anno = []
for group, dataframe in cases_1000_start.groupby(by='Country_Region'):
di = dataframe.sort_values(by=['Days Since 1000 Cases'])
trace = go.Scatter(x=di['Days Since 1000 Cases'].tolist(),
y=di['Confirmed'].tolist(),
mode='lines',
line=dict(color=colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=1),
opacity=0.6,
text= di.Country_Region.tolist(),
legendgroup=list(di.loc[:, 'Continent'])[0],
hovertemplate='<b>%{text}</b><br>'+'<br>Confirmed Cases: %{y}<br>'+'Days Since First 1000 Cases: %{x}<br>',
showlegend=False)
a = {'x': int(di['Days Since 1000 Cases'].max()+1.5),
'y':np.log10(int(di['Confirmed'].max())),
'xref':'x', 'yref':'y',
'showarrow':False,
'text':list(di.loc[:, 'Country_Region'])[0],
'xanchor':'right',
'yanchor':'middle',
'align':'center',
'font':{'size':8, 'color':'black'},
'bordercolor':"#ffffff",
'borderwidth':1,
'borderpad':1,
'bgcolor':"#ffffff",
'opacity':0.6}
fig1000.append(trace)
anno.append(a)
fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start['Days Since 1000 Cases'].max())),
y = [1000 * (math.exp(0.2310491 * i)) for i in list(np.arange(cases_1000_start['Days Since 1000 Cases'].max()))],
name='Cases Double Every 3 Days',
mode='lines',
opacity=.25,
line = dict(color='grey', width=3, dash='dash'),
text=['# of Cases Double Every 3 Days'],
hovertemplate='<b>Cases Double Every 3 Days</b>',
showlegend=True))
fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start['Days Since 1000 Cases'].max())),
y = [1000 * (math.exp(0.099021 * i)) for i in list(np.arange(cases_1000_start['Days Since 1000 Cases'].max()))],
name='Cases Double Every 7 Days',
mode='lines',
opacity=.25,
line = dict(color='grey', width=3, dash='dot'),
text=['# of Cases Double Every 7 Days'],
hovertemplate='<b>Cases Double Every 7 Days</b>',
showlegend=True))
layout_global = go.Layout(yaxis={'title':'Number of Confirmed Cases', 'range':[np.log10(1000), np.log10(cases_1000_start['Confirmed'].max() * 1.10)], 'type':'log', 'fixedrange':True, 'linewidth':2, 'linecolor':'black', 'showgrid': False, 'dtick': 1, 'showline':True, 'mirror':False},
title='Overall Confirmed Cases',
xaxis={'title': 'Days Since First 1000 Cases', 'range': [0, cases_1000_start['Days Since 1000 Cases'].max()], 'fixedrange':True, 'linewidth':2, 'linecolor':'black', 'showgrid': False, 'showline':True, 'mirror':False}, height=750, hovermode='closest', annotations=anno)
fig_global={'data':fig1000, 'layout': layout_global}
return fig_global
elif select_global == 'Global Deaths Trend':
fig100 = []
anno = []
for group, dataframe in deaths_100_start.groupby(by='Country_Region'):
di = dataframe.sort_values(by=['Days Since 100 Deaths'])
trace = go.Scatter(x=di['Days Since 100 Deaths'].tolist(),
y=di['Deaths'].tolist(),
mode='lines',
line=dict(color=colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=1),
opacity=0.6,
text= di.Country_Region.tolist(),
legendgroup=list(di.loc[:, 'Continent'])[0],
hovertemplate='<b>%{text}</b><br>'+'<br>Deaths: %{y}<br>'+'Days Since First 1000 Cases: %{x}<br>',
showlegend=False)
a={'x': int(di['Days Since 100 Deaths'].max()+1.5),
'y':np.log10(int(di['Deaths'].max())),
'xref':'x', 'yref':'y',
'showarrow':False,
'text':list(di.loc[:, 'Country_Region'])[0],
'xanchor':'right',
'yanchor':'middle',
'align':'center',
'font':{'size':8, 'color':'black'},
'bordercolor':"#ffffff",
'borderwidth':1,
'borderpad':1,
'bgcolor':"#ffffff",
'opacity':0.6}
fig100.append(trace)
anno.append(a)
fig100.append(go.Scatter(x=list(np.arange(deaths_100_start['Days Since 100 Deaths'].max())),
y = [100 * (math.exp(0.2310491 * i)) for i in list(np.arange(deaths_100_start['Days Since 100 Deaths'].max()))],
name='Deaths Double Every 3 Days',
mode='lines',
opacity=.25,
line = dict(color='grey', width=3, dash='dash'),
text=['# of Deaths Double Every 3 Days'],
hovertemplate='<b>Deaths Double Every 3 Days</b>',
showlegend=True))
fig100.append(go.Scatter(x=list(np.arange(deaths_100_start['Days Since 100 Deaths'].max())),
y = [100 * (math.exp(0.099021 * i)) for i in list(np.arange(deaths_100_start['Days Since 100 Deaths'].max()))],
name='Deaths Double Every 7 Days',
mode='lines',
opacity=.25,
line = dict(color='grey', width=3, dash='dot'),
text=['# of Deaths Double Every 7 Days'],
hovertemplate='<b>Deaths Double Every 7 Days</b>',
showlegend=True))
layout_global = go.Layout(yaxis={'title':'Number of Deaths', 'range':[np.log10(100), np.log10(cases_1000_start['Deaths'].max() * 1.10)], 'type':'log', 'fixedrange':True, 'linewidth':2, 'linecolor':'black', 'showgrid': False, 'dtick': 1, 'showline':True, 'mirror':False},
title='Overall Deaths',
xaxis={'title': 'Days Since First 100 deaths', 'range': [0, deaths_100_start['Days Since 100 Deaths'].max()], 'fixedrange':True, 'linewidth':2, 'linecolor':'black', 'showgrid': False, 'showline':True, 'mirror':False}, height=750, hovermode='closest', annotations=anno)
fig_global={'data':fig100, 'layout': layout_global}
return fig_global
elif select_global == '% Mortality by Confirmed Cases (Top 20 Countries)':
figmort = []
anno =[]
m = mort.sort_values(by=['Confirmed'], ascending=False).head(20)
m = m.sort_values(by=['Mortality_Percent'], ascending=True).reset_index()
for i in range(len(m)):
m1 = m.loc[i, 'Country_Region']
#m1 = [str(i) for i in m1]
m2 = m.loc[i, 'Mortality_Percent']
#m2 = [str(round(i, 2)) for i in m2]
trace = go.Bar(name='Observed Case - Mortality Ratio',
x = [m2],
y= [m1],
text = [round(m.loc[i, 'Mortality_Percent'], 2)],
orientation ='h',
textposition='auto',
marker = dict(color='#FFB000', opacity=0.6, line=dict(color='rgba(255,176,0, 1)', width=1)),
hovertemplate='<b>%{y}</b><br>'+'<br>Observed Case Mortaility Pct: %{text}%<br>',
showlegend=False)
figmort.append(trace)
layout_global = go.Layout(yaxis={'title':'Country / Region','fixedrange':True, 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'},
title='Observed Case - Mortality Ratio',
xaxis={'title': '% Mortality by Confirmed Cases (Top 20 Countries)', 'range': [0, m['Mortality_Percent'].max() + 2], 'fixedrange':True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, height=750, hovermode='closest')
fig_global={'data':figmort, 'layout': layout_global}
return fig_global
elif select_global == 'Recoveries vs. Deaths By Country':
figscat = []
rc = mort.loc[(mort['Deaths'] >= 100) & (mort['Recovered'] >=100)].reset_index()
for i in range(len(rc)):
scat = go.Scatter(
x=[rc.loc[i, 'Deaths']],
y=[rc.loc[i, 'Recovered']],
mode='markers+text',
text=[rc.loc[i, 'Country_Region']],
marker_color=(colors_dict_global[rc.loc[i, 'Continent']]),
showlegend=False,
marker=dict(size=12,line_width=1, opacity=0.75),
hovertemplate='<b>%{text}</b><br>'+'<br>Recoveries: %{y}<br>'+'Deaths: %{x}<br>',
textposition='bottom center',
textfont=dict(size=10, color='rgba(0, 0, 0, 0.6)')
)
figscat.append(scat)
figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(), 3)),
y = [i for i in list(np.linspace(100, rc['Deaths'].max(), 3))],
mode='lines',
name='Deaths = Recoveries',
opacity=.25,
line = dict(color='grey', width=1),
text=['# of Deaths = # of Recoveries'],
hovertemplate='<b># of Deaths = # of Recoveries</b>',
showlegend=True))
figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(), 3)),
y = [i*2 for i in list(np.linspace(100, rc['Deaths'].max(), 3))],
mode='lines',
name='2 Recoveries for Every Death',
opacity=.25,
line = dict(color='green', width=3, dash='dash'),
text=['2 Recoveries for Every Death'],
hovertemplate='<b>2 Recoveries for Every Death</b>',
showlegend=True))
figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(), 3)),
y = [i/2 for i in list(np.linspace(100, rc['Deaths'].max(), 3))],
mode='lines',
name='2 Deaths for Every Recovery',
opacity=.25,
line = dict(color='firebrick', width=3, dash='dash'),
text=['2 Deaths for Every Recovery'],
hovertemplate='<b>2 Deaths for Every Recovery</b>',
showlegend=True))
layout_global = go.Layout(yaxis={'title':'Number of Recoveries','fixedrange':True, 'automargin': True, 'range':[np.log10(100), np.log10(rc['Recovered'].max() * 1.10)], 'type':'log', 'linewidth':2, 'linecolor':'black', 'showgrid': False, 'dtick': 1, 'showline':True, 'mirror':False},
title='Recoveries vs. Deaths, By Country',
xaxis={'title': 'Number of Deaths','fixedrange':True, 'range':[np.log10(100), np.log10(rc['Deaths'].max() * 1.10)], 'type':'log', 'linewidth':2, 'linecolor':'black', 'showgrid': False, 'dtick': 1, 'showline':True, 'mirror':False}, height=750, hovermode='closest')
fig_global={'data':figscat, 'layout': layout_global}
return fig_global
def _daily_panel(country, diff_col, color, trace_name, title_prefix, ytitle):
    """Build (traces, layout) for one 45-day daily-bar + 5-day-moving-average subplot.

    country      -- value matched against final_df['Country_Region']
    diff_col     -- daily-difference column to plot ('Confirmed_Diff', ...)
    color        -- bar/line color shared by both traces
    trace_name   -- legend name for the bar trace
    title_prefix -- left part of the subplot title
    ytitle       -- y-axis label
    """
    mask = final_df['Country_Region'] == country
    dates = final_df.loc[mask, 'Date'].tail(45)
    daily = final_df.loc[mask, diff_col].tail(45)
    traces = [
        go.Bar(name=trace_name, x=dates, y=daily, marker_color=color, opacity=0.6),
        go.Scatter(name='5 Day Moving Average', x=dates,
                   y=daily.rolling(window=5).mean(),
                   mode='lines', line=dict(color=color, width=3)),
    ]
    layout = go.Layout(
        yaxis={'title': ytitle, 'automargin': True, 'showline': True, 'mirror': False,
               'linewidth': 2, 'linecolor': 'black'},
        title='{0}: {1} (Last 45 Days)'.format(title_prefix, country),
        xaxis={'type': 'date', 'automargin': True, 'showline': True, 'mirror': False,
               'linewidth': 2, 'linecolor': 'black'},
        height=300, legend=dict(x=.2, y=-.15, orientation='h'))
    return traces, layout


@app.callback(
    [Output(component_id='main-dropdown-2', component_property='options'),
     Output(component_id='btext1', component_property='children'),
     Output(component_id='subplot1', component_property='figure'),
     Output(component_id='btext2', component_property='children'),
     Output(component_id='subplot2', component_property='figure'),
     Output(component_id='btext3', component_property='children'),
     Output(component_id='subplot3', component_property='figure')],
    [Input(component_id='main-dropdown', component_property='value')])
def update_country(selected_country):
    """Refresh the metric dropdown, the three headline numbers, and the three
    per-country daily subplots (cases / deaths / recoveries).

    BUGFIX(dedup): the original `if`/`else` branches were byte-for-byte
    identical except that the `if` branch first defaulted the country to
    'Canada'; the duplicated ~17-line body has been collapsed into one path.
    """
    if selected_country is None:
        selected_country = 'Canada'  # default country on first load
    options = ['Confirmed', 'Recovered', 'Deaths', 'Total and Daily Confirmed Cases']
    vals = [{'label': i, 'value': i} for i in options]
    trace_1, layout_t1 = _daily_panel(selected_country, 'Confirmed_Diff', '#648FFF',
                                      'Daily Cases', 'Daily Confirmed Cases',
                                      'Number of Confirmed Cases')
    trace_2, layout_t2 = _daily_panel(selected_country, 'Deaths_Diff', '#DC267F',
                                      'Daily Deaths', 'Daily Deaths',
                                      'Number of Deaths')
    trace_3, layout_t3 = _daily_panel(selected_country, 'Recovered_Diff', '#009E73',
                                      'Daily Recoveries', 'Daily Recovered',
                                      'Number of Recovered')
    # Headline numbers: the latest day's daily change for this country.
    latest = (final_df['Date'] == final_df['Date'].max()) & \
             (final_df['Country_Region'] == selected_country)
    return (vals,
            final_df.loc[latest, 'Confirmed_Diff'], {'data': trace_1, 'layout': layout_t1},
            final_df.loc[latest, 'Deaths_Diff'], {'data': trace_2, 'layout': layout_t2},
            final_df.loc[latest, 'Recovered_Diff'], {'data': trace_3, 'layout': layout_t3})
@app.callback(
    Output(component_id='box-1', component_property='figure'),
    [Input(component_id='main-dropdown', component_property='value'),
     Input(component_id='main-dropdown-2', component_property='value')])
def update_maingraph(selected_country, selected_graph):
    """Render the main progression chart for the chosen country and metric.

    BUGFIX(dedup): the original first two branches (`selected_graph is None`
    with and without a country) contained identical bodies; they are merged,
    keeping the original behavior of defaulting the country to 'Canada' only
    when no metric is selected either.
    """
    mask = final_df['Country_Region'] == selected_country
    if selected_graph is None:
        # No metric chosen yet: show total confirmed cases as bars.
        if selected_country is None:
            selected_country = 'Canada'
            mask = final_df['Country_Region'] == selected_country
        figmain_t = [go.Bar(name='Total Confirmed Cases',
                            x=final_df.loc[mask, 'Date'],
                            y=final_df.loc[mask, 'Confirmed'],
                            marker_color='#648FFF')]
        figmain_l = go.Layout(
            yaxis={'title': 'Number of Cases',
                   'range': [0, (final_df.loc[mask, 'Confirmed'].max() * 1.10)],
                   'automargin': True, 'showline': True, 'mirror': False,
                   'linewidth': 2, 'linecolor': 'black'},
            title='Overall Progression of COVID-19: {0}'.format(str(selected_country)),
            hovermode='x unified',
            xaxis=dict(title='Date', fixedrange=True, automargin=True, showline=True,
                       mirror=False, linewidth=2, linecolor='black'))
    elif selected_graph == 'Total and Daily Confirmed Cases':
        # Combined view: cumulative line plus filled daily-change line.
        figmain_t = [go.Scatter(name='Total Confirmed Cases',
                                x=final_df.loc[mask, 'Date'],
                                y=final_df.loc[mask, 'Confirmed'],
                                line=dict(color='#1A85FF', width=1.5), mode='lines'),
                     go.Scatter(name='Daily Confirmed Cases',
                                x=final_df.loc[mask, 'Date'],
                                y=final_df.loc[mask, 'Confirmed_Diff'],
                                line=dict(color='#D41159', width=3),
                                mode='lines', fill='tozeroy')]
        figmain_l = go.Layout(
            yaxis={'title': 'Number of Cases',
                   'range': [0, (final_df.loc[mask, 'Confirmed'].max() * 1.10)],
                   'automargin': True, 'showline': True, 'mirror': False,
                   'linewidth': 2, 'linecolor': 'black'},
            title='Overall Progression of COVID-19 ({0}): {1}'.format(
                str(selected_country), str(selected_graph)),
            hovermode='x unified',
            xaxis=dict(title='Date', fixedrange=True, automargin=True, showline=True,
                       mirror=False, linewidth=2, linecolor='black'))
    else:
        # Single-metric bar chart, colored per metric.
        cols_dict = {'Confirmed': '#648FFF', 'Deaths': '#DC267F', 'Recovered': '#009E73'}
        figmain_t = [go.Bar(name='Total {0}'.format(selected_graph),
                            x=final_df.loc[mask, 'Date'],
                            y=final_df.loc[mask, selected_graph],
                            marker_color=cols_dict[selected_graph])]
        figmain_l = go.Layout(
            yaxis={'title': 'Number of Cases',
                   'range': [0, (final_df.loc[mask, selected_graph].max() * 1.10)],
                   'automargin': True, 'showline': True, 'mirror': False,
                   'linewidth': 2, 'linecolor': 'black'},
            title='Overall Progression of COVID-19 ({0}): {1}'.format(
                str(selected_country), str(selected_graph)),
            hovermode='x unified',
            xaxis=dict(title='Date', fixedrange=True, automargin=True, showline=True,
                       mirror=False, linewidth=2, linecolor='black'))
    return {'data': figmain_t, 'layout': figmain_l}
if __name__ == '__main__':
    # Launch the Dash development server when run as a script.
    app.run_server()
7,540 | d57b91bf41f031e3362dabdef8c67a0da04fe577 | from ROOT import *
gSystem.Load("libAnalysis")
import sys
import argparse
parser = argparse.ArgumentParser(description="Python script to process and merge showers.")
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", help="Turn on verbose output",
action="store_true")
group.add_argument("-q", "--quiet", help="Turn off most output",
action="store_true")
parser.add_argument("-s","--source",help="Name of input file")
parser.add_argument("-o","--data-output",help="Output data file, if event is changed")
parser.add_argument("-a","--ana-output",help="Analysis output file")
parser.add_argument("-n", "--num-events",help="Number of events to process")
parser.add_argument("-d","--display",help="Turn on the display to see each view before and after." )
args = parser.parse_args()
if len(sys.argv) == 1:
parser.print_help()
if args.verbose:
print "Verbose mode turned on."
if args.source != None:
print "\tSource file is " + args.source
if args.data_output != None:
print "\tData output file is " + args.data_output
if args.ana_output != None:
print "\tAna output file is " + args.ana_output
if args.source == None:
print "Error: please specificy an input file with -s or --source."
quit()
if args.data_output == None:
args.data_output = "default_event_output.root"
print "No event output file selected. If necessary, output will go to:"
print "\t"+args.data_output
if args.ana_output == None:
args.ana_output = "default_ana_output.root"
print "No ana output file selected. If necessary, output will go to:"
print "\t"+args.ana_output
ana_proc = larlight.ana_processor()
if args.verbose:
ana_proc.set_verbosity(larlight.MSG.DEBUG)
# Not sure what this does
ana_proc.set_io_mode(larlight.storage_manager.BOTH)
# Add the input file. Not sure if the above takes multiple input files yet
ana_proc.add_input_file(args.source)
# ?
larlight.storage_manager.get().set_in_rootdir("scanner")
# set output file
ana_proc.set_output_file(args.data_output)
# Set destination for ana stuff
ana_proc.set_ana_output_file(args.ana_output)
my_merge_alg = larlight.ClusterMergeAlg()
my_merger = larlight.ClusterMerge()
my_merger.set_mergealg(my_merge_alg)
ana_proc.add_process(my_merge_alg)
ana_proc.add_process(my_merger)
c=TCanvas("c","Wire v. Time Cluster Viewer",900,600)
while ana_proc.process_event() and ana_proc.get_process_status() == ana_proc.PROCESSING:
currentview = 0;
print my_merge_alg.GetMergeTree()
for iview in xrange(0,3):
for iclus in xrange(ana_proc.GetClusterGraph_Reco(int(iview),bool(true)).size()):
gstart=ana_proc.GetClusterGraph_Reco(int(iview),bool(true)).at(iclus)
gend =ana_proc.GetClusterGraph_Reco(int(iview),bool(false)).at(iclus)
xmin=ana_proc.GetHisto_Hits(int(iview)).GetXaxis().GetXmin()
xmax=ana_proc.GetHisto_Hits(int(iview)).GetXaxis().GetXmax()
ymin=ana_proc.GetHisto_Hits(int(iview)).GetYaxis().GetXmin()
ymax=ana_proc.GetHisto_Hits(int(iview)).GetYaxis().GetXmax()
gstart.GetXaxis().SetLimits(xmin,xmax)
gend.GetXaxis().SetLimits(xmin,xmax)
gstart.GetYaxis().SetRangeUser(ymin,ymax)
gend.GetYaxis().SetRangeUser(ymin,ymax)
gstart.SetTitle("View: %d, Cluster: %d"%(iview+1,iclus))
gstart.SetMarkerSize(3)
gstart.SetMarkerStyle(30)
gend.SetMarkerSize(3)
gend.SetMarkerStyle(29)
gstart.Draw("ALP")
gend.Draw("LP")
ana_proc.GetHisto_Reco(int(iview)).at(iclus).Draw("same")
leg = TLegend(0.6,0.65,0.88,0.85)
leg.AddEntry(gstart,"Start Point","p")
leg.AddEntry(gend,"End Point","p")
leg.Draw()
c_graph.Update()
print "Drawing cluster %d out of %d for view %d. To look at the next cluster hit enter." % (iclus,ana_proc.GetClusterGraph_Reco(int(iview),bool(true)).size()-1,iview+1)
sys.stdin.readline()
print "Hit Enter to continue to next evt..."
sys.stdin.readline()
#ana_proc.run()
|
7,541 | acb879cb72e5b3ac897a271dc680e4ca763d2122 | from django.db import models
class Professor(models.Model):
    """A teacher profile with a name, nickname and free-form description."""
    # Full display name.
    nome = models.CharField(max_length=100)
    # Short nickname shown in listings.
    apelido = models.CharField(max_length=30)
    # Free-form description; NOTE(review): max_length on a TextField is
    # enforced by forms, not at the database level.
    descricao = models.TextField(max_length=1000)

    def __str__(self):
        # Human-readable representation (admin, shell, templates).
        return self.nome
class ImagemProfessor(models.Model):
    """A photo attached to a Professor (one professor, many photos)."""
    # Owning teacher; CASCADE removes the photos when the teacher is deleted.
    professor = models.ForeignKey(Professor, on_delete=models.CASCADE)
    # Optional image, stored under fotos/<day>/<month>/<year>/.
    foto = models.ImageField(upload_to='fotos/%d/%m/%Y/', blank=True)

    def __str__(self):
        # Consistency fix: Professor defines __str__ but this model did not,
        # so the admin displayed the unhelpful "ImagemProfessor object (N)".
        return 'Imagem de {0}'.format(self.professor)
|
7,542 | 5fb3905abf958f0a8be41cd6ad07efb2a0cf6c66 | import sys, os
def resource_path(relative_path):
    """Resolve *relative_path* against the application's base directory.

    Works both in development (current working directory) and inside a
    PyInstaller bundle, where extracted resources live in ``sys._MEIPASS``.
    """
    base_path = getattr(sys, "_MEIPASS", None)
    if base_path is None:
        # Not running from a PyInstaller bundle: resolve from the CWD.
        base_path = os.path.abspath(".")
    return os.path.join(base_path, relative_path)
def numStrip(n):
    """Return the decimal digits of non-negative *n*, least significant first."""
    if n == 0:
        return [0]
    digits = []
    while n > 0:
        # divmod peels off the lowest digit and shifts n right by one place.
        n, digit = divmod(n, 10)
        digits.append(digit)
    return digits
|
7,543 | e7c18fa99c801fd959c868954f020d8c55babe0d |
def long_alpha(str1):
    """Return the first longest run of non-decreasing characters in *str1*."""
    runs = []
    current = ""
    for ch in str1:
        if current and ch < current[-1]:
            # This character breaks the non-decreasing order: close the run.
            runs.append(current)
            current = ""
        current += ch
    runs.append(current)
    # max() keeps the first maximal run, matching the left-to-right scan.
    return max(runs, key=len)
# Demo: report the longest non-decreasing run in the sample string.
str1 = "abcaklmoeeffd"
res = long_alpha(str1)
print(res)
|
7,544 | ad9facb9c8e552845df9171549f886f3e9cba193 | # PROBLEM: Code organized in package and want to import a submodule from one o the other pkg
# submodules without hardcoding the package name into the import statement.
# SOLUTION: use a package-relative import (e.g. ``from . import grok``).
# Absolute path shown below for contrast with the relative form.
from mypackage.A import grok
print(dir(grok))
grok.testA()
7,545 | 7a6d45ef87d93af9a15bd352b893164d3a36c399 | import sys
import os
import traceback
from src.properties import *
from src.utils import *
from subprocess import call
from src.entity.cursor import Cursor
from curses import *
def main(screen, file_path):
    """Curses entry point: edit *file_path* and return (exit_code, error_msg).

    Exit codes: 3 = saved, 2 = quit without saving, 1 = Ctrl+C,
    -1 = unexpected error (error_msg holds the traceback).
    """
    setUpEnv()
    text = readFileIfExist(file_path)
    while 1:
        try:
            text = startEditing(screen, text)
            printQuitOptions(screen)
            char = screen.getch()
            if char == KEY_ENTER_CODE:
                # Save the buffer and quit.
                writeToFile(file_path, text)
                return 3, None
            elif char == KEY_F9:
                # Quit without saving.
                return 2, None
            else:
                # Any other key: return to editing.
                pass
        except KeyboardInterrupt:  # quit properly, when user press Ctrl + C
            return 1, None
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed SystemExit;
            # report only unexpected errors, with a traceback for the caller.
            error_msg = traceback.format_exc()
            return -1, error_msg
def setUpEnv():
    """Configure curses colors: honor the terminal defaults (-1 background)
    and register the color pair used for the text-box border."""
    use_default_colors()
    init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)
def startEditing(screen, text):
    """Run the interactive edit loop until the user presses F1.

    Returns the (possibly modified) text buffer as a string.
    """
    cursor = Cursor(screen, BORDER_COLOR, text)
    while 1:
        char = screen.getch()
        if char == KEY_F1:
            # F1 leaves the editor and hands control back to main().
            break
        elif char == TERMINAL_RESIZE_CODE:
            cursor.resizeTextBox()
        elif char == KEY_RIGHT:
            cursor.moveRight()
        elif char == KEY_LEFT:
            cursor.moveLeft()
        elif char == KEY_UP:
            cursor.moveUp()
        elif char == KEY_DOWN:
            cursor.moveDown()
        elif 31 < char < 127:
            # Printable ASCII range: insert the character.
            cursor.writeChar(char)
        elif char == KEY_DELETE_CODE:
            cursor.delete()
        elif char == 10 or char == 13 or char == KEY_ENTER:
            # LF, CR and the keypad Enter key all insert a new line.
            cursor.newLine()
        elif char == KEY_TAB_CODE:
            cursor.tab()
        elif char == KEY_ESCAPE_CODE:
            # Escape prefix: CMD/ALT chords arrive as ESC + second key.
            char = screen.getch() # get the key pressed after cmd or alt
            if char == KEY_LEFT or char == 98: # 98 and 102 are left and right keys produced while pressing alt, on mac terminal
                cursor.moveToLeftMost()
            elif char == KEY_RIGHT or char == 102: # CMD + RIGHT
                cursor.moveToRightMost()
            elif char == KEY_DELETE_CODE: # CMD + DELETE
                cursor.deleteWholeLine()
            elif char == KEY_DOWN: # CMD + DOWN
                cursor.moveToRightBottomMost()
            elif char == KEY_UP: # CMD + UP
                cursor.moveToRightUpMost()
            else: # in case char user press ESC, it produce the same effec as CMD or ALT, but that's not what we want
                ungetch(char)
        else:
            # Unknown key code: write its numeric value into the buffer
            # (presumably a debugging aid — TODO confirm).
            cursor._writeString(str(char))
    return cursor.getText()
def printQuitOptions(screen):
height, width = screen.getmaxyx()
screen.clear()
y = int(height / 2.5)
x = int(width / 2.5)
screen.addstr(y, x, "Quit and Save (ENTER)")
screen.addstr(y + 1, x, "Quit (F9)")
screen.addstr(y + 2, x, "Go Back (Any Key)")
screen.refresh()
def printExitMessage(exit_code, error_msg):
    """Print a human-readable message for the given editor exit code."""
    if exit_code == -1:
        # Unexpected failure: show the apology plus the traceback, if any.
        printToTerminal("Shit just happen, sorry.")
        if error_msg:
            printToTerminal(error_msg)
        return
    one_liners = {
        1: "Quit, safe and sound.",
        2: "Quit without save.",
        3: "saved !",
    }
    if exit_code in one_liners:
        printToTerminal(one_liners[exit_code])
    elif exit_code == 4:  # -version
        printToTerminal(VERSION)
    elif exit_code == 5:  # -help
        printToTerminal("======================== Welcome to Simple Editor X ========================", "GREEN")
        for line in (
            "",
            "Arguments:",
            "    -version",
            "    -help",
            "    {file_name}, to start editing an existing or create a new file",
            "",
            "While using:",
            "    Press F1, then ENTER to save",
            "",
        ):
            printToTerminal(line)
if __name__== "__main__":
    # Exactly one argument is expected: a flag (-v/-h) or a file path.
    if len(sys.argv) != 2:
        printToTerminal("This application take exactly 1 argument")
        printToTerminal("type: 'sex -help' for more details")
        exit(69)
    error_msg = ""
    exit_code = -1
    arg = sys.argv[1].lower()
    file_path = sys.argv[1]
    if arg == "-v" or arg == "-version":
        exit_code = 4
    elif arg == "-h" or arg == "-help":
        exit_code = 5
    else:
        # Treat the argument as a file path and enter the curses editor;
        # curses.wrapper restores the terminal state however main() exits.
        exit_code, error_msg = wrapper(main, file_path)
    printExitMessage(exit_code, error_msg)
|
# Read N incrementally built words (each line: a character plus a 1-based
# reference to an earlier word), then answer K prefix-count queries.
# BUGFIX(idiom): the original stored everything in a variable named `list`,
# shadowing the builtin; it is renamed to `words`.
first = input().split()
n = int(first[0])
k = int(first[1])
words = []
for i in range(0, n):
    # Append a placeholder first so a self/zero reference sees the same
    # values the original code did ("" for self, the last word for 0).
    words.append("")
    parts = input().split()
    words[i] = parts[0] + words[int(parts[1]) - 1]
for _ in range(0, k):
    prefix = input()
    # Count matching words without materializing an intermediate list.
    print(sum(1 for word in words if word.startswith(prefix)))
class boxCar:
    """A break-out box car: ECUs registered per vehicle domain, plus a test phase."""

    def __init__(self, *args, **kwargs):
        print("print the keyword arguments dictionary {0} by {1}".format(kwargs, "WANGH"))
        # Raises KeyError if the mandatory 'name' keyword is missing.
        self.name = kwargs["name"]
        # Accepted domain codes (inputs are upper-cased before matching).
        self.domains = ["BODY", "PWT", "INFO", "ADAS", "INF"]
        # domain code -> list of ECU names.
        self.configuration = {}

    def addEcu(self, ecu, domain):
        """Register *ecu* under *domain*; both are normalized to upper case."""
        if domain.upper() in self.domains:
            self.configuration.setdefault(domain.upper(), []).append(ecu.upper())
        else:
            # BUGFIX: the hint used to list names ("Info", "Infra") that do not
            # match the accepted codes; show the real accepted list instead.
            print("please input one of the following domain {}".format(self.domains))

    def deleteEcu(self, ecu, domain):
        # TODO: not implemented yet.
        pass

    def getConfiguration(self):
        """Return the domain -> ECU-list mapping."""
        return self.configuration

    def setTestPhase(self, phase):
        """Set the current test phase if it is one of the known phases."""
        valid_phases = ["E1", "E2", "E3", "E4", "TT", "PP"]
        if phase.upper() in valid_phases:
            self.testPhase = phase.upper()
        else:
            # BUGFIX: the message said "domain" although this is a test phase,
            # and listed the domain names instead of the phases.
            print("please input one of the following test phase {}".format(valid_phases))
def test():
    """Smoke test: construct a boxCar and print its name.

    The commented lines below exercise the rest of the API and are kept
    for manual experimentation.
    """
    boxcar=boxCar(name="CM01")
    print(boxcar.name)
    # print(boxcar.domains)
    # print(boxcar.configuration)
    # boxcar.addEcu("CEM","body")
    # boxcar.addEcu("BECM","PWT")
    # boxcar.addEcu("BNCM", "body")
    # print(boxcar.configuration)
    # boxcar.setTestPhase("E1")
    # print(boxcar.testPhase)
if __name__ == "__main__":
    test()
|
7,548 | 0af45914c8c111a42b0b9684f5f0ee19ef5eeb70 | import math
import random
import time
import numpy as np
class NeuralNetwork:
    """Tiny fully connected network (input -> one hidden layer -> output)
    trained by back-propagation to recognise the 5x5 digit bitmaps below.

    Weights are plain nested Python lists; training mutates them in place.
    `randomTraining()` must be called before `test()` so that
    `self.output_data` (the trained network) exists.
    """

    # 5x5 pixel bitmaps (row-major, 1 = pixel on) for the digits 0-9.
    digits = [
        [
            1,1,1,1,1,
            1,0,0,0,1,
            1,0,0,0,1,
            1,0,0,0,1,
            1,1,1,1,1
        ],
        [
            0,0,1,0,0,
            0,0,1,0,0,
            0,0,1,0,0,
            0,0,1,0,0,
            0,0,1,0,0
        ],
        [
            1,1,1,1,1,
            0,0,0,0,1,
            1,1,1,1,1,
            1,0,0,0,0,
            1,1,1,1,1
        ],
        [
            1,1,1,1,1,
            0,0,0,0,1,
            1,1,1,1,1,
            0,0,0,0,1,
            1,1,1,1,1
        ],
        [
            1,0,0,0,1,
            1,0,0,0,1,
            1,1,1,1,1,
            0,0,0,0,1,
            0,0,0,0,1
        ],
        [
            1,1,1,1,1,
            1,0,0,0,0,
            1,1,1,1,1,
            0,0,0,0,1,
            1,1,1,1,1
        ],
        [
            1,1,1,1,1,
            1,0,0,0,0,
            1,1,1,1,1,
            1,0,0,0,1,
            1,1,1,1,1
        ],
        [
            1,1,1,1,1,
            0,0,0,1,0,
            0,0,1,0,0,
            0,1,0,0,0,
            1,0,0,0,0
        ],
        [
            1,1,1,1,1,
            1,0,0,0,1,
            1,1,1,1,1,
            1,0,0,0,1,
            1,1,1,1,1
        ],
        [
            1,1,1,1,1,
            1,0,0,0,1,
            1,1,1,1,1,
            0,0,0,0,1,
            0,0,0,0,1
        ]
    ]
    # One-hot targets: base_output[d] is the expected output for digit d.
    base_output = [
        [1,0,0,0,0,0,0,0,0,0],
        [0,1,0,0,0,0,0,0,0,0],
        [0,0,1,0,0,0,0,0,0,0],
        [0,0,0,1,0,0,0,0,0,0],
        [0,0,0,0,1,0,0,0,0,0],
        [0,0,0,0,0,1,0,0,0,0],
        [0,0,0,0,0,0,1,0,0,0],
        [0,0,0,0,0,0,0,1,0,0],
        [0,0,0,0,0,0,0,0,1,0],
        [0,0,0,0,0,0,0,0,0,1]
    ]
    # When True, training prints per-neuron weights and per-iteration error.
    show_operations = False

    def __init__(self, seed = 5, alpha = 0.1, min_error_percentage = 0.0005, input_size = 25, output_size = 10, hidden_num = 5):
        """Configure the network.

        seed -- RNG seed used by randomize()
        alpha -- learning rate
        min_error_percentage -- mean-squared-error threshold that stops training
        input_size / output_size / hidden_num -- layer widths
        """
        self.seed = seed
        self.alpha = alpha
        self.min_error_percentage = min_error_percentage
        self.input_size = input_size
        self.output_size = output_size
        self.hidden_num = hidden_num

    def withSeed(self, seed):
        """Builder-style setter; returns self for chaining."""
        self.seed = seed
        return self

    def withAlpha(self, alpha):
        """Builder-style setter; returns self for chaining."""
        self.alpha = alpha
        return self

    def withMinErrorPercentage(self, min_error_percentage):
        """Builder-style setter; returns self for chaining."""
        self.min_error_percentage = min_error_percentage
        return self

    def verbose(self, show_operations):
        """Enable/disable training output (shadows the class attribute)."""
        self.show_operations = show_operations
        return self

    def withHiddenLabels(self, hidden_num):
        """Builder-style setter for the hidden-layer width; returns self."""
        self.hidden_num = hidden_num
        return self

    def randomize(self):
        """Build a fresh network: two weight layers, each neuron carrying one
        extra weight for the bias input. randint(-1, 0) initialises every
        weight to either -1 or 0 (integers)."""
        random.seed(self.seed)
        neural_network = [
            [
                [random.randint(-1, 0) for _ in range(self.input_size + 1)] for _ in range(self.hidden_num)
            ],
            [
                [random.randint(-1, 0) for _ in range(self.hidden_num + 1)] for _ in range(self.output_size)
            ]
        ]
        return neural_network

    def sigmoid(self, x):
        """Logistic activation function."""
        return 1 / (1 + math.exp(-x))

    def product(self, v, w):
        """Dot product of two equal-length vectors."""
        return sum([a * b for a, b in zip(v, w)])

    def neuron_output(self, weights, inputs):
        """Single neuron: sigmoid of the weighted input sum."""
        return self.sigmoid(self.product(weights, inputs))

    def ffnn(self, neural_network, inputs):
        """Feed-forward pass. Appends a constant 1 (bias) to each layer's
        input and returns the list of per-layer outputs
        ([hidden_outputs, final_outputs] for this two-layer network)."""
        outputs = []
        for label in neural_network:
            inputs = inputs + [1]
            output = [self.neuron_output(neuron, inputs) for neuron in label]
            outputs.append(output)
            inputs = output
        return outputs

    def back_propagation(self, digit, inputs, target):
        """One gradient-descent step on a single (inputs, target) example.

        NOTE: despite its name, `digit` is the network (the two weight
        layers); the weight lists are mutated in place and also returned as
        (new_hidden, new_output, error), where error is 0.5 * sum of squared
        output errors.
        """
        hidden_output, output = self.ffnn(digit, inputs)
        new_output = []
        new_hidden = []
        error = sum((output - target) * (output - target) for output, target in zip(output, target)) * 0.5
        # Output-layer deltas: sigmoid derivative times output error.
        delta_output = [output * (1 - output) * (output - target) for output, target in zip(output, target)]
        for i, output_neuron in enumerate(digit[-1]):
            # The appended 1 is the bias input for the output layer.
            for j, hidden_output_current in enumerate(hidden_output + [1]):
                output_neuron[j] -= delta_output[i] * hidden_output_current * self.alpha
            new_output.append(output_neuron)
            if (self.show_operations):
                print("Neuron weights: ", i, output_neuron)
        # Hidden-layer deltas: back-propagate the output deltas through the
        # output weights of each hidden neuron.
        hidden_delta = [hidden_output_current * (1 - hidden_output_current) * self.product(delta_output, [n[i] for n in digit[-1]]) for i, hidden_output_current in enumerate(hidden_output)]
        for i, hidden_neuron in enumerate(digit[0]):
            for j, input_ in enumerate(inputs + [1]):
                hidden_neuron[j] -= hidden_delta[i] * input_ * self.alpha
            new_hidden.append(hidden_neuron)
            if (self.show_operations):
                print("Hidden neuron weights: ", i, hidden_neuron)
        return new_hidden, new_output, error

    def randomTraining(self):
        """Train from a fresh random network until the mean squared error over
        the ten digits drops below min_error_percentage. Stores the trained
        network in self.output_data and prints timing statistics."""
        print("Starting training...")
        start = time.time()
        output = self.randomize()
        sq_error = 1
        iterations = 1
        print("Initial random network: ", output)
        while sq_error > self.min_error_percentage:
            sq_error = 0
            for i in range(len(self.digits)):
                hidden, output, error = self.back_propagation(output, self.digits[i], self.base_output[i])
                # Reassemble the two layers into a network for the next step.
                output = [hidden, output]
                sq_error += error
            sq_error = sq_error / len(self.digits)
            if (self.show_operations):
                print("Iterations: ", iterations, ", error percentage: ", sq_error)
            iterations += 1
        self.output_data = output
        end = time.time()
        elapsed = end - start
        print("Trained finished in: ", elapsed, " seconds")
        print("Total iterations: ", iterations)
        print("Error percentage: ", sq_error)
        print("Output result: ", self.output_data)

    def guessWith(self, output):
        """Return the index of the output value closest to 1."""
        index = 0
        closest_dif = abs(output[0] - 1)
        for i, value in enumerate(output):
            current_dif = abs(value - 1)
            if (current_dif < closest_dif):
                closest_dif = current_dif
                index = i
        return index

    def test(self, input_):
        """Classify one 25-element bitmap with the trained network
        (requires randomTraining() to have been run) and print the guess."""
        result = self.ffnn(self.output_data, input_)[-1]
        print("Output: ", result)
        print("Your number probably is: ", self.guessWith(result))
|
7,549 | e8a024796b6426e572571e46030678e90c537229 | from django import forms
from django.forms import widgets
# from product.models import PRODUCT_OTHER_CHOICE, PRODUCT_CATEGORY_CHOICES
# Stored value used as the default / catch-all product category.
PRODUCT_OTHER_CHOICE = 'other'
# (stored value, display label) pairs for the category select widget.
PRODUCT_CATEGORY_CHOICES = (
    (PRODUCT_OTHER_CHOICE, 'Разное'),
    ('food', 'Еда'),
    ('drink', 'Вода'),
    ('cloth', 'Одежда'),
    ('electronics', 'Электроника')
)
class ProductForm(forms.Form):
    """Create/edit form for a product (plain Form, not bound to a model)."""
    # Product name.
    name = forms.CharField(max_length=100, label='Наименование')
    # Long description rendered as a textarea.
    description = forms.CharField(max_length=2000, required=True, label='Описание', widget=forms.Textarea)
    # Optional category drawn from PRODUCT_CATEGORY_CHOICES.
    category = forms.ChoiceField(required=False, widget=forms.Select, choices=PRODUCT_CATEGORY_CHOICES, label='Категория')
    # Stock on hand; must be non-negative.
    amount = forms.IntegerField(min_value=0, label='Остаток')
    # Price with two decimal places, up to 99999.99.
    price = forms.DecimalField(max_digits=7, decimal_places=2, label='Цена')
class FindForm(forms.Form):
    """Search form: look products up by name."""
    name = forms.CharField(max_length=100, label='Наименование')
|
7,550 | 826abb18b11afd7a010e2bfc5a29ba068218c23a | from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import View
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
class View1(LoginRequiredMixin, View):
    """Permission checked in dispatch() BEFORE delegating to the handler."""

    def dispatch(self, request, *args, **kwargs):
        # Deny early: the GET handler never runs without the permission.
        if not request.user.has_perm('cbv.do_something'):
            raise PermissionDenied
        return super().dispatch(request, *args, **kwargs)

    def get(self, request, *args, **kwargs):
        return HttpResponse("Contenu view1")
class View2(LoginRequiredMixin, View):
    """Variant that checks the permission AFTER running the handler.

    NOTE(review): super().dispatch() executes the GET handler (and any of
    its side effects) before PermissionDenied can be raised — only the
    response is withheld. Presumably intentional here as a contrast with
    View1; confirm before reusing this pattern.
    """

    def dispatch(self, request, *args, **kwargs):
        response = super().dispatch(request, *args, **kwargs)
        if not request.user.has_perm('cbv.do_something'):
            raise PermissionDenied
        return response

    def get(self, request, *args, **kwargs):
        return HttpResponse("Contenu view2")
@method_decorator(login_required, name='dispatch')
class View3(View):
    """Same early permission check as View1, but the login requirement is
    applied with @method_decorator instead of LoginRequiredMixin."""

    def dispatch(self, request, *args, **kwargs):
        # Deny before any handler runs.
        if not request.user.has_perm('cbv.do_something'):
            raise PermissionDenied
        return super().dispatch(request, *args, **kwargs)

    def get(self, request, *args, **kwargs):
        # BUGFIX: the body previously read "Contenu view2" (copy-paste from
        # View2); each demo view should identify itself.
        return HttpResponse("Contenu view3")
|
7,551 | 34ad2e6fc7167766dac1ca962cab40511c89ad68 | import zipfile
# Python 2 script: solves the Python Challenge "channel" puzzle by following
# the "... is NNN" chain of entries inside channel.zip, collecting each
# entry's zip comment along the way; the joined comments are the answer.
zzz = zipfile.ZipFile('channel.zip','r')
filestr = '90052'  # starting file number of the chain
comment = []       # accumulated zip-entry comments, printed at the end
for i in range(1000):  # hard cap so a malformed chain cannot loop forever
    fname = filestr + ".txt"
    # Linear scan of the archive index to find this entry's comment.
    for j in zzz.infolist():
        if j.filename == fname :
            print j.comment
            comment.append(j.comment)
            break
    inzzz = zzz.open(fname).read()
    print 'fname = ' + fname
    print 'inzzz = ' + inzzz
    try:
        # Each file reads "... is NNN"; follow the pointer to the next file.
        filestr = inzzz.split('is ')[1].split('\n')[0]
    except IndexError:
        # Chain ended: print the assembled message.
        print " ".join(comment)
        break
#zzz.read()
|
7,552 | 0f2882971f08450e970e188ed2a06ae1683c682c | import argparse
import logging
import enum
from abc import ABCMeta, abstractmethod
from nmigen import *
from ....gateware.pads import *
from ....gateware.i2c import I2CTarget
from ... import *
class Event(enum.IntEnum):
    """One-byte event codes streamed to the host over the IN FIFO."""
    START = 0x10
    STOP = 0x20
    RESTART = 0x30
    WRITE = 0x40
    READ = 0x50
class I2CTargetSubtarget(Elaboratable):
    """Gateware bridging an I2CTarget core to the host FIFOs.

    Every bus event is reported to the host as one Event byte on in_fifo
    (WRITE is followed by the written data byte). The host replies to a
    write with an ACK byte and to a read with a data byte on out_fifo.
    """
    def __init__(self, pads, out_fifo, in_fifo, address):
        self.pads = pads
        self.out_fifo = out_fifo  # host -> gateware (ack decisions, read data)
        self.in_fifo = in_fifo    # gateware -> host (events, written data)
        self.address = address    # fixed target address
    def elaborate(self, platform):
        m = Module()
        m.submodules.i2c_target = i2c_target = I2CTarget(self.pads)
        m.d.comb += i2c_target.address.eq(self.address)
        with m.FSM():
            w_data = Signal(8)  # byte latched from a bus write
            # Assert busy (clock stretch) in every state except IDLE: the
            # host roundtrip is far slower than the bus.
            m.d.comb += i2c_target.busy.eq(1)
            with m.State("IDLE"):
                m.d.comb += i2c_target.busy.eq(0)
                with m.If(i2c_target.start):
                    m.next = "SEND-START-EVENT"
                with m.Elif(i2c_target.stop):
                    m.next = "SEND-STOP-EVENT"
                with m.Elif(i2c_target.restart):
                    m.next = "SEND-RESTART-EVENT"
                with m.Elif(i2c_target.write):
                    m.d.sync += w_data.eq(i2c_target.data_i)
                    m.next = "SEND-WRITE-EVENT"
                with m.Elif(i2c_target.read):
                    m.next = "SEND-READ-EVENT"
            # Each SEND-* state pushes one event byte and waits for FIFO space.
            with m.State("SEND-START-EVENT"):
                m.d.comb += [
                    self.in_fifo.w_data.eq(Event.START),
                    self.in_fifo.w_en.eq(1),
                ]
                with m.If(self.in_fifo.w_rdy):
                    m.next = "IDLE"
            with m.State("SEND-STOP-EVENT"):
                m.d.comb += [
                    self.in_fifo.w_data.eq(Event.STOP),
                    self.in_fifo.w_en.eq(1),
                ]
                with m.If(self.in_fifo.w_rdy):
                    m.next = "IDLE"
            with m.State("SEND-RESTART-EVENT"):
                m.d.comb += [
                    self.in_fifo.w_data.eq(Event.RESTART),
                    self.in_fifo.w_en.eq(1),
                ]
                with m.If(self.in_fifo.w_rdy):
                    m.next = "IDLE"
            with m.State("SEND-WRITE-EVENT"):
                m.d.comb += [
                    self.in_fifo.w_data.eq(Event.WRITE),
                    self.in_fifo.w_en.eq(1),
                ]
                with m.If(self.in_fifo.w_rdy):
                    m.next = "SEND-WRITE-DATA"
            with m.State("SEND-WRITE-DATA"):
                # Follow the WRITE event with the latched data byte.
                m.d.comb += [
                    self.in_fifo.w_data.eq(w_data),
                    self.in_fifo.w_en.eq(1),
                ]
                with m.If(self.in_fifo.w_rdy):
                    m.next = "RECV-WRITE-ACK"
            with m.State("RECV-WRITE-ACK"):
                # Host decides the ACK; bit 0 of the reply drives ack_o.
                with m.If(self.out_fifo.r_rdy):
                    m.d.comb += [
                        i2c_target.ack_o.eq(self.out_fifo.r_data[0]),
                        self.out_fifo.r_en.eq(1),
                    ]
                    m.next = "IDLE"
            with m.State("SEND-READ-EVENT"):
                m.d.comb += [
                    self.in_fifo.w_data.eq(Event.READ),
                    self.in_fifo.w_en.eq(1),
                ]
                with m.If(self.in_fifo.w_rdy):
                    m.next = "RECV-READ-DATA"
            with m.State("RECV-READ-DATA"):
                # Host supplies the byte to place on the bus.
                with m.If(self.out_fifo.r_rdy):
                    m.d.comb += [
                        i2c_target.data_o.eq(self.out_fifo.r_data),
                        self.out_fifo.r_en.eq(1),
                    ]
                    m.next = "IDLE"
        return m
class I2CTargetInterface(metaclass=ABCMeta):
    """Host-side event pump: decodes the device's event stream and dispatches
    to the abstract on_* callbacks implemented by a concrete target."""
    def __init__(self, interface, logger):
        self.lower = interface
        self._logger = logger
        # NOTE(review): logging.TRACE is not a stdlib level — presumably
        # registered by the surrounding framework; confirm.
        self._level = logging.DEBUG if self._logger.name == __name__ else logging.TRACE
    def _log(self, message, *args):
        self._logger.log(self._level, "I²C: " + message, *args)
    async def read_event(self):
        """Read one event byte and dispatch it; WRITE/READ callbacks produce a
        reply (ack byte / data byte) that is sent back to the device."""
        event, = await self.lower.read(1)
        if event == Event.START:
            self._log("event start")
            await self.on_start()
        elif event == Event.STOP:
            self._log("event stop")
            await self.on_stop()
        elif event == Event.RESTART:
            self._log("event restart")
            await self.on_restart()
        elif event == Event.WRITE:
            # WRITE events carry one data byte after the event code.
            data, = await self.lower.read(1)
            self._log("event write data=<%02x>", data)
            ack = await self.on_write(data)
            assert isinstance(ack, bool)
            self._log("write %s", "ack" if ack else "nak")
            await self.lower.write([ack])
        elif event == Event.READ:
            self._log("event read")
            data = await self.on_read()
            assert isinstance(data, int) and data in range(256)
            self._log("read data=<%02x>", data)
            await self.lower.write([data])
        else:
            assert False
    # Callbacks for each bus event; on_write returns the ACK decision (bool),
    # on_read returns the byte (0..255) to place on the bus.
    @abstractmethod
    async def on_start(self):
        pass
    @abstractmethod
    async def on_stop(self):
        pass
    @abstractmethod
    async def on_restart(self):
        pass
    @abstractmethod
    async def on_write(self, data):
        pass
    @abstractmethod
    async def on_read(self):
        pass
class _DummyI2CTargetInterface(I2CTargetInterface):
    """Default target: ignores START/STOP/RESTART, ACKs every write, and
    returns 0xFF for every read (logging happens in the base class)."""
    async def on_start(self):
        pass
    async def on_stop(self):
        pass
    async def on_restart(self):
        pass
    async def on_write(self, data):
        return True
    async def on_read(self):
        return 0xFF
class I2CTargetApplet(GlasgowApplet, name="i2c-target"):
    """Applet wiring: pins/address build arguments, gateware instantiation,
    and the run loop that pumps bus events through the interface class."""
    logger = logging.getLogger(__name__)
    help = "accept I²C transactions"
    description = """
    Process transactions on the I²C bus as a software-defined target.
    This applet allows emulating any I²C device in Python, provided that the I²C initiator supports
    clock stretching and tolerates delays caused by host roundtrips. (Unfortunately, this excludes
    many I²C initiators.)
    The default emulated device is a dummy device that logs all transactions, acknowledges all
    writes, and returns 0xFF in response to all reads.
    """
    required_revision = "C0"
    __pins = ("scl", "sda")
    # Subclasses can swap in a different I2CTargetInterface implementation.
    interface_cls = _DummyI2CTargetInterface
    @classmethod
    def add_build_arguments(cls, parser, access):
        super().add_build_arguments(parser, access)
        for pin in cls.__pins:
            access.add_pin_argument(parser, pin, default=True)
        def i2c_address(arg):
            # Accept decimal, hex (0x..) or binary (0b..) notation.
            return int(arg, 0)
        parser.add_argument(
            "-A", "--address", type=i2c_address, metavar="I2C-ADDR", required=True,
            help="I²C address of the target")
    def build(self, target, args):
        self.mux_interface = iface = target.multiplexer.claim_interface(self, args)
        iface.add_subtarget(I2CTargetSubtarget(
            pads=iface.get_pads(args, pins=self.__pins),
            out_fifo=iface.get_out_fifo(),
            in_fifo=iface.get_in_fifo(),
            address=args.address,
        ))
    @classmethod
    def add_run_arguments(cls, parser, access):
        super().add_run_arguments(parser, access)
        parser.add_argument(
            "--pulls", default=False, action="store_true",
            help="enable integrated pull-ups")
    async def run(self, device, args):
        pulls = set()
        if args.pulls:
            pulls = {args.pin_scl, args.pin_sda}
        iface = await device.demultiplexer.claim_interface(self, self.mux_interface, args,
            pull_high=pulls)
        return self.interface_cls(iface, self.logger)
    async def interact(self, device, args, iface):
        # Pump events forever; termination comes from outside (e.g. Ctrl-C).
        while True:
            await iface.read_event()
# -------------------------------------------------------------------------------------------------
class I2CTargetAppletTestCase(GlasgowAppletTestCase, applet=I2CTargetApplet):
    @synthesis_test
    def test_build(self):
        """Smoke-test that the applet gateware synthesizes for a sample address."""
        self.assertBuilds(args=["-A", "0b1010000"])
|
7,553 | 4afc2ceed860c20af071e1d9ccaca17973cb9a8e | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def swapPairs(self, head: ListNode) -> ListNode:
        """Swap every two adjacent nodes of the list and return the new head."""
        sentinel = ListNode(0)
        sentinel.next = head
        prev = sentinel
        # Stop when fewer than two nodes remain after prev.
        while prev.next and prev.next.next:
            first, second = prev.next, prev.next.next
            # Rewire: prev -> second -> first -> (rest)
            first.next = second.next
            second.next = first
            prev.next = second
            prev = first
        return sentinel.next
# !!! Linked-list rewiring recipe:
# - use a dummy head node;
# - define pre/cur outside the loop and derive next / next_next inside it
#   (when needed), so that `cur and cur.next` can serve as the while condition.
|
7,554 | 927470fe0087b17e5fe67a9b8b3cc13a40d8be1a | import BlockDeviceHandler
import json
import LocalMachine
import os
""" This module automaticly format the disk based on diskconf.json """
def module_print(text):
    """Print *text* prefixed with this module's log tag."""
    print("[ autoformat disk ] " + str(text))
def parse_config_file_from_disk(path, confname="diskconf.json"):
    """Load and return the JSON config found at <path>/<confname>, or None on
    a missing file or a parse error (both are logged)."""
    cfg_file = "{}/{}".format(path, confname)
    if not os.path.exists(cfg_file):
        module_print("\tPath not exists: " + str(cfg_file))
        return None
    try:
        with open(cfg_file, "r") as fh:
            parsed = json.load(fh)
        module_print("config: " + str(confname) + " => " + str(parsed))
        return parsed
    except Exception as err:
        module_print("Json parse error: " + str(err))
        return None
def write_state_config_file_from_disk(path, data, confname="diskconf.json"):
    """Persist is_formatted=True back into <path>/<confname> after a format.

    Fix: the old code opened the file with "w" (truncating it) *before*
    checking whether the state needed updating, so the "State already set"
    path left an emptied config file behind. Now the file is only opened
    once a rewrite is actually required.
    """
    json_path = str(path) + "/" + str(confname)
    try:
        if os.path.exists(json_path):
            if str(data['is_formatted']).lower() == "false":
                module_print("\tWrite back format state to " + str(json_path))
                data['is_formatted'] = "True"
                with open(json_path, "w") as f:
                    json.dump(data, f, indent=2)
                module_print("\t\tSUCCESS")
            else:
                module_print("State already set")
        else:
            module_print("diskconf not exists: " + str(json_path))
    except Exception as e:
        module_print("\t\tFAILED")
        module_print("Write back format state to disk failed:" + str(e))
def save_diskconf_file(path, confname="diskconf.json"):
    """Stash <path>/<confname> in /tmp so it survives the disk re-format."""
    json_path = str(path) + "/" + str(confname)
    save_path = "/tmp"
    cmd = "sudo cp {} {}".format(json_path, save_path)
    exitcode, stdout, stderr = LocalMachine.run_command(cmd)
    BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)
def restore_diskconf_file(path, confname="diskconf.json"):
    """Copy the stashed config back from /tmp to <path> and delete the stash."""
    json_path = str(path) + "/" + str(confname)
    save_path = "/tmp/" + str(confname)
    cmd = "sudo cp {} {}".format(save_path, json_path)
    exitcode, stdout, stderr = LocalMachine.run_command(cmd)
    BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)
    # Clean up the /tmp stash once restored.
    cmd = "sudo rm -f {}".format(save_path)
    exitcode, stdout, stderr = LocalMachine.run_command(cmd)
    BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)
def safe_format_disk_check_force_mode(json_data, dev):
    """Decide whether *dev* must be formatted.

    Returns True when the config says the disk is not yet formatted AND
    either a requested parameter (label/filesystem) differs from the live
    device, or force mode is on. Returns False otherwise.
    """
    dev_data_modified = False
    # disk is not formatted
    dev_data = BlockDeviceHandler.get_device_info_data(dev)
    # Compare the requested label/filesystem against the live device state.
    if json_data['label'] != dev_data['label']:
        dev_data_modified = True
    if json_data['format'] != dev_data['filesystem']:
        dev_data_modified = True
    if str(json_data['is_formatted']).lower() == "false":
        if str(json_data['force']).lower() == "true" and dev_data_modified is False:
            module_print("[i] [format] Block device paramaters not changed but force mode is ON")
            return True
        elif dev_data_modified is True:
            module_print("[i] [format] Requested block device parameter(s) changed - format")
            return True
        else:
            module_print("[i] [Skip format] Blockdevice format not needed - label and system not changed")
            return False
    else:
        module_print("[i] [is_formatted:True] Blockdevice already formatted.")
        return False
def format_device_based_on_config_file(dev, premount_path):
    """Format *dev* as ext4 per the diskconf.json found at *premount_path*.

    The config file is stashed to /tmp around the format (which wipes the
    disk), restored afterwards, and updated with is_formatted=True. When no
    format is needed the device is simply (re)mounted.
    """
    module_print("Format device")
    diskconf_path = premount_path
    data = parse_config_file_from_disk(diskconf_path)
    if data is not None:
        if safe_format_disk_check_force_mode(data, dev):
            module_print("\tSave disk config file before formatting")
            save_diskconf_file(diskconf_path)
            module_print("\tUnmount device before formatting")
            BlockDeviceHandler.unmount_device(dev)
            module_print("\tFormat device")
            BlockDeviceHandler.format_ex4(dev, data['label'])
            module_print("\tMount formatted device")
            mount_point = BlockDeviceHandler.mount_device(dev)
            module_print("\tRestore config file to disk after formating")
            restore_diskconf_file(mount_point)
            module_print("\tSave back the the config file with the new state")
            write_state_config_file_from_disk(mount_point, data)
        else:
            module_print("\tDisk already formatted: {}:{}".format(dev, premount_path))
            module_print("mount device: " + str(dev))
            mount_point = BlockDeviceHandler.mount_device(dev)
def prepare_block_device():
    """Entry point: format/mount every connected block device per its config,
    then unmount all temporarily pre-mounted devices."""
    if BlockDeviceHandler.is_any_device_avaible():
        module_print("Block device exists")
        devices = BlockDeviceHandler.list_connected_devices()
        for dev in devices:
            premount_path = BlockDeviceHandler.premount_device(dev)
            format_device_based_on_config_file(dev, premount_path)
        BlockDeviceHandler.unmount_all_premounted_devices()
if __name__ == "__main__":
    prepare_block_device()
    #BlockDeviceHandler.unmount_all_devices(del_mount_point=True)
|
7,555 | cab233976653b8135276ff849955f32766833354 | import os
import numpy as np
import warnings
import soundfile as sf
def load_path():
    """Return the directory containing this module ("." when it is empty)."""
    here = os.path.join(os.path.dirname(__file__))
    return here if here != "" else "."
def create_folder(directory):
    """Ensure *directory* exists; print a warning on OSError instead of raising."""
    if os.path.exists(directory):
        return
    try:
        os.makedirs(directory)
    except OSError:
        print ('Issue: Creating directory. ' + directory)
def read_dir_list(dirname, extention=""):
    """Recursively collect file paths under *dirname*, sorted.

    extention: when non-empty, keep only files whose extension (without the
        dot) equals it; "" keeps every file.

    Fix: an unreadable directory now yields [] — the old handler did
    ``except PermissionError: pass``, which returned None and made the
    recursive ``extend()`` crash with TypeError.
    """
    try:
        entries = os.listdir(dirname)
    except PermissionError:
        return []
    found = []
    for filename in entries:
        full_filename = os.path.join(dirname, filename)
        if os.path.isdir(full_filename):
            found.extend(read_dir_list(full_filename, extention))
        else:
            ext = os.path.splitext(full_filename)[-1][1:]
            if extention == "" or ext == extention:
                found.append(full_filename)
    found.sort()
    return found
def wav_to_float(x):
    """Map an integer-PCM numpy array onto [-1.0, 1.0] as float64.

    Floating input is rescaled by its dtype's finfo range instead (this
    preserves the module's original behaviour for float input).
    """
    try:
        max_value = np.iinfo(x.dtype).max
        min_value = np.iinfo(x.dtype).min
    except ValueError:
        # Was a bare `except:`; np.iinfo raises ValueError for non-integer
        # dtypes, which is the only failure we intend to handle here.
        max_value = np.finfo(x.dtype).max
        min_value = np.finfo(x.dtype).min
    x = x.astype('float64', casting='safe')
    x -= min_value
    x /= ((max_value - min_value) / 2.)
    x -= 1.
    return x
def read_wav(filename):
    """Read *filename* and return (mono float64 signal, sample_rate).

    Multi-channel audio is reduced to its first channel; integer PCM data is
    rescaled to [-1, 1] via wav_to_float.
    """
    signal, rate = sf.read(filename)
    if signal.ndim > 1:
        signal = signal[:, 0]
    if signal.dtype != 'float64':
        signal = wav_to_float(signal)
    return signal, rate
def write_wav(x, filename, sample_rate):
    """Write *x* to *filename*; any soundfile warning is escalated to an error."""
    data = np.array(x) if type(x) != np.ndarray else x
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        sf.write(filename, data, sample_rate)
|
7,556 | 1f114b4716a44f5370495297511c305ecbb680c3 | import os, copy
from a import Moon, updateOneMoon, updateAllMoons
# Python 2 script (Advent-of-Code-style moon simulation): load moons from
# b.in.txt, step the simulation, and record per moon the steps at which its
# "energy" returns to zero; the first and second differences of those step
# indices are printed to eyeball each moon's cycle length by hand.
file_path = os.path.dirname(os.path.realpath(__file__))
input_path = file_path + "/b.in.txt"
inpt = open(input_path, 'r')
moons = []
for line in inpt:
    new_moon = Moon(line)
    moons.append(new_moon)
initial_moon_position = copy.deepcopy(moons)
# Total "energy" of the initial configuration.
print reduce(lambda x, y: x + y.thisIsntReallyTotalEnergy(), moons, 0)
# zeros[i] collects the step numbers at which moon i has zero energy.
zeros = [
    [],
    [],
    [],
    []
]
for x in range(2780):
    updateAllMoons(moons)
    for i in range(len(moons)):
        if moons[i].thisIsntReallyTotalEnergy() == 0:
            zeros[i].append(x)
    # print reduce(lambda x, y: x + y.thisIsntReallyTotalEnergy(), moons, 0)
for z in zeros:
    print z
    # First differences: gaps between consecutive zero-energy steps.
    zb = []
    for b in range(1, len(z)):
        zb.append(z[b] - z[b-1])
    print zb
    # Second differences: should flatten out if the gaps are periodic.
    za = []
    for a in range(1, len(zb)):
        za.append(zb[a] - zb[a-1])
    print za
|
7,557 | 9065842a8e90c833278547310f027bc63c7a9a47 | #!/usr/bin/env python
# coding: utf-8
import numpy as np
import copy
import sys
def mutate(genotype_in, mut_matrix):
    """Mutate every individual independently.

    genotype_in holds counts per genotype (length 8); row i of mut_matrix is
    the destination-genotype distribution for genotype i. Returns the new
    length-8 count vector.
    """
    counts = np.zeros(8)
    for src in range(8):
        n_src = int(genotype_in[src])
        draws = np.random.choice(8, size=n_src, p=mut_matrix[src, :])
        counts = counts + np.bincount(draws, minlength=8)
    return counts
def propagate(genotype_in, fitness):
    """Resample a new generation of the same total size, with genotype i
    drawn proportionally to fitness[i] * genotype_in[i]."""
    pop_size = genotype_in.sum(dtype=int)
    weights = fitness * genotype_in
    draws = np.random.choice(8, size=pop_size, p=weights / weights.sum())
    return np.bincount(draws, minlength=8)
def get_mean_fitness(gt, fitness):
    """Row-wise sum of gt * fitness: mean fitness per generation."""
    return (gt * fitness).sum(axis=1)
def get_mean_fitness3(gt, fitness):
    """Mean fitness via matrix product: equivalent to np.dot(gt, fitness)."""
    return gt @ fitness
def get_gene_freqs(gt):
    """Collapse the 8 genotype columns of *gt* (shape (n, 8)) into 3 per-gene
    frequencies: genotype index i is read as 3 bits, and gene j counts every
    genotype whose bit j is set."""
    freqs = np.zeros((gt.shape[0], 3))
    for idx in range(8):
        bits = np.binary_repr(idx, width=3)
        for gene, bit in enumerate(bits):
            if bit == '1':
                freqs[:, gene] += gt[:, idx]
    return freqs
def get_gene_freqs3(gt):
    """Like get_gene_freqs, but for a 3-D stack of genotype tables
    (shape (a, b, 8)), returning shape (a, b, 3)."""
    freqs = np.zeros((gt.shape[0], gt.shape[1], 3))
    for idx in range(8):
        bits = np.binary_repr(idx, width=3)
        for gene, bit in enumerate(bits):
            if bit == '1':
                freqs[:, :, gene] += gt[:, :, idx]
    return freqs
def convert_mut(mp):
    """Expand a 2x3 per-gene mutation-rate matrix into an 8x8 genotype matrix.

    mp[1, k] is the loss rate of gene k (bit 1 -> 0) and mp[0, k] the gain
    rate (0 -> 1); each off-diagonal entry is the product over the three
    genes, and the diagonal absorbs the remainder so every row sums to 1.
    """
    out = np.zeros((8, 8))
    for i in range(8):
        src_bits = np.binary_repr(i, width=3)
        for j in range(8):
            dst_bits = np.binary_repr(j, width=3)
            prob = 1
            for k in range(3):
                if int(src_bits[k]) > int(dst_bits[k]):
                    prob *= mp[1, k]
                elif int(src_bits[k]) < int(dst_bits[k]):
                    prob *= mp[0, k]
            out[i, j] = prob
        # The j == i pass left a 1 on the diagonal, so this equals 1 - (off-diagonal sum).
        out[i, i] = 2 - np.sum(out[i, :])
    return out
def convert_fitness(fitness):
    """Flatten a 2x2x2 fitness tensor into a length-8 vector indexed by the
    3-bit genotype (most-significant bit = first gene)."""
    vec = np.zeros(8)
    for idx in range(8):
        bits = np.binary_repr(idx, 3)
        vec[idx] = fitness[int(bits[0]), int(bits[1]), int(bits[2])]
    return vec
def progressbar(it, prefix="", size=60, file=sys.stdout):
    """Yield the items of *it* while drawing a textual progress bar to *file*.

    *it* must support len(). The bar is redrawn (carriage return) after every
    yielded item and finished with a newline.
    """
    total = len(it)
    def draw(done):
        filled = int(size * done / total)
        file.write("%s[%s%s] %i/%i\r" % (prefix, "#" * filled, "." * (size - filled), done, total))
        file.flush()
    draw(0)
    for pos, item in enumerate(it, start=1):
        yield item
        draw(pos)
    file.write("\n")
    file.flush()
def run_simulation_parallel(n, gt_in, params, label):
    """Run replicate *n* of the mutate/propagate simulation for *label*.

    params: dict with 'generations' and, keyed by *label*, a 2x3 'mut_prob'
    matrix and a 2x2x2 'fitness' tensor. Returns the (generations, 8)
    genotype-count trajectory, row 0 being gt_in.
    """
    # Re-seed from the OS so worker processes don't share a forked RNG state.
    np.random.seed()
    generations = params['generations']
    gt = np.zeros((generations, 8))
    mut_prob = convert_mut(params['mut_prob'][label])
    fitness = convert_fitness(params['fitness'][label])
    gt[0,:] = gt_in
    for i in progressbar(range(1, params['generations']), "Repeat "+str(n+1), 40):
        gt_mut = mutate(gt[i-1,:], mut_prob)
        gt[i,:] = propagate(gt_mut, fitness)
    return(gt)
7,558 | e048170775c589cf0a9fb3d54c72dab4df3f1bcb | import pickle
import numpy as np
# Hard-coded Windows paths to the pickled frame tensors and the RBM weights.
in_dir = "C:\\Users\\ganga\\Github\\Generative-Models\\Project\\Data\\Dynamics\\"
out_dir = f"C:\\Users\\ganga\\Github\\Generative-Models\\Project\\Data\\Dynamics\\"  # NOTE(review): f-string has no placeholders
# Read frames
train_frames = pickle.load( open(in_dir +'\\train_frames.pkl' , 'rb' ))
test_frames = pickle.load( open(in_dir +'\\test_frames.pkl' , 'rb' ))
# Read the rbm learned weights
rbm_dir = "C:\\Users\\ganga\\Github\\Generative-Models\\Project\\Outputs\\RBM\\"
W, b_h, b_v = pickle.load( open(rbm_dir+'\\weights.pkl' , 'rb' ))
print("Loaded learned weights from RBM")
print("W", W.shape)
print("b_h", b_h.shape)
print("b_v", b_v.shape)
def sigmoid(x):
    """Logistic function 1 / (1 + exp(-x)), computed via tanh for stability."""
    return (1 + np.tanh(x * .5)) * .5
def bernoulli_array(prob_array, dim):
# Simulating Bernoulli from uniform
sample = np.zeros(dim)
# Draw x~Uni[0,1]
uni_sample = np.random.uniform(0, 1, dim)
# return 1 if x < p else return 0
diff = uni_sample - prob_array
coords = np.argwhere(diff<0)
sample[[*coords.T]] = 1
return sample
# ------------------------ Train Data ----------------------------------------
# Five independent stochastic samples of the hidden layer per data split,
# plus one file of the underlying activation probabilities p(h|v).
for count in range(5):
    hidden = []
    for i in range(train_frames.shape[0]):
        v = train_frames[i].T
        # Getting hidden states of RBM using frames
        # (h x v) @ (v x b) + (h x 1) = (h x b)
        p_h_v = sigmoid(W @ v + b_h)
        h = bernoulli_array(p_h_v, (p_h_v.shape[0], p_h_v.shape[1]))
        hidden.append(h.T)
    hidden = np.array(hidden)
    print("Train Hidden h ", count, ": ", hidden.shape)
    pickle.dump(hidden, open(f"{out_dir}\\train_h_{count}.pkl" , 'wb' ) )
# Deterministic pass: save the probabilities themselves (no sampling).
hidden = []
for i in range(train_frames.shape[0]):
    v = train_frames[i].T
    # Getting hidden states of RBM using frames
    # (h x v) @ (v x b) + (h x 1) = (h x b)
    p_h_v = sigmoid(W @ v + b_h)
    hidden.append(p_h_v.T)
hidden = np.array(hidden)
print("Train Hidden p_h_v : ", hidden.shape)
pickle.dump(hidden, open(f"{out_dir}\\train_p_h_v.pkl" , 'wb' ) )
# ------------------------ Test Data ----------------------------------------
for count in range(5):
    hidden = []
    for i in range(test_frames.shape[0]):
        v = test_frames[i].T
        # Getting hidden states of RBM using frames
        # (h x v) @ (v x b) + (h x 1) = (h x b)
        p_h_v = sigmoid(W @ v + b_h)
        h = bernoulli_array(p_h_v, (p_h_v.shape[0], p_h_v.shape[1]))
        hidden.append(h.T)
    hidden = np.array(hidden)
    print("Test Latent Dynamics h ", count, ": ", hidden.shape)
    pickle.dump(hidden, open(f"{out_dir}\\test_h_{count}.pkl" , 'wb' ) )
# Deterministic pass for the test split.
hidden = []
for i in range(test_frames.shape[0]):
    v = test_frames[i].T
    # Getting hidden states of RBM using frames
    # (h x v) @ (v x b) + (h x 1) = (h x b)
    p_h_v = sigmoid(W @ v + b_h)
    hidden.append(p_h_v.T)
hidden = np.array(hidden)
print("Test Hidden p_h_v : ", hidden.shape)
pickle.dump(hidden, open(f"{out_dir}\\test_p_h_v.pkl" , 'wb' ) )
7,559 | d6046217308745b85455aed78734700b9622782c | import os
from logzero import logger as log
from extract import data_generator
from transform import create_dataframe
def bigquery(
    datafile,
    dataset=os.environ["BQDATASET"],
    project=os.environ["GCPPROJECT"],
    schema=[
        {"name": "conversation", "type": "STRING"},
        {"name": "id", "type": "INTEGER"},
        {"name": "from", "type": "STRING"},
        {"name": "text", "type": "STRING"},
        {"name": "wordcount", "type": "INTEGER"},
        {"name": "reply_to_message_id", "type": "INTEGER"},
        {"name": "photo", "type": "STRING"},
        {"name": "wait_time", "type": "FLOAT"},
    ],
):
    """Extract chat data from *datafile*, transform it, and load it into BigQuery.

    Returns the truthiness of the to_gbq() result as a bool.
    NOTE: the env-var defaults are evaluated once at import time (and raise
    KeyError then when unset); kept as-is for interface compatibility.
    """
    log.info("creating bigquery dataset")
    src = data_generator(datafile)
    chatinfo = create_dataframe(src)
    ts = chatinfo.to_gbq(
        "{}".format(dataset),
        project_id="{}".format(project),
        if_exists="replace",
        table_schema=schema,
    )
    # Replaces the redundant `if ts: return True else: return False`.
    return bool(ts)
|
7,560 | a9ce341ffe26ab6c476237030e23e6ae57b8fa33 | from random import randint
#given a list of names, cities and neigborhoods, generate a client table.
#------------------------MODEL------------------------------
#([cliente_id], [nome], [sexo], [telefone], [cpf], [cidade_nome], [cidade_bairro_nome], [cidade_bairro_cep])
class Employee:
    """One row of the [dbo].[Funcionario] table, renderable as a T-SQL INSERT."""
    def __init__(self, id, name ,sex, phone, cpf, payment):
        self.id = id
        self.name = name
        self.sex = sex
        self.phone = phone
        self.cpf = cpf
        self.payment = payment
    def __str__(self):
        return '(' + self.name + '-' + self.cpf + ')'
    def insertStmnt(self):
        """Render this employee as a T-SQL INSERT statement (salary as MONEY)."""
        return ("INSERT INTO [dbo].[Funcionario] ([funcionario_id], [funcionario_nome], [sexo], [telefone], [cpf], [salario])"+
            " VALUES ({!s},'{}','{}','{}','{}',CAST('${:.2f}' AS MONEY));").format(self.id, self.name, self.sex, self.phone, self.cpf, self.payment)
    @staticmethod
    def employee_from_insert_stmnt(stmnt):
        """Parse an INSERT produced by insertStmnt back into an Employee.

        Fix: the old parser cut the VALUES list at the *first* ')' — i.e.
        inside CAST(...) — leaving payment as the garbled string "CAST('$..."
        and breaking the insertStmnt round trip. We now take the last ')'
        and extract the numeric salary from the CAST expression.
        NOTE: assumes field values contain no commas or quotes (true for the
        generated data in this module).
        """
        if 'INSERT' not in stmnt:
            return None
        values = stmnt.find("VALUES")
        first_arg = stmnt.find('(', values)
        end = stmnt.rfind(')')  # outer closing paren, past CAST(...)
        args = [a.strip().strip("'") for a in stmnt[first_arg + 1:end].split(',')]
        # args[5] is "CAST('$<amount>' AS MONEY)": pull out the amount.
        payment = float(args[5].split('$')[1].split("'")[0])
        return Employee(int(args[0]), args[1], args[2], args[3], args[4], payment)
#-----------------AUXILIARY FUNCTIONS-----------------------
def load_employees(file_path):
    """Given a path to a SQL file of employee INSERT statements, return the
    corresponding list of Employee instances (non-INSERT lines are skipped).

    Fix: the file handle is now closed — the old code iterated a bare
    open() result and leaked the handle.
    """
    with open(file_path) as sql_file:
        parsed = (Employee.employee_from_insert_stmnt(line) for line in sql_file)
        return [employee for employee in parsed if employee is not None]
#-----------------------GENERATOR---------------------------
def generate_employees(maleNames, femaleNames, amount):
    """Build Employee records for up to *amount* male and *amount* female names.

    IDs, phone numbers and CPFs are sequential across both groups (males
    first, as before); salary is 2000 plus a random 100..1100 bump.

    Refactor: the two identical per-sex loops are now one, and the unused
    n_maleNames / n_femaleNames locals are gone. Output is unchanged.
    """
    base_payment = 2000.00
    employees = []
    counter = 0  # drives employee_id (counter + 1), the phone and the cpf
    for names, sex in ((maleNames[:amount], 'M'), (femaleNames[:amount], 'F')):
        for name in names:
            phone = str(counter).zfill(8)
            phone = phone[:4] + '-' + phone[4:]
            cpf = str(counter).zfill(11)
            cpf = cpf[:3] + '.' + cpf[3:6] + '.' + cpf[6:9] + '-' + cpf[9:]
            employees.append(Employee(counter + 1, name, sex, phone, cpf,
                                      base_payment + randint(100, 1100)))
            counter += 1
    return employees
#-------------------------ACTUAL SCRIPT-----------------------
def main():
    """Generate employees.sql from the name-list files in the current directory."""
    #----------------------INPUT--------------------------------
    maleNames = [line.rstrip('\n') for line in open('employeeMaleNames.txt')]
    femaleNames = [line.rstrip('\n') for line in open('employeeFemaleNames.txt')]
    cities = [line.rstrip('\n') for line in open('cities.txt')]
    neighborhoods = [line.rstrip('\n') for line in open('neighborhoods.txt')]
    #--------------------EXECUTION--------------------------
    employees = generate_employees(maleNames, femaleNames, 15)
    #--------------------------OUTPUT-------------------------------
    count = {}
    with open("employees.sql", "w") as sql_employees_file:
        sql_employees_file.write("USE [lolbibis]\nGO\n\n")
        for employee in employees:
            sql_employees_file.write(employee.insertStmnt() + "\n")
        sql_employees_file.write('GO\n\n')
    #------------------STATS REPORTING----------------------
    # Fixed: this loop used a Python 2 print statement and the undefined name
    # n_names (NameError had it ever run); `count` is never populated, so the
    # loop is currently a no-op kept for future stats.
    total_names = len(maleNames) + len(femaleNames)
    for key in count:
        print(key + ':' + str(count[key]) + ' - ' + str(count[key] / (total_names + 0.0)))
|
7,561 | 0131657a7675904ee2743448f514a9f11e0dc0ad | # -*- coding: utf-8 -*-
"""
Default organizer for bioinfoinformatics project directiories - RNA-Seq based model
"""
import os
import sys
#main path
curr_path = os.getcwd()
print("\nYour current directory is: " + curr_path + "\n\nIt contains the following files and directories:\n\n" + str(os.listdir("."))) # displays the current directory and list it subdirectories
project_path = input(str("\nPath of the directory where you want to start your project: "))
# NOTE(review): os.chdir() returns None, so path_to is always None.
path_to = os.chdir(project_path) # change to the desired directory to start the project
response = input(str("Are you ready to start a new bioinformatics project? (Y or N): " ))
# NOTE(review): the else branch relies on sys.exit() raising before the `+`
# is applied to print()'s None result — fragile; consider a plain if/else.
project = input(str("Project name: " if response == "Y" else print("\nThat's ok, you can try it later.") + sys.exit()))
os.mkdir(project) # create the root directory for the project
os.chdir(project) # change into the project directory (like cd)
#print(os.getcwd()) # show the current directory
# os.mkdir() returns None; the variables below only document the layout.
data = os.mkdir("data")
raw_data = os.mkdir("data/raw_data")
processed_data = os.mkdir("data/processed_data")
genome_references = os.mkdir("data/genome_references")
programs = os.mkdir("programs")
analysis = os.mkdir("analysis")
data_pre_process = os.mkdir("analysis/data_pre_process")
assembly = os.mkdir("analysis/assembly")
annotations = os.mkdir("analysis/annotations")
alignements = os.mkdir("analysis/alignements")
quantification = os.mkdir("analysis/quantifications")
results = os.mkdir("results")
logs = os.mkdir("results/logs")
output = os.mkdir("results/output")
html = os.mkdir("results/html")
errors_out = os.mkdir("results/errors_out")
notebook = os.mkdir("notebooks")
scripts = os.mkdir("scripts")
print("\n\n-- Your directories are ready to keep your project organized. --")
print("\n\nThe current main project diretory has the following subdirectories:\n", os.listdir("."))
# NOTE(review): this line is labelled "data" but lists "results" — likely a
# copy-paste slip; should presumably be os.listdir("data"). Confirm and fix.
print("\nThe diretory data has the following subdirectories:\n", os.listdir("results"))
print("\nThe diretory analysis has the following subdirectories:\n", os.listdir("analysis"))
print("\nThe diretory results has the following subdirectories:\n", os.listdir("results"))
print("\n\n ---------- Enjoy your research! :) ----------")
|
7,562 | c5e7fdcbd4a9281597a35a180f2853caac68f811 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from collections import namedtuple
from functools import partial
import inspect
from itertools import product
import math
import os
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
import pytest
import scipy
from scipy.sparse import csr_matrix
import scipy.stats as osp
import jax
from jax import grad, lax, vmap
import jax.numpy as jnp
import jax.random as random
from jax.scipy.special import expit, logsumexp
from jax.scipy.stats import norm as jax_norm, truncnorm as jax_truncnorm
import numpyro.distributions as dist
from numpyro.distributions import (
SineBivariateVonMises,
constraints,
kl_divergence,
transforms,
)
from numpyro.distributions.batch_util import vmap_over
from numpyro.distributions.discrete import _to_probs_bernoulli, _to_probs_multinom
from numpyro.distributions.flows import InverseAutoregressiveTransform
from numpyro.distributions.gof import InvalidTest, auto_goodness_of_fit
from numpyro.distributions.transforms import (
LowerCholeskyAffine,
PermuteTransform,
PowerTransform,
SimplexToOrderedTransform,
SoftplusTransform,
biject_to,
)
from numpyro.distributions.util import (
matrix_to_tril_vec,
multinomial,
signed_stick_breaking_tril,
sum_rightmost,
vec_to_tril_matrix,
)
from numpyro.nn import AutoregressiveNN
TEST_FAILURE_RATE = 2e-5 # For all goodness-of-fit tests.
def my_kron(A, B):
    """Kronecker product over the last two axes of A and B, preserving any
    leading batch dimensions (matches np.kron for 2-D inputs)."""
    outer = A[..., :, None, :, None] * B[..., None, :, None, :]
    *batch, r1, r2, c1, c2 = outer.shape
    return outer.reshape(*batch, r1 * r2, c1 * c2)
def _identity(x):
    """Return *x* unchanged."""
    return x
def _circ_mean(angles):
return jnp.arctan2(
jnp.mean(jnp.sin(angles), axis=0), jnp.mean(jnp.cos(angles), axis=0)
)
def sde_fn1(x, _):
    """Toy 1-D SDE: linear drift lam * x with constant diffusion 0.1."""
    lam, sigma2 = 0.1, 0.1
    return lam * x, sigma2
def sde_fn2(xy, _):
    """FitzHugh–Nagumo-style 2-D drift with constant diffusion 0.1."""
    tau, a = 2.0, 1.1
    x, y = xy[0], xy[1]
    dx = tau * (x - x**3.0 / 3.0 + y)
    dy = (a - x) / tau  # == (1.0 / tau) * (a - x); tau is a power of two
    drift = jnp.vstack([dx, dy]).reshape(xy.shape)
    return drift, 0.1
class T(namedtuple("TestCase", ["jax_dist", "sp_dist", "params"])):
    """Distribution test case: the numpyro distribution, its scipy counterpart
    (looked up via get_sp_dist, defined elsewhere in this file), and the
    constructor parameters collected as a tuple."""
    def __new__(cls, jax_dist, *params):
        sp_dist = get_sp_dist(jax_dist)
        return super(cls, T).__new__(cls, jax_dist, sp_dist, params)
def _mvn_to_scipy(loc, cov, prec, tril):
    """Build the scipy multivariate normal equivalent of dist.MultivariateNormal
    (letting the numpyro class resolve whichever parameterization was given)."""
    jax_dist = dist.MultivariateNormal(loc, cov, prec, tril)
    mean = jax_dist.mean
    cov = jax_dist.covariance_matrix
    return osp.multivariate_normal(mean=mean, cov=cov)
def _multivariate_t_to_scipy(df, loc, tril):
    """Build the scipy multivariate Student-T equivalent of
    dist.MultivariateStudentT, skipping on scipy < 1.6 where it is missing.

    Fix: the old guard compared version *strings* lexicographically, so e.g.
    "1.10.0" < "1.6.0" was True and the test was wrongly skipped on modern
    scipy. Compare (major, minor) numerically instead.
    """
    scipy_version = tuple(int(part) for part in scipy.__version__.split(".")[:2] if part.isdigit())
    if scipy_version < (1, 6):
        pytest.skip(
            "Multivariate Student-T distribution is not available in scipy < 1.6"
        )
    jax_dist = dist.MultivariateStudentT(df, loc, tril)
    mean = jax_dist.mean
    cov = jax_dist.covariance_matrix
    return osp.multivariate_t(loc=mean, shape=cov, df=df)
def _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):
    """Build the scipy multivariate normal equivalent of
    dist.LowRankMultivariateNormal (via its dense covariance matrix)."""
    jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)
    mean = jax_dist.mean
    cov = jax_dist.covariance_matrix
    return osp.multivariate_normal(mean=mean, cov=cov)
def _truncnorm_to_scipy(loc, scale, low, high):
if low is None:
a = -np.inf
else:
a = (low - loc) / scale
if high is None:
b = np.inf
else:
b = (high - loc) / scale
return osp.truncnorm(a, b, loc=loc, scale=scale)
def _TruncatedNormal(loc, scale, low, high):
    """Positional wrapper forwarding to dist.TruncatedNormal's keyword API."""
    return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)
def _TruncatedCauchy(loc, scale, low, high):
    """Positional wrapper forwarding to dist.TruncatedCauchy's keyword API."""
    return dist.TruncatedCauchy(loc=loc, scale=scale, low=low, high=high)
# Attach the class-level attributes a distribution normally exposes to the
# _TruncatedNormal wrapper function (presumably read by the generic test
# machinery); infer_shapes mirrors broadcasting of the argument shapes.
_TruncatedNormal.arg_constraints = {}
_TruncatedNormal.reparametrized_params = []
_TruncatedNormal.infer_shapes = lambda *args: (lax.broadcast_shapes(*args), ())
class SineSkewedUniform(dist.SineSkewed):
    """SineSkewed with a Uniform([-pi, pi]^2) base distribution on the 2-torus."""
    def __init__(self, skewness, **kwargs):
        lower, upper = (np.array([-math.pi, -math.pi]), np.array([math.pi, math.pi]))
        base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)
        super().__init__(base_dist, skewness, **kwargs)
@vmap_over.register
def _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):
    """vmap_over rule: batch over skewness only; the base distribution is fixed."""
    return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None, skewness=skewness)
class SineSkewedVonMises(dist.SineSkewed):
    """SineSkewed with a single-component VonMises(0, 1) base distribution."""
    def __init__(self, skewness, **kwargs):
        von_loc, von_conc = (np.array([0.0]), np.array([1.0]))
        base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc.ndim)
        super().__init__(base_dist, skewness, **kwargs)
@vmap_over.register
def _vmap_over_sine_skewed_von_mises(self: SineSkewedVonMises, skewness=None):
    """vmap_over rule: batch over skewness only; the base distribution is fixed."""
    return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None, skewness=skewness)
class SineSkewedVonMisesBatched(dist.SineSkewed):
    """SineSkewed with a two-component (batched) VonMises base distribution."""
    def __init__(self, skewness, **kwargs):
        von_loc, von_conc = (np.array([0.0, -1.234]), np.array([1.0, 10.0]))
        base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc.ndim)
        super().__init__(base_dist, skewness, **kwargs)
@vmap_over.register
def _vmap_over_sine_skewed_von_mises_batched(
    self: SineSkewedVonMisesBatched, skewness=None
):
    """vmap_over rule: batch over skewness only; the base distribution is fixed."""
    return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None, skewness=skewness)
class _GaussianMixture(dist.MixtureSameFamily):
    """1-D Gaussian mixture (Categorical mixing over Normal components), with
    loc/scale exposed as properties for the test machinery."""
    arg_constraints = {}
    reparametrized_params = []
    def __init__(self, mixing_probs, loc, scale):
        component_dist = dist.Normal(loc=loc, scale=scale)
        mixing_distribution = dist.Categorical(probs=mixing_probs)
        super().__init__(
            mixing_distribution=mixing_distribution,
            component_distribution=component_dist,
        )
    @property
    def loc(self):
        return self.component_distribution.loc
    @property
    def scale(self):
        return self.component_distribution.scale
@vmap_over.register
def _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):
    """vmap_over rule: forward loc/scale mapping into the Normal components."""
    component_distribution = vmap_over(
        self.component_distribution, loc=loc, scale=scale
    )
    return vmap_over.dispatch(dist.MixtureSameFamily)(
        self, _component_distribution=component_distribution
    )
class _Gaussian2DMixture(dist.MixtureSameFamily):
    """Mixture of multivariate normals used as a test fixture."""

    arg_constraints = {}
    reparametrized_params = []

    def __init__(self, mixing_probs, loc, covariance_matrix):
        super().__init__(
            mixing_distribution=dist.Categorical(probs=mixing_probs),
            component_distribution=dist.MultivariateNormal(
                loc=loc, covariance_matrix=covariance_matrix
            ),
        )

    @property
    def loc(self):
        # Expose component parameters so tests can inspect them directly.
        return self.component_distribution.loc

    @property
    def covariance_matrix(self):
        return self.component_distribution.covariance_matrix
@vmap_over.register
def _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):
    # Only the component means carry a mapped axis here.
    mapped_components = vmap_over(self.component_distribution, loc=loc)
    mixture_rule = vmap_over.dispatch(dist.MixtureSameFamily)
    return mixture_rule(self, _component_distribution=mapped_components)
class _GeneralMixture(dist.MixtureGeneral):
    """Mixture of normals with per-component parameters (test fixture)."""

    arg_constraints = {}
    reparametrized_params = []

    def __init__(self, mixing_probs, locs, scales):
        component_dists = [
            dist.Normal(loc=loc_, scale=scale_) for loc_, scale_ in zip(locs, scales)
        ]
        mixing_distribution = dist.Categorical(probs=mixing_probs)
        # Fix: dropped the spurious `return` in front of super().__init__();
        # __init__ must return None, and the old form merely obscured that.
        super().__init__(
            mixing_distribution=mixing_distribution,
            component_distributions=component_dists,
        )

    @property
    def locs(self):
        # hotfix for vmapping tests, which cannot easily check non-array attributes
        return self.component_distributions[0].loc

    @property
    def scales(self):
        return self.component_distributions[0].scale
@vmap_over.register
def _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):
    # Every component normal shares the same loc/scale axis annotation.
    mapped = [
        vmap_over(component, loc=locs, scale=scales)
        for component in self.component_distributions
    ]
    return vmap_over.dispatch(dist.MixtureGeneral)(
        self, _component_distributions=mapped
    )
class _General2DMixture(dist.MixtureGeneral):
    """Mixture of multivariate normals with per-component parameters (test fixture)."""

    arg_constraints = {}
    reparametrized_params = []

    def __init__(self, mixing_probs, locs, covariance_matrices):
        component_dists = [
            dist.MultivariateNormal(loc=loc_, covariance_matrix=covariance_matrix)
            for loc_, covariance_matrix in zip(locs, covariance_matrices)
        ]
        mixing_distribution = dist.Categorical(probs=mixing_probs)
        # Fix: dropped the spurious `return` in front of super().__init__();
        # __init__ must return None, and the old form merely obscured that.
        super().__init__(
            mixing_distribution=mixing_distribution,
            component_distributions=component_dists,
        )

    @property
    def locs(self):
        # hotfix for vmapping tests, which cannot easily check non-array attributes
        return self.component_distributions[0].loc

    @property
    def covariance_matrices(self):
        return self.component_distributions[0].covariance_matrix
@vmap_over.register
def _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):
    # Apply the same loc axis to every component, then defer to the
    # MixtureGeneral rule.
    mapped = [
        vmap_over(component, loc=locs)
        for component in self.component_distributions
    ]
    return vmap_over.dispatch(dist.MixtureGeneral)(
        self, _component_distributions=mapped
    )
class _ImproperWrapper(dist.ImproperUniform):
    """ImproperUniform with a sampler, for tests that need draws from the support."""

    def sample(self, key, sample_shape=()):
        # Draw uniformly in the unconstrained space, then push the samples
        # through the bijection onto the support.
        to_support = biject_to(self.support)
        prototype = jnp.zeros(self.event_shape)
        unconstrained_shape = jnp.shape(to_support.inv(prototype))
        full_shape = sample_shape + self.batch_shape + unconstrained_shape
        raw = random.uniform(key, full_shape, minval=-2, maxval=2)
        return to_support(raw)
class ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):
    """Zero-inflated Poisson parameterized by a rate and gate logits."""

    arg_constraints = {"rate": constraints.positive, "gate_logits": constraints.real}
    pytree_data_fields = ("rate",)

    def __init__(self, rate, gate_logits, *, validate_args=None):
        # Keep `rate` around as a pytree leaf so the vmap tests can annotate it.
        self.rate = rate
        base = dist.Poisson(rate)
        super().__init__(base, gate_logits, validate_args=validate_args)
@vmap_over.register
def _vmap_over_zero_inflated_poisson_logits(
    self: ZeroInflatedPoissonLogits, rate=None, gate_logits=None
):
    # Map the base Poisson rate and the logits; the extra `rate` pytree field
    # on the wrapper must be annotated separately afterwards.
    axes = vmap_over.dispatch(dist.discrete.ZeroInflatedLogits)(
        self,
        base_dist=vmap_over(self.base_dist, rate=rate),
        gate_logits=gate_logits,
        gate=gate_logits,
    )
    axes.rate = rate
    return axes
class SparsePoisson(dist.Poisson):
    """Poisson fixture that always enables the sparse (`is_sparse=True`) path."""

    def __init__(self, rate, *, validate_args=None):
        super().__init__(rate, is_sparse=True, validate_args=validate_args)
class FoldedNormal(dist.FoldedDistribution):
    """Folded |Normal(loc, scale)| with loc/scale exposed as attributes."""

    arg_constraints = {"loc": constraints.real, "scale": constraints.positive}

    def __init__(self, loc, scale, validate_args=None):
        # Stored so the vmap tests can inspect/annotate these axes directly.
        self.loc = loc
        self.scale = scale
        super().__init__(dist.Normal(loc, scale), validate_args=validate_args)
@vmap_over.register
def _vmap_over_folded_normal(self: "FoldedNormal", loc=None, scale=None):
    # Map the wrapped normal first, then annotate the mirrored attributes.
    axes = vmap_over.dispatch(dist.FoldedDistribution)(
        self, base_dist=vmap_over(self.base_dist, loc=loc, scale=scale)
    )
    axes.loc = loc
    axes.scale = scale
    return axes
class _SparseCAR(dist.CAR):
    """CAR fixture that exercises the sparse adjacency-matrix code path."""

    reparametrized_params = ["loc", "correlation", "conditional_precision"]

    def __init__(
        self,
        loc,
        correlation,
        conditional_precision,
        adj_matrix,
        *,
        is_sparse=True,
        validate_args=None,
    ):
        # Fix: forward `is_sparse` instead of hard-coding True, so the accepted
        # keyword actually has an effect.  Default behavior is unchanged.
        super().__init__(
            loc,
            correlation,
            conditional_precision,
            adj_matrix,
            is_sparse=is_sparse,
            validate_args=validate_args,
        )
# Map from NumPyro distribution class to a factory producing the matching
# scipy-style frozen distribution (via the `osp` alias), used to cross-check
# samples and log-probabilities.  Classes without a counterpart are absent.
_DIST_MAP = {
    dist.AsymmetricLaplace: lambda loc, scale, asymmetry: osp.laplace_asymmetric(
        asymmetry, loc=loc, scale=scale
    ),
    dist.BernoulliProbs: lambda probs: osp.bernoulli(p=probs),
    dist.BernoulliLogits: lambda logits: osp.bernoulli(p=_to_probs_bernoulli(logits)),
    dist.Beta: lambda con1, con0: osp.beta(con1, con0),
    dist.BetaProportion: lambda mu, kappa: osp.beta(mu * kappa, (1 - mu) * kappa),
    dist.BinomialProbs: lambda probs, total_count: osp.binom(n=total_count, p=probs),
    dist.BinomialLogits: lambda logits, total_count: osp.binom(
        n=total_count, p=_to_probs_bernoulli(logits)
    ),
    dist.Cauchy: lambda loc, scale: osp.cauchy(loc=loc, scale=scale),
    dist.Chi2: lambda df: osp.chi2(df),
    dist.Dirichlet: lambda conc: osp.dirichlet(conc),
    dist.Exponential: lambda rate: osp.expon(scale=jnp.reciprocal(rate)),
    dist.Gamma: lambda conc, rate: osp.gamma(conc, scale=1.0 / rate),
    # scipy's geometric has support {1, 2, ...}; loc=-1 shifts it to {0, 1, ...}
    dist.GeometricProbs: lambda probs: osp.geom(p=probs, loc=-1),
    dist.GeometricLogits: lambda logits: osp.geom(
        p=_to_probs_bernoulli(logits), loc=-1
    ),
    dist.Gumbel: lambda loc, scale: osp.gumbel_r(loc=loc, scale=scale),
    dist.HalfCauchy: lambda scale: osp.halfcauchy(scale=scale),
    dist.HalfNormal: lambda scale: osp.halfnorm(scale=scale),
    dist.InverseGamma: lambda conc, rate: osp.invgamma(conc, scale=rate),
    dist.Laplace: lambda loc, scale: osp.laplace(loc=loc, scale=scale),
    dist.LogNormal: lambda loc, scale: osp.lognorm(s=scale, scale=jnp.exp(loc)),
    dist.LogUniform: lambda a, b: osp.loguniform(a, b),
    dist.MultinomialProbs: lambda probs, total_count: osp.multinomial(
        n=total_count, p=probs
    ),
    dist.MultinomialLogits: lambda logits, total_count: osp.multinomial(
        n=total_count, p=_to_probs_multinom(logits)
    ),
    dist.MultivariateNormal: _mvn_to_scipy,
    dist.MultivariateStudentT: _multivariate_t_to_scipy,
    dist.LowRankMultivariateNormal: _lowrank_mvn_to_scipy,
    dist.Normal: lambda loc, scale: osp.norm(loc=loc, scale=scale),
    dist.Pareto: lambda scale, alpha: osp.pareto(alpha, scale=scale),
    dist.Poisson: lambda rate: osp.poisson(rate),
    dist.StudentT: lambda df, loc, scale: osp.t(df=df, loc=loc, scale=scale),
    # scipy's uniform takes (loc, width), not (low, high)
    dist.Uniform: lambda a, b: osp.uniform(a, b - a),
    dist.Logistic: lambda loc, scale: osp.logistic(loc=loc, scale=scale),
    dist.VonMises: lambda loc, conc: osp.vonmises(
        loc=np.array(loc, dtype=np.float64), kappa=np.array(conc, dtype=np.float64)
    ),
    dist.Weibull: lambda scale, conc: osp.weibull_min(
        c=conc,
        scale=scale,
    ),
    _TruncatedNormal: _truncnorm_to_scipy,
}
def get_sp_dist(jax_dist):
    """Look up the scipy counterpart for a distribution class (or instance).

    For a class, the whole MRO is searched so subclasses inherit their parent's
    mapping.  Returns None implicitly when nothing matches.
    """
    candidates = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]
    for candidate in candidates:
        sp_dist = _DIST_MAP.get(candidate)
        if sp_dist is not None:
            return sp_dist
# Parameter table for continuous distributions.  Each T(...) entry expands to
# a (jax_dist, sp_dist, params) triple consumed by the parametrized tests
# below (see the "jax_dist, sp_dist, params" parametrize signatures).
CONTINUOUS = [
    T(dist.AsymmetricLaplace, 1.0, 0.5, 1.0),
    T(dist.AsymmetricLaplace, np.array([1.0, 2.0]), 2.0, 2.0),
    T(dist.AsymmetricLaplace, np.array([[1.0], [2.0]]), 2.0, np.array([3.0, 5.0])),
    T(dist.AsymmetricLaplaceQuantile, 0.0, 1.0, 0.5),
    T(dist.AsymmetricLaplaceQuantile, np.array([1.0, 2.0]), 2.0, 0.7),
    T(
        dist.AsymmetricLaplaceQuantile,
        np.array([[1.0], [2.0]]),
        2.0,
        np.array([0.2, 0.8]),
    ),
    T(dist.Beta, 0.2, 1.1),
    T(dist.Beta, 1.0, np.array([2.0, 2.0])),
    T(dist.Beta, 1.0, np.array([[1.0, 1.0], [2.0, 2.0]])),
    T(dist.BetaProportion, 0.2, 10.0),
    T(dist.BetaProportion, 0.51, np.array([2.0, 1.0])),
    T(dist.BetaProportion, 0.5, np.array([[4.0, 4.0], [2.0, 2.0]])),
    T(dist.Chi2, 2.0),
    T(dist.Chi2, np.array([0.3, 1.3])),
    T(dist.Cauchy, 0.0, 1.0),
    T(dist.Cauchy, 0.0, np.array([1.0, 2.0])),
    T(dist.Cauchy, np.array([0.0, 1.0]), np.array([[1.0], [2.0]])),
    T(dist.Dirichlet, np.array([1.7])),
    T(dist.Dirichlet, np.array([0.2, 1.1])),
    T(dist.Dirichlet, np.array([[0.2, 1.1], [2.0, 2.0]])),
    T(
        dist.EulerMaruyama,
        np.array([0.0, 0.1, 0.2]),
        sde_fn1,
        dist.Normal(0.1, 1.0),
    ),
    T(
        dist.EulerMaruyama,
        np.array([0.0, 0.1, 0.2]),
        sde_fn2,
        dist.Normal(jnp.array([0.0, 1.0]), 1e-3).to_event(1),
    ),
    T(
        dist.EulerMaruyama,
        np.array([[0.0, 0.1, 0.2], [10.0, 10.1, 10.2]]),
        sde_fn2,
        dist.Normal(jnp.array([0.0, 1.0]), 1e-3).to_event(1),
    ),
    T(
        dist.EulerMaruyama,
        np.array([[0.0, 0.1, 0.2], [10.0, 10.1, 10.2]]),
        sde_fn2,
        dist.Normal(jnp.array([[0.0, 1.0], [2.0, 3.0]]), 1e-2).to_event(1),
    ),
    T(dist.Exponential, 2.0),
    T(dist.Exponential, np.array([4.0, 2.0])),
    T(dist.Gamma, np.array([1.7]), np.array([[2.0], [3.0]])),
    T(dist.Gamma, np.array([0.5, 1.3]), np.array([[1.0], [3.0]])),
    T(dist.GaussianRandomWalk, 0.1, 10),
    T(dist.GaussianRandomWalk, np.array([0.1, 0.3, 0.25]), 10),
    T(
        dist.GaussianCopulaBeta,
        np.array([7.0, 2.0]),
        np.array([4.0, 10.0]),
        np.array([[1.0, 0.75], [0.75, 1.0]]),
    ),
    T(dist.GaussianCopulaBeta, 2.0, 1.5, np.eye(3)),
    T(dist.GaussianCopulaBeta, 2.0, 1.5, np.full((5, 3, 3), np.eye(3))),
    T(dist.Gompertz, np.array([1.7]), np.array([[2.0], [3.0]])),
    T(dist.Gompertz, np.array([0.5, 1.3]), np.array([[1.0], [3.0]])),
    T(dist.Gumbel, 0.0, 1.0),
    T(dist.Gumbel, 0.5, 2.0),
    T(dist.Gumbel, np.array([0.0, 0.5]), np.array([1.0, 2.0])),
    T(FoldedNormal, 2.0, 4.0),
    T(FoldedNormal, np.array([2.0, 50.0]), np.array([4.0, 100.0])),
    T(dist.HalfCauchy, 1.0),
    T(dist.HalfCauchy, np.array([1.0, 2.0])),
    T(dist.HalfNormal, 1.0),
    T(dist.HalfNormal, np.array([1.0, 2.0])),
    T(_ImproperWrapper, constraints.positive, (), (3,)),
    T(dist.InverseGamma, np.array([1.7]), np.array([[2.0], [3.0]])),
    T(dist.InverseGamma, np.array([0.5, 1.3]), np.array([[1.0], [3.0]])),
    T(dist.Kumaraswamy, 10.0, np.array([2.0, 3.0])),
    T(dist.Kumaraswamy, np.array([1.7]), np.array([[2.0], [3.0]])),
    T(dist.Kumaraswamy, 0.6, 0.5),
    T(dist.Laplace, 0.0, 1.0),
    T(dist.Laplace, 0.5, np.array([1.0, 2.5])),
    T(dist.Laplace, np.array([1.0, -0.5]), np.array([2.3, 3.0])),
    T(dist.LKJ, 2, 0.5, "onion"),
    T(dist.LKJ, 5, np.array([0.5, 1.0, 2.0]), "cvine"),
    T(dist.LKJCholesky, 2, 0.5, "onion"),
    T(dist.LKJCholesky, 2, 0.5, "cvine"),
    T(dist.LKJCholesky, 5, np.array([0.5, 1.0, 2.0]), "onion"),
    pytest.param(
        *T(dist.LKJCholesky, 5, np.array([0.5, 1.0, 2.0]), "cvine"),
        marks=pytest.mark.skipif("CI" in os.environ, reason="reduce time for CI"),
    ),
    pytest.param(
        *T(dist.LKJCholesky, 3, np.array([[3.0, 0.6], [0.2, 5.0]]), "onion"),
        marks=pytest.mark.skipif("CI" in os.environ, reason="reduce time for CI"),
    ),
    T(dist.LKJCholesky, 3, np.array([[3.0, 0.6], [0.2, 5.0]]), "cvine"),
    T(dist.Logistic, 0.0, 1.0),
    T(dist.Logistic, 1.0, np.array([1.0, 2.0])),
    T(dist.Logistic, np.array([0.0, 1.0]), np.array([[1.0], [2.0]])),
    T(dist.LogNormal, 1.0, 0.2),
    T(dist.LogNormal, -1.0, np.array([0.5, 1.3])),
    T(dist.LogNormal, np.array([0.5, -0.7]), np.array([[0.1, 0.4], [0.5, 0.1]])),
    T(dist.LogUniform, 1.0, 2.0),
    T(dist.LogUniform, 1.0, np.array([2.0, 3.0])),
    T(dist.LogUniform, np.array([1.0, 2.0]), np.array([[3.0], [4.0]])),
    T(
        dist.MatrixNormal,
        1.0 * np.arange(6).reshape(3, 2),
        np.array([[1.0, 0, 0], [0.3, 0.36, 0], [0.4, 0.49, 4]]),
        np.array([[1.0, 0], [0.4, 1]]),
    ),
    T(
        dist.MatrixNormal,
        1.0 * np.arange(12).reshape((2, 3, 2)),
        np.array([[1.0, 0, 0], [0.3, 0.36, 0], [0.4, 0.49, 4]]) * np.ones((2, 3, 3)),
        np.array([[1.0, 0], [0.4, 0.5]]) * np.ones((2, 2, 2)),
    ),
    T(
        dist.MatrixNormal,
        1.0 * np.arange(36).reshape((2, 3, 3, 2)),
        np.identity(3),
        np.identity(2),
    ),
    T(dist.MultivariateNormal, 0.0, np.array([[1.0, 0.5], [0.5, 1.0]]), None, None),
    T(
        dist.MultivariateNormal,
        np.array([1.0, 3.0]),
        None,
        np.array([[1.0, 0.5], [0.5, 1.0]]),
        None,
    ),
    T(
        dist.MultivariateNormal,
        np.array([1.0, 3.0]),
        None,
        np.array([[[1.0, 0.5], [0.5, 1.0]]]),
        None,
    ),
    T(
        dist.MultivariateNormal,
        np.array([2.0]),
        None,
        None,
        np.array([[1.0, 0.0], [0.5, 1.0]]),
    ),
    T(
        dist.MultivariateNormal,
        np.arange(6, dtype=np.float32).reshape((3, 2)),
        None,
        None,
        np.array([[1.0, 0.0], [0.0, 1.0]]),
    ),
    T(
        dist.MultivariateNormal,
        0.0,
        None,
        np.broadcast_to(np.identity(3), (2, 3, 3)),
        None,
    ),
    T(
        dist.CAR,
        1.2,
        np.array([-0.2, 0.3]),
        0.1,
        np.array(
            [
                [0.0, 1.0, 1.0, 0.0],
                [1.0, 0.0, 0.0, 1.0],
                [1.0, 0.0, 0.0, 1.0],
                [0.0, 1.0, 1.0, 0.0],
            ]
        ),
    ),
    T(
        dist.CAR,
        np.array([0.0, 1.0, 3.0, 4.0]),
        0.1,
        np.array([0.3, 0.7]),
        np.array(
            [
                [0.0, 1.0, 1.0, 0.0],
                [1.0, 0.0, 0.0, 1.0],
                [1.0, 0.0, 0.0, 1.0],
                [0.0, 1.0, 1.0, 0.0],
            ]
        ),
    ),
    T(
        _SparseCAR,
        np.array([[0.0, 1.0, 3.0, 4.0], [2.0, -1.0, -3.0, 2.0]]),
        0.0,
        0.1,
        np.array(
            [
                [0.0, 1.0, 1.0, 0.0],
                [1.0, 0.0, 0.0, 1.0],
                [1.0, 0.0, 0.0, 1.0],
                [0.0, 1.0, 1.0, 0.0],
            ]
        ),
    ),
    T(
        dist.MultivariateStudentT,
        15.0,
        0.0,
        np.array([[1.0, 0.0], [0.5, 1.0]]),
    ),
    T(
        dist.MultivariateStudentT,
        15.0,
        np.array([1.0, 3.0]),
        np.array([[1.0, 0.0], [0.5, 1.0]]),
    ),
    T(
        dist.MultivariateStudentT,
        15.0,
        np.array([1.0, 3.0]),
        np.array([[[1.0, 0.0], [0.5, 1.0]]]),
    ),
    T(
        dist.MultivariateStudentT,
        15.0,
        np.array([3.0]),
        np.array([[1.0, 0.0], [0.5, 1.0]]),
    ),
    T(
        dist.MultivariateStudentT,
        15.0,
        np.arange(6, dtype=np.float32).reshape((3, 2)),
        np.array([[1.0, 0.0], [0.5, 1.0]]),
    ),
    T(
        dist.MultivariateStudentT,
        15.0,
        np.ones(3),
        np.broadcast_to(np.identity(3), (2, 3, 3)),
    ),
    T(
        dist.MultivariateStudentT,
        np.array(7.0),
        np.array([1.0, 3.0]),
        np.array([[1.0, 0.0], [0.5, 1.0]]),
    ),
    T(
        dist.MultivariateStudentT,
        np.arange(20, 22, dtype=jnp.float32),
        np.ones(3),
        np.broadcast_to(jnp.identity(3), (2, 3, 3)),
    ),
    T(
        dist.MultivariateStudentT,
        np.arange(20, 26, dtype=jnp.float32).reshape((3, 2)),
        np.ones(2),
        np.array([[1.0, 0.0], [0.5, 1.0]]),
    ),
    T(
        dist.LowRankMultivariateNormal,
        np.zeros(2),
        np.array([[1.0], [0.0]]),
        np.array([1.0, 1.0]),
    ),
    T(
        dist.LowRankMultivariateNormal,
        np.arange(6, dtype=jnp.float32).reshape((2, 3)),
        np.arange(6, dtype=jnp.float32).reshape((3, 2)),
        np.array([1.0, 2.0, 3.0]),
    ),
    T(dist.Normal, 0.0, 1.0),
    T(dist.Normal, 1.0, np.array([1.0, 2.0])),
    T(dist.Normal, np.array([0.0, 1.0]), np.array([[1.0], [2.0]])),
    T(dist.Pareto, 1.0, 2.0),
    T(dist.Pareto, np.array([1.0, 0.5]), np.array([0.3, 2.0])),
    T(dist.Pareto, np.array([[1.0], [3.0]]), np.array([1.0, 0.5])),
    T(dist.RelaxedBernoulliLogits, 2.0, -10.0),
    T(dist.RelaxedBernoulliLogits, np.array([1.0, 3.0]), np.array([3.0, 8.0])),
    T(dist.SoftLaplace, 1.0, 1.0),
    T(dist.SoftLaplace, np.array([-1.0, 50.0]), np.array([4.0, 100.0])),
    T(dist.StudentT, 1.0, 1.0, 0.5),
    T(dist.StudentT, 2.0, np.array([1.0, 2.0]), 2.0),
    T(dist.StudentT, np.array([3.0, 5.0]), np.array([[1.0], [2.0]]), 2.0),
    T(_TruncatedCauchy, 0.0, 1.0, -1.0, None),
    T(_TruncatedCauchy, 0.0, np.array([1.0, 2.0]), 1.0, None),
    T(
        _TruncatedCauchy,
        np.array([0.0, 1.0]),
        np.array([[1.0], [2.0]]),
        np.array([-2.0, 2.0]),
        None,
    ),
    T(_TruncatedCauchy, 0.0, 1.0, None, 1.0),
    T(_TruncatedCauchy, 0.0, 1.0, -1.0, 1.0),
    T(_TruncatedNormal, 0.0, 1.0, -1.0, None),
    T(_TruncatedNormal, -1.0, np.array([1.0, 2.0]), 1.0, None),
    T(
        _TruncatedNormal,
        np.array([0.0, 1.0]),
        np.array([[1.0], [2.0]]),
        np.array([-2.0, 2.0]),
        None,
    ),
    T(_TruncatedNormal, -1.0, 2.0, 1.0, 5.0),
    T(_TruncatedNormal, np.array([-1.0, 4.0]), 2.0, None, 5.0),
    T(_TruncatedNormal, -1.0, np.array([2.0, 3.0]), 1.0, None),
    T(_TruncatedNormal, -1.0, 2.0, np.array([-6.0, 4.0]), np.array([-4.0, 6.0])),
    T(
        _TruncatedNormal,
        np.array([0.0, 1.0]),
        np.array([[1.0], [2.0]]),
        None,
        np.array([-2.0, 2.0]),
    ),
    T(dist.TwoSidedTruncatedDistribution, dist.Laplace(0.0, 1.0), -2.0, 3.0),
    T(dist.Uniform, 0.0, 2.0),
    T(dist.Uniform, 1.0, np.array([2.0, 3.0])),
    T(dist.Uniform, np.array([0.0, 0.0]), np.array([[2.0], [3.0]])),
    T(dist.Weibull, 0.2, 1.1),
    T(dist.Weibull, 2.8, np.array([2.0, 2.0])),
    T(dist.Weibull, 1.8, np.array([[1.0, 1.0], [2.0, 2.0]])),
    T(
        _GaussianMixture,
        np.ones(3) / 3.0,
        np.array([0.0, 7.7, 2.1]),
        np.array([4.2, 7.7, 2.1]),
    ),
    T(
        _Gaussian2DMixture,
        np.array([0.2, 0.5, 0.3]),
        np.array([[-1.2, 1.5], [2.0, 2.0], [-1, 4.0]]),  # Mean
        np.array(
            [
                [
                    [0.1, -0.2],
                    [-0.2, 1.0],
                ],
                [
                    [0.75, 0.0],
                    [0.0, 0.75],
                ],
                [
                    [1.0, 0.5],
                    [0.5, 0.27],
                ],
            ]
        ),  # Covariance
    ),
    T(
        _GeneralMixture,
        np.array([0.2, 0.3, 0.5]),
        np.array([0.0, 7.7, 2.1]),
        np.array([4.2, 1.7, 2.1]),
    ),
    T(
        _General2DMixture,
        np.array([0.2, 0.5, 0.3]),
        np.array([[-1.2, 1.5], [2.0, 2.0], [-1, 4.0]]),  # Mean
        np.array(
            [
                [
                    [0.1, -0.2],
                    [-0.2, 1.0],
                ],
                [
                    [0.75, 0.0],
                    [0.0, 0.75],
                ],
                [
                    [1.0, 0.5],
                    [0.5, 0.27],
                ],
            ]
        ),  # Covariance
    ),
]
# Parameter table for directional (circular/toroidal/spherical) distributions,
# same (jax_dist, sp_dist, params) layout as CONTINUOUS.
DIRECTIONAL = [
    T(dist.VonMises, 2.0, 10.0),
    T(dist.VonMises, 2.0, np.array([150.0, 10.0])),
    T(dist.VonMises, np.array([1 / 3 * np.pi, -1.0]), np.array([20.0, 30.0])),
    pytest.param(
        *T(
            dist.SineBivariateVonMises,
            0.0,
            0.0,
            5.0,
            6.0,
            2.0,
        ),
        marks=pytest.mark.skipif("CI" in os.environ, reason="reduce time for CI"),
    ),
    T(
        dist.SineBivariateVonMises,
        3.003,
        -1.343,
        5.0,
        6.0,
        2.0,
    ),
    pytest.param(
        *T(
            dist.SineBivariateVonMises,
            -1.232,
            -1.3430,
            3.4,
            2.0,
            1.0,
        ),
        marks=pytest.mark.skipif("CI" in os.environ, reason="reduce time for CI"),
    ),
    pytest.param(
        *T(
            dist.SineBivariateVonMises,
            np.array([math.pi - 0.2, 1.0]),
            np.array([0.0, 1.0]),
            np.array([5.0, 5.0]),
            np.array([7.0, 0.5]),
            None,
            np.array([0.5, 0.1]),
        ),
        marks=pytest.mark.skipif("CI" in os.environ, reason="reduce time for CI"),
    ),
    T(dist.ProjectedNormal, np.array([0.0, 0.0])),
    T(dist.ProjectedNormal, np.array([[2.0, 3.0]])),
    T(dist.ProjectedNormal, np.array([0.0, 0.0, 0.0])),
    T(dist.ProjectedNormal, np.array([[-1.0, 2.0, 3.0]])),
    T(SineSkewedUniform, np.array([-math.pi / 4, 0.1])),
    T(SineSkewedVonMises, np.array([0.342355])),
    T(SineSkewedVonMisesBatched, np.array([[0.342355, -0.0001], [0.91, 0.09]])),
]
# Parameter table for discrete distributions, same (jax_dist, sp_dist, params)
# layout as CONTINUOUS.
DISCRETE = [
    T(dist.BetaBinomial, 2.0, 5.0, 10),
    T(
        dist.BetaBinomial,
        np.array([2.0, 4.0]),
        np.array([5.0, 3.0]),
        np.array([10, 12]),
    ),
    T(dist.BernoulliProbs, 0.2),
    T(dist.BernoulliProbs, np.array([0.2, 0.7])),
    T(dist.BernoulliLogits, np.array([-1.0, 3.0])),
    T(dist.BinomialProbs, np.array([0.2, 0.7]), np.array([10, 2])),
    T(dist.BinomialProbs, np.array([0.2, 0.7]), np.array([5, 8])),
    T(dist.BinomialLogits, np.array([-1.0, 3.0]), np.array([5, 8])),
    T(dist.CategoricalProbs, np.array([1.0])),
    T(dist.CategoricalProbs, np.array([0.1, 0.5, 0.4])),
    T(dist.CategoricalProbs, np.array([[0.1, 0.5, 0.4], [0.4, 0.4, 0.2]])),
    T(dist.CategoricalLogits, np.array([-5.0])),
    T(dist.CategoricalLogits, np.array([1.0, 2.0, -2.0])),
    T(dist.CategoricalLogits, np.array([[-1, 2.0, 3.0], [3.0, -4.0, -2.0]])),
    T(dist.Delta, 1),
    T(dist.Delta, np.array([0.0, 2.0])),
    T(dist.Delta, np.array([0.0, 2.0]), np.array([-2.0, -4.0])),
    T(dist.DirichletMultinomial, np.array([1.0, 2.0, 3.9]), 10),
    T(dist.DirichletMultinomial, np.array([0.2, 0.7, 1.1]), np.array([5, 5])),
    T(dist.GammaPoisson, 2.0, 2.0),
    T(dist.GammaPoisson, np.array([6.0, 2]), np.array([2.0, 8.0])),
    T(dist.GeometricProbs, 0.2),
    T(dist.GeometricProbs, np.array([0.2, 0.7])),
    T(dist.GeometricLogits, np.array([-1.0, 3.0])),
    T(dist.MultinomialProbs, np.array([0.2, 0.7, 0.1]), 10),
    T(dist.MultinomialProbs, np.array([0.2, 0.7, 0.1]), np.array([5, 8])),
    T(dist.MultinomialLogits, np.array([-1.0, 3.0]), np.array([[5], [8]])),
    T(dist.NegativeBinomialProbs, 10, 0.2),
    T(dist.NegativeBinomialProbs, 10, np.array([0.2, 0.6])),
    T(dist.NegativeBinomialProbs, np.array([4.2, 10.7, 2.1]), 0.2),
    T(
        dist.NegativeBinomialProbs,
        np.array([4.2, 10.7, 2.1]),
        np.array([0.2, 0.6, 0.5]),
    ),
    T(dist.NegativeBinomialLogits, 10, -2.1),
    T(dist.NegativeBinomialLogits, 10, np.array([-5.2, 2.1])),
    T(dist.NegativeBinomialLogits, np.array([4.2, 10.7, 2.1]), -5.2),
    T(
        dist.NegativeBinomialLogits,
        np.array([4.2, 7.7, 2.1]),
        np.array([4.2, 0.7, 2.1]),
    ),
    T(dist.NegativeBinomial2, 0.3, 10),
    T(dist.NegativeBinomial2, np.array([10.2, 7, 31]), 10),
    T(dist.NegativeBinomial2, np.array([10.2, 7, 31]), np.array([10.2, 20.7, 2.1])),
    T(dist.OrderedLogistic, -2, np.array([-10.0, 4.0, 9.0])),
    T(dist.OrderedLogistic, np.array([-4, 3, 4, 5]), np.array([-1.5])),
    T(dist.DiscreteUniform, -2, np.array([-1.0, 4.0, 9.0])),
    T(dist.DiscreteUniform, np.array([-4, 3, 4, 5]), np.array([6])),
    T(dist.Poisson, 2.0),
    T(dist.Poisson, np.array([2.0, 3.0, 5.0])),
    T(SparsePoisson, 2.0),
    T(SparsePoisson, np.array([2.0, 3.0, 5.0])),
    T(SparsePoisson, 2),
    T(dist.ZeroInflatedPoisson, 0.6, 2.0),
    T(dist.ZeroInflatedPoisson, np.array([0.2, 0.7, 0.3]), np.array([2.0, 3.0, 5.0])),
    T(ZeroInflatedPoissonLogits, 2.0, 3.0),
    T(
        ZeroInflatedPoissonLogits,
        np.array([0.2, 4.0, 0.3]),
        np.array([2.0, -3.0, 5.0]),
    ),
]
def _is_batched_multivariate(jax_dist):
return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0
def gen_values_within_bounds(constraint, size, key=random.PRNGKey(11)):
    """Draw a random array of shape `size` that satisfies `constraint`.

    Used to generate valid inputs for the support/log_prob tests.  Raises
    NotImplementedError for constraints without a generator.  Note: the order
    of the isinstance checks matters for constraint subclasses.
    """
    eps = 1e-6
    if constraint is constraints.boolean:
        return random.bernoulli(key, shape=size)
    elif isinstance(constraint, constraints.greater_than):
        # exp(normal) > 0, shifted just above the lower bound
        return jnp.exp(random.normal(key, size)) + constraint.lower_bound + eps
    elif isinstance(constraint, constraints.integer_interval):
        lower_bound = jnp.broadcast_to(constraint.lower_bound, size)
        upper_bound = jnp.broadcast_to(constraint.upper_bound, size)
        return random.randint(key, size, lower_bound, upper_bound + 1)
    elif isinstance(constraint, constraints.integer_greater_than):
        return constraint.lower_bound + random.poisson(key, np.array(5), shape=size)
    elif isinstance(constraint, constraints.interval):
        lower_bound = jnp.broadcast_to(constraint.lower_bound, size)
        upper_bound = jnp.broadcast_to(constraint.upper_bound, size)
        return random.uniform(key, size, minval=lower_bound, maxval=upper_bound)
    elif constraint in (constraints.real, constraints.real_vector):
        return random.normal(key, size)
    elif constraint is constraints.simplex:
        return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1])
    elif isinstance(constraint, constraints.multinomial):
        n = size[-1]
        return multinomial(
            key, p=jnp.ones((n,)) / n, n=constraint.upper_bound, shape=size[:-1]
        )
    elif constraint is constraints.corr_cholesky:
        return signed_stick_breaking_tril(
            random.uniform(
                key, size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1
            )
        )
    elif constraint is constraints.corr_matrix:
        cholesky = signed_stick_breaking_tril(
            random.uniform(
                key, size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1
            )
        )
        return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))
    elif constraint is constraints.lower_cholesky:
        return jnp.tril(random.uniform(key, size))
    elif constraint is constraints.positive_definite:
        # x @ x.T is positive semi-definite (almost surely definite here)
        x = random.normal(key, size)
        return jnp.matmul(x, jnp.swapaxes(x, -2, -1))
    elif constraint is constraints.ordered_vector:
        # cumulative sums of exponentials are strictly increasing
        x = jnp.cumsum(random.exponential(key, size), -1)
        return x - random.normal(key, size[:-1] + (1,))
    elif isinstance(constraint, constraints.independent):
        return gen_values_within_bounds(constraint.base_constraint, size, key)
    elif constraint is constraints.sphere:
        # NOTE(review): unlike gen_values_outside_bounds, no keepdims=True on
        # the norm — this would fail to broadcast for batched `size`; presumably
        # the sphere constraint is only exercised with 1-D sizes.  Confirm.
        x = random.normal(key, size)
        return x / jnp.linalg.norm(x, axis=-1)
    elif constraint is constraints.l1_ball:
        # NOTE(review): key2 is unused; the uniform draw reuses `key`.
        key1, key2 = random.split(key)
        sign = random.bernoulli(key1)
        bounds = [0, (-1) ** sign * 0.5]
        return random.uniform(key, size, float, *sorted(bounds))
    else:
        raise NotImplementedError("{} not implemented.".format(constraint))
def gen_values_outside_bounds(constraint, size, key=random.PRNGKey(11)):
    """Draw a random array of shape `size` that VIOLATES `constraint`.

    Mirror of gen_values_within_bounds, used to check that invalid values are
    rejected/flagged.  Raises NotImplementedError for unsupported constraints.
    """
    if constraint is constraints.boolean:
        # shifts {0, 1} down to {-2, -1}
        return random.bernoulli(key, shape=size) - 2
    elif isinstance(constraint, constraints.greater_than):
        return constraint.lower_bound - jnp.exp(random.normal(key, size))
    elif isinstance(constraint, constraints.integer_interval):
        lower_bound = jnp.broadcast_to(constraint.lower_bound, size)
        return random.randint(key, size, lower_bound - 1, lower_bound)
    elif isinstance(constraint, constraints.integer_greater_than):
        return constraint.lower_bound - random.poisson(key, np.array(5), shape=size)
    elif isinstance(constraint, constraints.interval):
        upper_bound = jnp.broadcast_to(constraint.upper_bound, size)
        return random.uniform(key, size, minval=upper_bound, maxval=upper_bound + 1.0)
    elif constraint in [constraints.real, constraints.real_vector]:
        # NaN is the only value outside the real line
        return lax.full(size, np.nan)
    elif constraint is constraints.simplex:
        return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1]) + 1e-2
    elif isinstance(constraint, constraints.multinomial):
        n = size[-1]
        return (
            multinomial(
                key, p=jnp.ones((n,)) / n, n=constraint.upper_bound, shape=size[:-1]
            )
            + 1
        )
    elif constraint is constraints.corr_cholesky:
        return (
            signed_stick_breaking_tril(
                random.uniform(
                    key,
                    size[:-2] + (size[-1] * (size[-1] - 1) // 2,),
                    minval=-1,
                    maxval=1,
                )
            )
            + 1e-2
        )
    elif constraint is constraints.corr_matrix:
        cholesky = 1e-2 + signed_stick_breaking_tril(
            random.uniform(
                key, size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1
            )
        )
        return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))
    elif constraint is constraints.lower_cholesky:
        # dense uniform matrix is almost surely not lower triangular
        return random.uniform(key, size)
    elif constraint is constraints.positive_definite:
        return random.normal(key, size)
    elif constraint is constraints.ordered_vector:
        # reverse an increasing vector to get a decreasing (invalid) one
        x = jnp.cumsum(random.exponential(key, size), -1)
        return x[..., ::-1]
    elif isinstance(constraint, constraints.independent):
        return gen_values_outside_bounds(constraint.base_constraint, size, key)
    elif constraint is constraints.sphere:
        # scale unit vectors to radius 2, off the sphere
        x = random.normal(key, size)
        x = x / jnp.linalg.norm(x, axis=-1, keepdims=True)
        return 2 * x
    elif constraint is constraints.l1_ball:
        # NOTE(review): key2 is unused; the uniform draw reuses `key`.
        key1, key2 = random.split(key)
        sign = random.bernoulli(key1)
        bounds = [(-1) ** sign * 1.1, (-1) ** sign * 2]
        return random.uniform(key, size, float, *sorted(bounds))
    else:
        raise NotImplementedError("{} not implemented.".format(constraint))
@pytest.mark.parametrize(
    "jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
@pytest.mark.parametrize("prepend_shape", [(), (2,), (2, 3)])
def test_dist_shape(jax_dist, sp_dist, params, prepend_shape):
    """Sample shapes must equal sample_shape + batch_shape + event_shape,
    and agree with the scipy counterpart when one exists."""
    jax_dist = jax_dist(*params)
    rng_key = random.PRNGKey(0)
    expected_shape = prepend_shape + jax_dist.batch_shape + jax_dist.event_shape
    samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)
    assert isinstance(samples, jnp.ndarray)
    assert jnp.shape(samples) == expected_shape
    if (
        sp_dist
        and not _is_batched_multivariate(jax_dist)
        and not isinstance(jax_dist, dist.MultivariateStudentT)
    ):
        sp_dist = sp_dist(*params)
        sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)
        assert jnp.shape(sp_samples) == expected_shape
    elif (
        sp_dist
        and not _is_batched_multivariate(jax_dist)
        and isinstance(jax_dist, dist.MultivariateStudentT)
    ):
        # scipy's multivariate t rejects an empty size, so substitute 1
        sp_dist = sp_dist(*params)
        size_ = prepend_shape + jax_dist.batch_shape
        size = (1) if size_ == () else size_  # NOTE(review): `(1)` is the int 1, not a tuple — likely meant `(1,)`; works for scipy's rvs either way
        try:
            sp_samples = sp_dist.rvs(size=size)
        except ValueError:
            pytest.skip("scipy multivariate t doesn't support size with > 1 element")
        assert jnp.shape(sp_samples) == expected_shape
    if isinstance(jax_dist, (dist.MultivariateNormal, dist.MultivariateStudentT)):
        # covariance must carry the batch dims, and precision must invert it
        assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2
        assert_allclose(
            jax_dist.precision_matrix,
            jnp.linalg.inv(jax_dist.covariance_matrix),
            rtol=1e-6,
        )
@pytest.mark.parametrize(
    "jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_infer_shapes(jax_dist, sp_dist, params):
    """Static shape inference must match the instantiated distribution."""
    param_shapes = tuple(getattr(p, "shape", ()) for p in params)
    # a `shape` attribute may itself be callable; invoke it in that case
    param_shapes = tuple(s() if callable(s) else s for s in param_shapes)
    instance = jax_dist(*params)
    dist_cls = type(instance)
    try:
        batch_shape, event_shape = dist_cls.infer_shapes(*param_shapes)
    except NotImplementedError:
        pytest.skip(f"{dist_cls.__name__}.infer_shapes() is not implemented")
    assert instance.batch_shape == batch_shape
    assert instance.event_shape == event_shape
@pytest.mark.parametrize(
    "jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_has_rsample(jax_dist, sp_dist, params):
    """`has_rsample` must propagate through masking, expansion and
    transformation, and `rsample` must succeed iff the flag is set."""
    jax_dist = jax_dist(*params)
    masked_dist = jax_dist.mask(False)
    indept_dist = jax_dist.expand_by([2]).to_event(1)
    transf_dist = dist.TransformedDistribution(jax_dist, biject_to(constraints.real))
    assert masked_dist.has_rsample == jax_dist.has_rsample
    assert indept_dist.has_rsample == jax_dist.has_rsample
    assert transf_dist.has_rsample == jax_dist.has_rsample
    if jax_dist.has_rsample:
        # only Delta may be both discrete and reparametrizable
        assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete
        if isinstance(jax_dist, dist.TransformedDistribution):
            assert jax_dist.base_dist.has_rsample
        else:
            assert set(jax_dist.arg_constraints) == set(jax_dist.reparametrized_params)
        jax_dist.rsample(random.PRNGKey(0))
        if isinstance(jax_dist, dist.Normal):
            # spot-check the wrappers for one concrete reparametrized dist
            masked_dist.rsample(random.PRNGKey(0))
            indept_dist.rsample(random.PRNGKey(0))
            transf_dist.rsample(random.PRNGKey(0))
    else:
        with pytest.raises(NotImplementedError):
            jax_dist.rsample(random.PRNGKey(0))
        if isinstance(jax_dist, dist.BernoulliProbs):
            # spot-check that the wrappers also refuse to rsample
            with pytest.raises(NotImplementedError):
                masked_dist.rsample(random.PRNGKey(0))
            with pytest.raises(NotImplementedError):
                indept_dist.rsample(random.PRNGKey(0))
            with pytest.raises(NotImplementedError):
                transf_dist.rsample(random.PRNGKey(0))
@pytest.mark.parametrize("batch_shape", [(), (4,), (3, 2)])
def test_unit(batch_shape):
    """Unit distribution: empty event and log_prob equal to the stored factor."""
    factor = random.normal(random.PRNGKey(0), batch_shape)
    unit_dist = dist.Unit(log_factor=factor)
    sample = unit_dist.sample(random.PRNGKey(1))
    assert sample.shape == batch_shape + (0,)
    assert (unit_dist.log_prob(sample) == factor).all()
@pytest.mark.parametrize("jax_dist, sp_dist, params", CONTINUOUS)
def test_sample_gradient(jax_dist, sp_dist, params):
    """Reparameterized sample gradients must match central finite differences."""
    # we have pathwise gradient for gamma sampler
    gamma_derived_params = {
        "Gamma": ["concentration"],
        "Beta": ["concentration1", "concentration0"],
        "BetaProportion": ["mean", "concentration"],
        "Chi2": ["df"],
        "Dirichlet": ["concentration"],
        "InverseGamma": ["concentration"],
        "LKJ": ["concentration"],
        "LKJCholesky": ["concentration"],
        "StudentT": ["df"],
    }.get(jax_dist.__name__, [])
    dist_args = [
        p
        for p in (
            inspect.getfullargspec(jax_dist.__init__)[0][1:]
            if inspect.isclass(jax_dist)
            # account for the case where jax_dist is a function
            else inspect.getfullargspec(jax_dist)[0]
        )
    ]
    params_dict = dict(zip(dist_args[: len(params)], params))
    jax_class = type(jax_dist(**params_dict))
    reparametrized_params = [
        p for p in jax_class.reparametrized_params if p not in gamma_derived_params
    ]
    if not reparametrized_params:
        pytest.skip("{} not reparametrized.".format(jax_class.__name__))
    nonrepara_params_dict = {
        k: v for k, v in params_dict.items() if k not in reparametrized_params
    }
    repara_params = tuple(
        v for k, v in params_dict.items() if k in reparametrized_params
    )
    rng_key = random.PRNGKey(0)

    def fn(args):
        # sum of a sample as a scalar function of the reparametrized params
        args_dict = dict(zip(reparametrized_params, args))
        return jnp.sum(
            jax_dist(**args_dict, **nonrepara_params_dict).sample(key=rng_key)
        )

    actual_grad = jax.grad(fn)(repara_params)
    assert len(actual_grad) == len(repara_params)

    eps = 1e-3
    for i in range(len(repara_params)):
        if repara_params[i] is None:
            continue
        args_lhs = [p if j != i else p - eps for j, p in enumerate(repara_params)]
        args_rhs = [p if j != i else p + eps for j, p in enumerate(repara_params)]
        fn_lhs = fn(args_lhs)
        fn_rhs = fn(args_rhs)
        # finite diff approximation
        expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)
        assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])
        assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02, atol=0.03)
@pytest.mark.parametrize(
    "jax_dist, params",
    [
        (dist.Gamma, (1.0,)),
        (dist.Gamma, (0.1,)),
        (dist.Gamma, (10.0,)),
        (dist.Chi2, (1.0,)),
        (dist.Chi2, (0.1,)),
        (dist.Chi2, (10.0,)),
        (dist.Beta, (1.0, 1.0)),
        (dist.StudentT, (5.0, 2.0, 4.0)),
    ],
)
def test_pathwise_gradient(jax_dist, params):
    """Monte Carlo pathwise gradient of E[z + z**2] must match the analytic
    gradient of mean + variance + mean**2."""
    rng_key = random.PRNGKey(0)
    N = 1000000

    def monte_carlo_objective(params):
        z = jax_dist(*params).sample(key=rng_key, sample_shape=(N,))
        return (z + z**2).mean(0)

    def analytic_objective(params):
        d = jax_dist(*params)
        return d.mean + d.variance + d.mean**2

    assert_allclose(
        grad(monte_carlo_objective)(params),
        grad(analytic_objective)(params),
        rtol=0.005,
    )
@pytest.mark.parametrize(
    "jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_jit_log_likelihood(jax_dist, sp_dist, params):
    """log_prob evaluated under jax.jit must match the eager result."""
    non_jittable = (
        "EulerMaruyama",
        "GaussianRandomWalk",
        "_ImproperWrapper",
        "LKJ",
        "LKJCholesky",
        "_SparseCAR",
    )
    if jax_dist.__name__ in non_jittable:
        pytest.xfail(reason="non-jittable params")
    key = random.PRNGKey(0)
    data = jax_dist(*params).sample(key=key, sample_shape=(2, 3))
    def log_likelihood(*args):
        return jax_dist(*args).log_prob(data)
    eager = log_likelihood(*params)
    jitted = jax.jit(log_likelihood)(*params)
    assert_allclose(jitted, eager, atol=2e-5, rtol=2e-5)
@pytest.mark.parametrize(
    "jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
@pytest.mark.parametrize("prepend_shape", [(), (2,), (2, 3)])
@pytest.mark.parametrize("jit", [False, True])
def test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):
    """Check log_prob output shape and, where scipy offers a counterpart,
    agreement with scipy's logpdf/logpmf (eager and jitted)."""
    jit_fn = _identity if not jit else jax.jit
    jax_dist = jax_dist(*params)
    rng_key = random.PRNGKey(0)
    samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)
    assert jax_dist.log_prob(samples).shape == prepend_shape + jax_dist.batch_shape
    truncated_dists = (
        dist.LeftTruncatedDistribution,
        dist.RightTruncatedDistribution,
        dist.TwoSidedTruncatedDistribution,
    )
    if sp_dist is None:
        # Truncated distributions have no direct scipy counterpart, but their
        # density follows from the base distribution's pdf and cdf.
        if isinstance(jax_dist, truncated_dists):
            if isinstance(params[0], dist.Distribution):
                # new api
                loc, scale, low, high = (
                    params[0].loc,
                    params[0].scale,
                    params[1],
                    params[2],
                )
            else:
                # old api
                loc, scale, low, high = params
            if low is None:
                low = -np.inf
            if high is None:
                high = np.inf
            sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)
            # truncated pdf = base pdf renormalized by the mass inside [low, high]
            expected = sp_dist.logpdf(samples) - jnp.log(
                sp_dist.cdf(high) - sp_dist.cdf(low)
            )
            assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-5)
            return
        pytest.skip("no corresponding scipy distn.")
    if _is_batched_multivariate(jax_dist):
        pytest.skip("batching not allowed in multivariate distns.")
    if jax_dist.event_shape and prepend_shape:
        # >>> d = sp.dirichlet([1.1, 1.1])
        # >>> samples = d.rvs(size=(2,))
        # >>> d.logpdf(samples)
        # ValueError: The input vector 'x' must lie within the normal simplex ...
        pytest.skip("batched samples cannot be scored by multivariate distributions.")
    sp_dist = sp_dist(*params)
    try:
        expected = sp_dist.logpdf(samples)
    except AttributeError:
        # discrete scipy distributions expose logpmf rather than logpdf
        expected = sp_dist.logpmf(samples)
    except ValueError as e:
        # precision issue: jnp.sum(x / jnp.sum(x)) = 0.99999994 != 1
        if "The input vector 'x' must lie within the normal simplex." in str(e):
            # renormalize in float64 so scipy accepts the simplex samples
            samples = jax.device_get(samples).astype("float64")
            samples = samples / samples.sum(axis=-1, keepdims=True)
            expected = sp_dist.logpdf(samples)
        else:
            raise e
    assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-5)
def test_mixture_log_prob():
    """A two-component mixture of identical Normals must score exactly like a
    single Normal."""
    mixture = dist.MixtureSameFamily(
        dist.Categorical(logits=np.zeros(2)), dist.Normal(0, 1).expand([2])
    )
    assert_allclose(mixture.log_prob(0.0), dist.Normal(0, 1).log_prob(0.0))
@pytest.mark.parametrize(
    "jax_dist, sp_dist, params",
    # TODO: add more complete pattern for Discrete.cdf
    CONTINUOUS + [T(dist.Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))],
)
@pytest.mark.filterwarnings("ignore:overflow encountered:RuntimeWarning")
def test_cdf_and_icdf(jax_dist, sp_dist, params):
    """Check cdf/icdf: derivative identities, round-trips, and scipy agreement."""
    d = jax_dist(*params)
    if d.event_dim > 0:
        pytest.skip("skip testing cdf/icdf methods of multivariate distributions")
    samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))
    quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())
    try:
        # looser tolerance for Gamma/StudentT — presumably their cdf/icdf are
        # less numerically accurate; confirm before tightening
        rtol = 2e-3 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-5
        if d.shape() == () and not d.is_discrete:
            # d/dx cdf(x) == pdf(x)
            assert_allclose(
                jax.vmap(jax.grad(d.cdf))(samples),
                jnp.exp(d.log_prob(samples)),
                atol=1e-5,
                rtol=rtol,
            )
            # d/dq icdf(q) == 1 / pdf(icdf(q)) by the inverse function rule
            assert_allclose(
                jax.vmap(jax.grad(d.icdf))(quantiles),
                jnp.exp(-d.log_prob(d.icdf(quantiles))),
                atol=1e-5,
                rtol=rtol,
            )
        # cdf and icdf must be mutual inverses
        assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-5, rtol=1e-5)
        assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-5, rtol=rtol)
    except NotImplementedError:
        pass
    # test against scipy
    if not sp_dist:
        pytest.skip("no corresponding scipy distn.")
    sp_dist = sp_dist(*params)
    try:
        actual_cdf = d.cdf(samples)
        expected_cdf = sp_dist.cdf(samples)
        assert_allclose(actual_cdf, expected_cdf, atol=1e-5, rtol=1e-5)
        actual_icdf = d.icdf(quantiles)
        # scipy names the inverse cdf `ppf`
        expected_icdf = sp_dist.ppf(quantiles)
        assert_allclose(actual_icdf, expected_icdf, atol=1e-4, rtol=1e-4)
    except NotImplementedError:
        pass
@pytest.mark.parametrize("jax_dist, sp_dist, params", CONTINUOUS + DIRECTIONAL)
def test_gof(jax_dist, sp_dist, params):
    """Goodness-of-fit: samples must be statistically consistent with
    exp(log_prob), checked per batch element."""
    if "Improper" in jax_dist.__name__:
        pytest.skip("distribution has improper .log_prob()")
    if "LKJ" in jax_dist.__name__:
        pytest.xfail("incorrect submanifold scaling")
    if jax_dist is dist.EulerMaruyama:
        d = jax_dist(*params)
        if d.event_dim > 1:
            pytest.skip("EulerMaruyama skip test when event shape is non-trivial.")
    num_samples = 10000
    if "BetaProportion" in jax_dist.__name__:
        # BetaProportion gets extra samples — presumably for test stability;
        # confirm before reducing
        num_samples = 20000
    rng_key = random.PRNGKey(0)
    d = jax_dist(*params)
    samples = d.sample(key=rng_key, sample_shape=(num_samples,))
    probs = np.exp(d.log_prob(samples))
    dim = None
    if jax_dist is dist.ProjectedNormal:
        # points on the sphere have one fewer intrinsic dimension than the
        # embedding space
        dim = samples.shape[-1] - 1
    # Test each batch independently.
    probs = probs.reshape(num_samples, -1)
    samples = samples.reshape(probs.shape + d.event_shape)
    if "Dirichlet" in jax_dist.__name__:
        # The Dirichlet density is over all but one of the probs.
        samples = samples[..., :-1]
    for b in range(probs.shape[1]):
        try:
            gof = auto_goodness_of_fit(samples[:, b], probs[:, b], dim=dim)
        except InvalidTest:
            pytest.skip("expensive test")
        else:
            assert gof > TEST_FAILURE_RATE
@pytest.mark.parametrize("jax_dist, sp_dist, params", CONTINUOUS + DISCRETE)
def test_independent_shape(jax_dist, sp_dist, params):
    """dist.Independent must move trailing batch dims into the event shape."""
    base = jax_dist(*params)
    full_shape = base.batch_shape + base.event_shape
    for ndims in range(len(base.batch_shape)):
        wrapped = dist.Independent(base, reinterpreted_batch_ndims=ndims)
        draw = wrapped.sample(random.PRNGKey(0))
        # index where the (possibly enlarged) event shape begins
        boundary = len(full_shape) - len(base.event_shape) - ndims
        assert wrapped.batch_shape == full_shape[:boundary]
        assert wrapped.event_shape == full_shape[boundary:]
        assert jnp.shape(wrapped.log_prob(draw)) == full_shape[:boundary]
def _tril_cholesky_to_tril_corr(x):
    """Map the strictly-lower-triangular vector of a correlation Cholesky
    factor to the strictly-lower-triangular vector of the correlation matrix."""
    strict_lower = vec_to_tril_matrix(x, diagonal=-1)
    # rows of a correlation Cholesky factor have unit norm, so the diagonal is
    # determined by the off-diagonal entries
    unit_diag = jnp.sqrt(1 - jnp.sum(strict_lower**2, axis=-1))
    factor = strict_lower + jnp.expand_dims(unit_diag, axis=-1) * jnp.identity(
        strict_lower.shape[-1]
    )
    return matrix_to_tril_vec(jnp.matmul(factor, factor.T), diagonal=-1)
@pytest.mark.parametrize("dimension", [2, 3, 5])
def test_log_prob_LKJCholesky_uniform(dimension):
    # When concentration=1, the distribution of correlation matrices is uniform.
    # We will test that fact here.
    d = dist.LKJCholesky(dimension=dimension, concentration=1)
    N = 5
    corr_log_prob = []
    for i in range(N):
        sample = d.sample(random.PRNGKey(i))
        log_prob = d.log_prob(sample)
        sample_tril = matrix_to_tril_vec(sample, diagonal=-1)
        # change of variables: log|det d(corr)/d(cholesky)| via the Jacobian of
        # the cholesky -> correlation map in tril-vector coordinates
        cholesky_to_corr_jac = np.linalg.slogdet(
            jax.jacobian(_tril_cholesky_to_tril_corr)(sample_tril)
        )[1]
        # density re-expressed in the correlation-matrix parameterization
        corr_log_prob.append(log_prob - cholesky_to_corr_jac)
    corr_log_prob = np.array(corr_log_prob)
    # test if they are constant
    assert_allclose(
        corr_log_prob,
        jnp.broadcast_to(corr_log_prob[0], corr_log_prob.shape),
        rtol=1e-6,
    )
    if dimension == 2:
        # when concentration = 1, LKJ gives a uniform distribution over correlation matrix,
        # hence for the case dimension = 2,
        # density of a correlation matrix will be Uniform(-1, 1) = 0.5.
        # In addition, jacobian of the transformation from cholesky -> corr is 1 (hence its
        # log value is 0) because the off-diagonal lower triangular element does not change
        # in the transform.
        # So target_log_prob = log(0.5)
        assert_allclose(corr_log_prob[0], jnp.log(0.5), rtol=1e-6)
@pytest.mark.parametrize("dimension", [2, 3, 5])
@pytest.mark.parametrize("concentration", [0.6, 2.2])
def test_log_prob_LKJCholesky(dimension, concentration):
    # We will test against the fact that LKJCorrCholesky can be seen as a
    # TransformedDistribution with base distribution is a distribution of partial
    # correlations in C-vine method (modulo an affine transform to change domain from (0, 1)
    # to (1, 0)) and transform is a signed stick-breaking process.
    d = dist.LKJCholesky(dimension, concentration, sample_method="cvine")
    beta_sample = d._beta.sample(random.PRNGKey(0))
    beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))
    # affine map x -> 2x - 1 sends (0, 1) samples to partial correlations in (-1, 1)
    partial_correlation = 2 * beta_sample - 1
    # log|det| of that affine map: one factor of 2 per coordinate
    affine_logdet = beta_sample.shape[-1] * jnp.log(2)
    sample = signed_stick_breaking_tril(partial_correlation)
    # compute signed stick breaking logdet
    inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2  # noqa: E731
    inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(partial_correlation)))
    unconstrained = inv_tanh(partial_correlation)
    corr_cholesky_logdet = biject_to(constraints.corr_cholesky).log_abs_det_jacobian(
        unconstrained, sample
    )
    signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet
    actual_log_prob = d.log_prob(sample)
    # change of variables: beta density pushed through affine + stick-breaking
    expected_log_prob = beta_log_prob - affine_logdet - signed_stick_breaking_logdet
    assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-5)
    # log_prob must agree under jit as well
    assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-6)
def test_zero_inflated_logits_probs_agree():
    """ZeroInflatedDistribution must score identically whether the gate is
    given as a probability or as the corresponding logit."""
    base = dist.GammaPoisson(
        np.exp(np.random.normal(1)), np.exp(np.random.normal(1))
    )
    logits = np.random.normal(0)
    via_logits = dist.ZeroInflatedDistribution(base, gate_logits=logits)
    via_probs = dist.ZeroInflatedDistribution(base, gate=expit(logits))
    counts = np.random.randint(0, 20, (1000, 100))
    assert_allclose(via_probs.log_prob(counts), via_logits.log_prob(counts))
@pytest.mark.parametrize("rate", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])
def test_ZIP_log_prob(rate):
    """Zero-inflated Poisson degenerates to Poisson (gate=0) and Delta(0) (gate=1)."""
    # gate = 0: no inflation, so ZIP scores like a plain Poisson
    no_inflation = dist.ZeroInflatedPoisson(0.0, rate)
    draws = no_inflation.sample(random.PRNGKey(0), (20,))
    assert_allclose(
        no_inflation.log_prob(draws), dist.Poisson(rate).log_prob(draws), rtol=1e-6
    )
    # gate = 1: all mass at zero, so ZIP scores like Delta(0)
    full_inflation = dist.ZeroInflatedPoisson(1.0, rate)
    values = np.array([0.0, 1.0])
    assert_allclose(
        full_inflation.log_prob(values), dist.Delta(0.0).log_prob(values), rtol=1e-6
    )
@pytest.mark.parametrize("total_count", [1, 2, 3, 10])
@pytest.mark.parametrize("shape", [(1,), (3, 1), (2, 3, 1)])
def test_beta_binomial_log_prob(total_count, shape):
    """BetaBinomial.log_prob must match a Monte Carlo marginalization of
    Binomial over Beta-distributed success probabilities."""
    c0 = np.exp(np.random.normal(size=shape))
    c1 = np.exp(np.random.normal(size=shape))
    support = jnp.arange(1 + total_count)
    n_draws = 100000
    beta_draws = np.random.beta(c1, c0, size=(n_draws,) + shape)
    conditional = dist.Binomial(total_count, beta_draws).log_prob(support)
    # log of the sample average of exp(conditional) over the beta draws
    marginal = logsumexp(conditional, 0) - jnp.log(n_draws)
    actual = dist.BetaBinomial(c1, c0, total_count).log_prob(support)
    assert_allclose(actual, marginal, rtol=0.02)
@pytest.mark.parametrize("total_count", [1, 2, 3, 10])
@pytest.mark.parametrize("batch_shape", [(1,), (3, 1), (2, 3, 1)])
def test_dirichlet_multinomial_log_prob(total_count, batch_shape):
    """DirichletMultinomial.log_prob must match a Monte Carlo marginalization
    of Multinomial over Dirichlet-distributed probabilities."""
    event_shape = (3,)
    concentration = np.exp(np.random.normal(size=batch_shape + event_shape))
    # test on one-hots
    one_hots = total_count * jnp.eye(event_shape[-1]).reshape(
        event_shape + (1,) * len(batch_shape) + event_shape
    )
    n_draws = 100000
    dirichlet_draws = dist.Dirichlet(concentration).sample(
        random.PRNGKey(0), (n_draws, 1)
    )
    conditional = dist.Multinomial(total_count, dirichlet_draws).log_prob(one_hots)
    # log of the sample average of exp(conditional) over the dirichlet draws
    marginal = logsumexp(conditional, 0) - jnp.log(n_draws)
    actual = dist.DirichletMultinomial(concentration, total_count).log_prob(one_hots)
    assert_allclose(actual, marginal, rtol=0.05)
@pytest.mark.parametrize("shape", [(1,), (3, 1), (2, 3, 1)])
def test_gamma_poisson_log_prob(shape):
    """GammaPoisson.log_prob must match a Monte Carlo marginalization of
    Poisson over Gamma-distributed rates."""
    conc = np.exp(np.random.normal(size=shape))
    rate = np.exp(np.random.normal(size=shape))
    support = jnp.arange(15)
    n_draws = 300000
    # np.random.gamma is parameterized by scale = 1 / rate
    rate_draws = np.random.gamma(conc, 1 / rate, size=(n_draws,) + shape)
    conditional = dist.Poisson(rate_draws).log_prob(support)
    # log of the sample average of exp(conditional) over the gamma draws
    marginal = logsumexp(conditional, 0) - jnp.log(n_draws)
    actual = dist.GammaPoisson(conc, rate).log_prob(support)
    assert_allclose(actual, marginal, rtol=0.05)
@pytest.mark.parametrize(
    "jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_log_prob_gradient(jax_dist, sp_dist, params):
    """Compare the autodiff gradient of summed log_prob w.r.t. each parameter
    against a central finite-difference estimate."""
    if jax_dist in [dist.LKJ, dist.LKJCholesky]:
        pytest.skip("we have separated tests for LKJCholesky distribution")
    if jax_dist is _ImproperWrapper:
        pytest.skip("no param for ImproperUniform to test for log_prob gradient")
    rng_key = random.PRNGKey(0)
    value = jax_dist(*params).sample(rng_key)
    def fn(*args):
        return jnp.sum(jax_dist(*args).log_prob(value))
    eps = 1e-3
    for i in range(len(params)):
        if jax_dist is dist.EulerMaruyama and i == 1:
            # skip taking grad w.r.t. sde_fn
            continue
        if jax_dist is _SparseCAR and i == 3:
            # skip taking grad w.r.t. adj_matrix
            continue
        if isinstance(
            params[i], dist.Distribution
        ):  # skip taking grad w.r.t. base_dist
            continue
        if params[i] is None or jnp.result_type(params[i]) in (jnp.int32, jnp.int64):
            # absent or integer-valued parameters are not differentiable
            continue
        actual_grad = jax.grad(fn, i)(*params)
        args_lhs = [p if j != i else p - eps for j, p in enumerate(params)]
        args_rhs = [p if j != i else p + eps for j, p in enumerate(params)]
        fn_lhs = fn(*args_lhs)
        fn_rhs = fn(*args_rhs)
        # finite diff approximation
        expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)
        assert jnp.shape(actual_grad) == jnp.shape(params[i])
        if i == 0 and jax_dist is dist.Delta:
            # grad w.r.t. `value` of Delta distribution will be 0
            # but numerical value will give nan (= inf - inf)
            expected_grad = 0.0
        assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01, atol=0.01)
@pytest.mark.parametrize(
    "jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_mean_var(jax_dist, sp_dist, params):
    """Compare analytic mean/variance against scipy (when available) and
    against Monte Carlo sample statistics, with special-cased branches for
    LKJ, VonMises, SineBivariateVonMises, and MatrixNormal."""
    if jax_dist is _ImproperWrapper:
        pytest.skip("Improper distribution does not has mean/var implemented")
    if jax_dist is FoldedNormal:
        pytest.skip("Folded distribution does not has mean/var implemented")
    if jax_dist is dist.EulerMaruyama:
        pytest.skip("EulerMaruyama distribution does not has mean/var implemented")
    if jax_dist is dist.RelaxedBernoulliLogits:
        pytest.skip("RelaxedBernoulli distribution does not has mean/var implemented")
    if "SineSkewed" in jax_dist.__name__:
        pytest.skip("Skewed Distribution are not symmetric about location.")
    if jax_dist in (
        _TruncatedNormal,
        _TruncatedCauchy,
        dist.LeftTruncatedDistribution,
        dist.RightTruncatedDistribution,
        dist.TwoSidedTruncatedDistribution,
    ):
        pytest.skip("Truncated distributions do not has mean/var implemented")
    if jax_dist is dist.ProjectedNormal:
        pytest.skip("Mean is defined in submanifold")
    # fewer samples for the matrix-valued / bivariate-circular cases
    n = (
        20000
        if jax_dist in [dist.LKJ, dist.LKJCholesky, dist.SineBivariateVonMises]
        else 200000
    )
    d_jax = jax_dist(*params)
    k = random.PRNGKey(0)
    samples = d_jax.sample(k, sample_shape=(n,)).astype(np.float32)
    # check with suitable scipy implementation if available
    # XXX: VonMises is already tested below
    if (
        sp_dist
        and not _is_batched_multivariate(d_jax)
        and jax_dist
        not in [dist.VonMises, dist.MultivariateStudentT, dist.MatrixNormal]
    ):
        d_sp = sp_dist(*params)
        try:
            sp_mean = d_sp.mean()
        except TypeError:  # mvn does not have .mean() method
            sp_mean = d_sp.mean
        # for multivariate distns try .cov first
        if d_jax.event_shape:
            try:
                sp_var = jnp.diag(d_sp.cov())
            except TypeError:  # mvn does not have .cov() method
                sp_var = jnp.diag(d_sp.cov)
            except AttributeError:
                sp_var = d_sp.var()
        else:
            sp_var = d_sp.var()
        assert_allclose(d_jax.mean, sp_mean, rtol=0.01, atol=1e-7)
        assert_allclose(d_jax.variance, sp_var, rtol=0.01, atol=1e-7)
        # sample statistics are only checked when the analytic values are finite
        if jnp.all(jnp.isfinite(sp_mean)):
            assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05, atol=1e-2)
        if jnp.all(jnp.isfinite(sp_var)):
            assert_allclose(
                jnp.std(samples, 0), jnp.sqrt(d_jax.variance), rtol=0.05, atol=1e-2
            )
    elif jax_dist in [dist.LKJ, dist.LKJCholesky]:
        if jax_dist is dist.LKJCholesky:
            # recover correlation matrices from their Cholesky factors: L @ L^T
            corr_samples = jnp.matmul(samples, jnp.swapaxes(samples, -2, -1))
        else:
            corr_samples = samples
        dimension, concentration, _ = params
        # marginal of off-diagonal entries
        marginal = dist.Beta(
            concentration + 0.5 * (dimension - 2), concentration + 0.5 * (dimension - 2)
        )
        # scale statistics due to linear mapping
        marginal_mean = 2 * marginal.mean - 1
        marginal_std = 2 * jnp.sqrt(marginal.variance)
        expected_mean = jnp.broadcast_to(
            jnp.reshape(marginal_mean, jnp.shape(marginal_mean) + (1, 1)),
            jnp.shape(marginal_mean) + d_jax.event_shape,
        )
        expected_std = jnp.broadcast_to(
            jnp.reshape(marginal_std, jnp.shape(marginal_std) + (1, 1)),
            jnp.shape(marginal_std) + d_jax.event_shape,
        )
        # diagonal elements of correlation matrices are 1
        expected_mean = expected_mean * (1 - jnp.identity(dimension)) + jnp.identity(
            dimension
        )
        expected_std = expected_std * (1 - jnp.identity(dimension))
        assert_allclose(jnp.mean(corr_samples, axis=0), expected_mean, atol=0.01)
        assert_allclose(jnp.std(corr_samples, axis=0), expected_std, atol=0.01)
    elif jax_dist in [dist.VonMises]:
        # circular mean = sample mean
        assert_allclose(d_jax.mean, jnp.mean(samples, 0), rtol=0.05, atol=1e-2)
        # circular variance
        x, y = jnp.mean(jnp.cos(samples), 0), jnp.mean(jnp.sin(samples), 0)
        expected_variance = 1 - jnp.sqrt(x**2 + y**2)
        assert_allclose(d_jax.variance, expected_variance, rtol=0.05, atol=1e-2)
    elif jax_dist in [dist.SineBivariateVonMises]:
        # compare the circular mean of each coordinate to d_jax.mean
        phi_loc = _circ_mean(samples[..., 0])
        psi_loc = _circ_mean(samples[..., 1])
        assert_allclose(
            d_jax.mean, jnp.stack((phi_loc, psi_loc), axis=-1), rtol=0.05, atol=1e-2
        )
    elif jax_dist in [dist.MatrixNormal]:
        sample_shape = (200_000,)
        # use X ~ MN(loc, U, V) then vec(X) ~ MVN(vec(loc), kron(V, U))
        if len(d_jax.batch_shape) > 0:
            axes = [len(sample_shape) + i for i in range(len(d_jax.batch_shape))]
            axes = tuple(axes)
            # move batch axes to the front and check each batch member separately
            samples_re = jnp.moveaxis(samples, axes, jnp.arange(len(axes)))
            subshape = samples_re.shape[: len(axes)]
            ixi = product(*[range(k) for k in subshape])
            for ix in ixi:
                # mean
                def get_min_shape(ix, batch_shape):
                    # NOTE(review): `min` on tuples is lexicographic, not
                    # elementwise — presumably intended to clamp indices for
                    # broadcasted (size-1) dims; confirm this is what's wanted
                    return min(ix, tuple(map(lambda x: x - 1, batch_shape)))
                ix_loc = get_min_shape(ix, d_jax.loc.shape[: len(ix)])
                # NOTE(review): the return value of jnp.allclose is discarded
                # here (and in the calls below), so these comparisons assert
                # nothing — they likely need a wrapping `assert`; confirm the
                # tolerances before tightening (rtol=0.5 looks swapped with atol)
                jnp.allclose(
                    jnp.mean(samples_re[ix], 0),
                    jnp.squeeze(d_jax.mean[ix_loc]),
                    rtol=0.5,
                    atol=1e-2,
                )
                # cov
                samples_mvn = jnp.squeeze(samples_re[ix]).reshape(
                    sample_shape + (-1,), order="F"
                )
                ix_col = get_min_shape(ix, d_jax.scale_tril_column.shape[: len(ix)])
                ix_row = get_min_shape(ix, d_jax.scale_tril_row.shape[: len(ix)])
                scale_tril = my_kron(
                    d_jax.scale_tril_column[ix_col],
                    d_jax.scale_tril_row[ix_row],
                )
                sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))
                # NOTE(review): result discarded (see above)
                jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=1e-2)
        else:  # unbatched
            # mean
            # NOTE(review): result of jnp.allclose discarded here as well
            jnp.allclose(
                jnp.mean(samples, 0),
                jnp.squeeze(d_jax.mean),
                rtol=0.5,
                atol=1e-2,
            )
            # cov
            samples_mvn = jnp.squeeze(samples).reshape(sample_shape + (-1,), order="F")
            scale_tril = my_kron(
                jnp.squeeze(d_jax.scale_tril_column), jnp.squeeze(d_jax.scale_tril_row)
            )
            sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))
            jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=1e-2)
    else:
        # no scipy counterpart: compare analytic moments to sample statistics
        if jnp.all(jnp.isfinite(d_jax.mean)):
            assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05, atol=1e-2)
        if isinstance(d_jax, dist.CAR):
            pytest.skip("CAR distribution does not have `variance` implemented.")
        if isinstance(d_jax, dist.Gompertz):
            pytest.skip("Gompertz distribution does not have `variance` implemented.")
        if jnp.all(jnp.isfinite(d_jax.variance)):
            assert_allclose(
                jnp.std(samples, 0), jnp.sqrt(d_jax.variance), rtol=0.05, atol=1e-2
            )
@pytest.mark.parametrize(
    "jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
@pytest.mark.parametrize("prepend_shape", [(), (2,), (2, 3)])
def test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):
    """Check parameter validation (arg_constraints) and support validation:
    invalid params raise, invalid samples warn, valid densities match scipy."""
    if jax_dist in (
        _TruncatedNormal,
        _TruncatedCauchy,
        _GaussianMixture,
        _Gaussian2DMixture,
        _GeneralMixture,
        _General2DMixture,
    ):
        pytest.skip(f"{jax_dist.__name__} is a function, not a class")
    dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]
    valid_params, oob_params = list(params), list(params)
    key = random.PRNGKey(1)
    dependent_constraint = False
    for i in range(len(params)):
        # several distributions only have a subset of their args perturbed
        if (
            jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky)
            and dist_args[i] != "concentration"
        ):
            continue
        if "SineSkewed" in jax_dist.__name__ and dist_args[i] != "skewness":
            continue
        if jax_dist is dist.EulerMaruyama and dist_args[i] != "t":
            continue
        if (
            jax_dist is dist.TwoSidedTruncatedDistribution
            and dist_args[i] == "base_dist"
        ):
            continue
        if jax_dist is dist.GaussianRandomWalk and dist_args[i] == "num_steps":
            continue
        if (
            jax_dist is dist.SineBivariateVonMises
            and dist_args[i] == "weighted_correlation"
        ):
            continue
        if params[i] is None:
            oob_params[i] = None
            valid_params[i] = None
            continue
        constraint = jax_dist.arg_constraints[dist_args[i]]
        if isinstance(constraint, constraints._Dependent):
            # cannot independently perturb a parameter whose constraint
            # depends on other parameters
            dependent_constraint = True
            break
        key, key_gen = random.split(key)
        oob_params[i] = gen_values_outside_bounds(
            constraint, jnp.shape(params[i]), key_gen
        )
        valid_params[i] = gen_values_within_bounds(
            constraint, jnp.shape(params[i]), key_gen
        )
    if jax_dist is dist.MultivariateStudentT:
        # As mean is only defined for df > 1 & we instantiate
        # scipy.stats.multivariate_t with same mean as jax_dist
        # we need to ensure this is defined, so force df >= 1
        valid_params[0] += 1
    if jax_dist is dist.LogUniform:
        # scipy.stats.loguniform take parameter a and b
        # which is a > 0 and b > a.
        # gen_values_within_bounds() generates just
        # a > 0 and b > 0. Then, make b = a + b.
        valid_params[1] += valid_params[0]
    # without validate_args, out-of-bound params must still construct
    assert jax_dist(*oob_params)
    # Invalid parameter values throw ValueError
    if not dependent_constraint and (
        jax_dist is not _ImproperWrapper and "SineSkewed" not in jax_dist.__name__
    ):
        with pytest.raises(ValueError):
            jax_dist(*oob_params, validate_args=True)
        with pytest.raises(ValueError):
            # test error raised under jit omnistaging
            oob_params = jax.device_get(oob_params)
            def dist_gen_fn():
                d = jax_dist(*oob_params, validate_args=True)
                return d
            jax.jit(dist_gen_fn)()
    d = jax_dist(*valid_params, validate_args=True)
    # Test agreement of log density evaluation on randomly generated samples
    # with scipy's implementation when available.
    if (
        sp_dist
        and not _is_batched_multivariate(d)
        and not (d.event_shape and prepend_shape)
    ):
        valid_samples = gen_values_within_bounds(
            d.support, size=prepend_shape + d.batch_shape + d.event_shape
        )
        try:
            expected = sp_dist(*valid_params).logpdf(valid_samples)
        except AttributeError:
            # discrete scipy distributions expose logpmf instead of logpdf
            expected = sp_dist(*valid_params).logpmf(valid_samples)
        assert_allclose(d.log_prob(valid_samples), expected, atol=1e-5, rtol=1e-5)
    # Out-of-support samples trigger an "Out-of-support" UserWarning
    oob_samples = gen_values_outside_bounds(
        d.support, size=prepend_shape + d.batch_shape + d.event_shape
    )
    with pytest.warns(UserWarning, match="Out-of-support"):
        d.log_prob(oob_samples)
    with pytest.warns(UserWarning, match="Out-of-support"):
        # test warning work under jit omnistaging
        oob_samples = jax.device_get(oob_samples)
        valid_params = jax.device_get(valid_params)
        def log_prob_fn():
            d = jax_dist(*valid_params, validate_args=True)
            return d.log_prob(oob_samples)
        jax.jit(log_prob_fn)()
def test_omnistaging_invalid_param():
    """An invalid (negative) scale must raise even when construction is jitted."""
    def build_and_score(x):
        return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)
    with pytest.raises(ValueError, match="got invalid"):
        jax.jit(build_and_score)(0)
def test_omnistaging_invalid_sample():
    """An out-of-support value must still warn when log_prob runs under jit."""
    def score_negative(x):
        return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)
    with pytest.warns(UserWarning, match="Out-of-support"):
        jax.jit(score_negative)(0)
def test_categorical_log_prob_grad():
    """Categorical values and gradients must agree whether parameterized by
    probs (via softmax) or directly by logits."""
    data = jnp.repeat(jnp.arange(3), 10)
    def loss_probs(x):
        probs = jax.nn.softmax(x * jnp.arange(1, 4))
        return dist.Categorical(probs).log_prob(data).sum()
    def loss_logits(x):
        return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data).sum()
    point = 0.5
    value_p, grad_p = jax.value_and_grad(loss_probs)(point)
    value_l, grad_l = jax.value_and_grad(loss_logits)(point)
    assert_allclose(value_p, value_l, rtol=1e-6)
    assert_allclose(grad_p, grad_l, atol=1e-4)
def test_beta_proportion_invalid_mean():
    """BetaProportion must reject a mean on the boundary of the unit interval."""
    expected_msg = r"^BetaProportion distribution got invalid mean parameter\.$"
    with dist.distribution.validation_enabled():
        with pytest.raises(ValueError, match=expected_msg):
            dist.BetaProportion(1.0, 1.0)
########################################
# Tests for constraints and transforms #
########################################
@pytest.mark.parametrize(
    "constraint, x, expected",
    [
        (constraints.boolean, np.array([True, False]), np.array([True, True])),
        (constraints.boolean, np.array([1, 1]), np.array([True, True])),
        (constraints.boolean, np.array([-1, 1]), np.array([False, True])),
        (
            constraints.corr_cholesky,
            np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]),
            np.array([True, False]),
        ),  # NB: not lower_triangular
        (
            constraints.corr_cholesky,
            np.array([[[1, 0], [1, 0]], [[1, 0], [0.5, 0.5]]]),
            np.array([False, False]),
        ),  # NB: not positive_diagonal & not unit_norm_row
        (
            constraints.corr_matrix,
            np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]),
            np.array([True, False]),
        ),  # NB: not lower_triangular
        (
            constraints.corr_matrix,
            np.array([[[1, 0], [1, 0]], [[1, 0], [0.5, 0.5]]]),
            np.array([False, False]),
        ),  # NB: not unit diagonal
        (constraints.greater_than(1), 3, True),
        (
            constraints.greater_than(1),
            np.array([-1, 1, 5]),
            np.array([False, False, True]),
        ),
        (constraints.integer_interval(-3, 5), 0, True),
        (
            constraints.integer_interval(-3, 5),
            np.array([-5, -3, 0, 1.1, 5, 7]),
            np.array([False, True, True, False, True, False]),
        ),
        (constraints.interval(-3, 5), 0, True),
        (
            constraints.interval(-3, 5),
            np.array([-5, -3, 0, 5, 7]),
            np.array([False, True, True, True, False]),
        ),
        (constraints.less_than(1), -2, True),
        (
            constraints.less_than(1),
            np.array([-1, 1, 5]),
            np.array([True, False, False]),
        ),
        (constraints.lower_cholesky, np.array([[1.0, 0.0], [-2.0, 0.1]]), True),
        (
            constraints.lower_cholesky,
            np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]),
            np.array([False, False]),
        ),
        (constraints.nonnegative_integer, 3, True),
        (
            constraints.nonnegative_integer,
            np.array([-1.0, 0.0, 5.0]),
            np.array([False, True, True]),
        ),
        (constraints.positive, 3, True),
        (constraints.positive, np.array([-1, 0, 5]), np.array([False, False, True])),
        (constraints.positive_definite, np.array([[1.0, 0.3], [0.3, 1.0]]), True),
        (
            constraints.positive_definite,
            np.array([[[2.0, 0.4], [0.3, 2.0]], [[1.0, 0.1], [0.1, 0.0]]]),
            np.array([False, False]),
        ),
        (constraints.positive_integer, 3, True),
        (
            constraints.positive_integer,
            np.array([-1.0, 0.0, 5.0]),
            np.array([False, False, True]),
        ),
        (constraints.real, -1, True),
        (
            constraints.real,
            np.array([np.inf, -np.inf, np.nan, np.pi]),
            np.array([False, False, False, True]),
        ),
        (constraints.simplex, np.array([0.1, 0.3, 0.6]), True),
        (
            constraints.simplex,
            np.array([[0.1, 0.3, 0.6], [-0.1, 0.6, 0.5], [0.1, 0.6, 0.5]]),
            np.array([True, False, False]),
        ),
        (constraints.softplus_positive, 3, True),
        (
            constraints.softplus_positive,
            np.array([-1, 0, 5]),
            np.array([False, False, True]),
        ),
        (
            constraints.softplus_lower_cholesky,
            np.array([[1.0, 0.0], [-2.0, 0.1]]),
            True,
        ),
        (
            constraints.softplus_lower_cholesky,
            np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]),
            np.array([False, False]),
        ),
        (constraints.unit_interval, 0.1, True),
        (
            constraints.unit_interval,
            np.array([-5, 0, 0.5, 1, 7]),
            np.array([False, True, True, True, False]),
        ),
        (
            constraints.sphere,
            np.array([[1, 0, 0], [0.5, 0.5, 0]]),
            np.array([True, False]),
        ),
        (
            constraints.open_interval(0.0, 1.0),
            np.array([-5, 0, 0.5, 1, 7]),
            np.array([False, False, True, False, False]),
        ),
    ],
)
def test_constraints(constraint, x, expected):
    """Check constraint membership against expected values, and that
    feasible_like produces an in-support point of matching shape."""
    v = constraint.feasible_like(x)
    if jnp.result_type(v) == "float32" or jnp.result_type(v) == "float64":
        # a float-valued feasible point implies a non-discrete constraint
        assert not constraint.is_discrete
    assert_array_equal(constraint(x), expected)
    feasible_value = constraint.feasible_like(x)
    assert jnp.shape(feasible_value) == jnp.shape(x)
    # the feasible point must satisfy the constraint everywhere
    assert_allclose(constraint(feasible_value), jnp.full(jnp.shape(expected), True))
    # the feasible point should map back to the origin of unconstrained space
    try:
        inverse = biject_to(constraint).inv(feasible_value)
    except NotImplementedError:
        pass
    else:
        assert_allclose(inverse, jnp.zeros_like(inverse), atol=2e-7)
@pytest.mark.parametrize(
    "constraint",
    [
        constraints.corr_cholesky,
        constraints.corr_matrix,
        constraints.greater_than(2),
        constraints.interval(-3, 5),
        constraints.l1_ball,
        constraints.less_than(1),
        constraints.lower_cholesky,
        constraints.scaled_unit_lower_cholesky,
        constraints.ordered_vector,
        constraints.positive,
        constraints.positive_definite,
        constraints.positive_ordered_vector,
        constraints.real,
        constraints.real_vector,
        constraints.simplex,
        constraints.softplus_positive,
        constraints.softplus_lower_cholesky,
        constraints.unit_interval,
        constraints.open_interval(0.0, 1.0),
    ],
    ids=lambda x: x.__class__,
)
@pytest.mark.parametrize("shape", [(), (1,), (3,), (6,), (3, 1), (1, 3), (5, 3)])
def test_biject_to(constraint, shape):
    """Check that biject_to(constraint) maps unconstrained points into the
    support, inverts exactly, and reports a consistent log-det-Jacobian."""
    transform = biject_to(constraint)
    event_dim = transform.domain.event_dim
    # bound-type constraints must propagate their bounds to the codomain
    if isinstance(constraint, constraints._Interval):
        assert transform.codomain.upper_bound == constraint.upper_bound
        assert transform.codomain.lower_bound == constraint.lower_bound
    elif isinstance(constraint, constraints._GreaterThan):
        assert transform.codomain.lower_bound == constraint.lower_bound
    elif isinstance(constraint, constraints._LessThan):
        assert transform.codomain.upper_bound == constraint.upper_bound
    if len(shape) < event_dim:
        # not enough dimensions to form a single event
        return
    rng_key = random.PRNGKey(0)
    x = random.normal(rng_key, shape)
    y = transform(x)
    assert transform.forward_shape(x.shape) == y.shape
    assert transform.inverse_shape(y.shape) == x.shape
    # test inv work for NaN arrays:
    x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))
    assert x_nan.shape == x.shape
    # test codomain
    batch_shape = shape if event_dim == 0 else shape[:-1]
    assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=jnp.bool_))
    # test inv
    z = transform.inv(y)
    assert_allclose(x, z, atol=1e-5, rtol=1e-5)
    # test domain, currently all is constraints.real or constraints.real_vector
    assert_array_equal(transform.domain(z), jnp.ones(batch_shape))
    # test log_abs_det_jacobian
    actual = transform.log_abs_det_jacobian(x, y)
    assert jnp.shape(actual) == batch_shape
    if len(shape) == event_dim:
        # compare against a log-det computed directly with jax.jacobian,
        # specialized per constraint family
        if constraint is constraints.simplex:
            # the simplex has one fewer degree of freedom than coordinates
            expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]
            inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)[:, :-1])[1]
        elif constraint in [
            constraints.real_vector,
            constraints.ordered_vector,
            constraints.positive_ordered_vector,
            constraints.l1_ball,
        ]:
            expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]
            inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]
        elif constraint in [constraints.corr_cholesky, constraints.corr_matrix]:
            # work in the strictly-lower-triangular vector parameterization
            vec_transform = lambda x: matrix_to_tril_vec(  # noqa: E731
                transform(x), diagonal=-1
            )
            y_tril = matrix_to_tril_vec(y, diagonal=-1)
            def inv_vec_transform(y):
                matrix = vec_to_tril_matrix(y, diagonal=-1)
                if constraint is constraints.corr_matrix:
                    # fill the upper triangular part
                    matrix = (
                        matrix
                        + jnp.swapaxes(matrix, -2, -1)
                        + jnp.identity(matrix.shape[-1])
                    )
                return transform.inv(matrix)
            expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]
            inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform)(y_tril))[1]
        elif constraint in [
            constraints.lower_cholesky,
            constraints.scaled_unit_lower_cholesky,
            constraints.positive_definite,
            constraints.softplus_lower_cholesky,
        ]:
            # lower-triangular (diagonal included) vector parameterization
            vec_transform = lambda x: matrix_to_tril_vec(transform(x))  # noqa: E731
            y_tril = matrix_to_tril_vec(y)
            def inv_vec_transform(y):
                matrix = vec_to_tril_matrix(y)
                if constraint is constraints.positive_definite:
                    # fill the upper triangular part
                    matrix = (
                        matrix
                        + jnp.swapaxes(matrix, -2, -1)
                        - jnp.diag(jnp.diag(matrix))
                    )
                return transform.inv(matrix)
            expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]
            inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform)(y_tril))[1]
        else:
            # scalar transform: log|det J| = log|f'(x)|
            expected = jnp.log(jnp.abs(grad(transform)(x)))
            inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))
        assert_allclose(actual, expected, atol=1e-5, rtol=1e-5)
        assert_allclose(actual, -inv_expected, atol=1e-5, rtol=1e-5)
# NB: skip transforms which are tested in `test_biject_to`
@pytest.mark.parametrize(
    "transform, event_shape",
    [
        (PermuteTransform(np.array([3, 0, 4, 1, 2])), (5,)),
        (PowerTransform(2.0), ()),
        (SoftplusTransform(), ()),
        (
            LowerCholeskyAffine(
                np.array([1.0, 2.0]), np.array([[0.6, 0.0], [1.5, 0.4]])
            ),
            (2,),
        ),
        (
            transforms.ComposeTransform(
                [
                    biject_to(constraints.simplex),
                    SimplexToOrderedTransform(0.0),
                    biject_to(constraints.ordered_vector).inv,
                ]
            ),
            (5,),
        ),
    ],
)
@pytest.mark.parametrize(
    "batch_shape",
    [
        (),
        (1,),
        (3,),
        (6,),
        (3, 1),
        (1, 3),
        (5, 3),
    ],
)
def test_bijective_transforms(transform, event_shape, batch_shape):
    """Round-trip, domain/codomain membership, and log-abs-det-Jacobian
    consistency for bijective transforms not covered by `test_biject_to`."""
    shape = batch_shape + event_shape
    rng_key = random.PRNGKey(0)
    x = biject_to(transform.domain)(random.normal(rng_key, shape))
    y = transform(x)
    # test codomain
    assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))
    # test inv
    z = transform.inv(y)
    assert_allclose(x, z, atol=1e-6, rtol=1e-4)
    assert transform.inv.inv is transform
    assert transform.inv is transform.inv
    assert transform.domain is transform.inv.codomain
    assert transform.codomain is transform.inv.domain
    # test domain
    assert_array_equal(transform.domain(z), jnp.ones(batch_shape))
    # test log_abs_det_jacobian
    actual = transform.log_abs_det_jacobian(x, y)
    assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))
    assert jnp.shape(actual) == batch_shape
    # Without batch dims, cross-check against an explicitly computed Jacobian.
    if len(shape) == transform.domain.event_dim:
        if len(event_shape) == 1:
            expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]
            inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]
        else:
            expected = jnp.log(jnp.abs(grad(transform)(x)))
            inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))
        assert_allclose(actual, expected, atol=1e-6)
        assert_allclose(actual, -inv_expected, atol=1e-6)
@pytest.mark.parametrize("batch_shape", [(), (5,)])
def test_composed_transform(batch_shape):
    """log-abs-det of ComposeTransform([affine, cholesky, affine]) equals the
    sum of the per-transform contributions."""
    t1 = transforms.AffineTransform(0, 2)
    t2 = transforms.LowerCholeskyTransform()
    t = transforms.ComposeTransform([t1, t2, t1])
    assert t.domain.event_dim == 1
    assert t.codomain.event_dim == 2
    x = np.random.normal(size=batch_shape + (6,))
    y = t(x)
    log_det = t.log_abs_det_jacobian(x, y)
    assert log_det.shape == batch_shape
    # scale-by-2 over the 6 inputs, the cholesky term, then scale-by-2 over
    # the 9 entries of the 3x3 matrix output
    expected_log_det = (
        jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, y / 2) + jnp.log(2) * 9
    )
    assert_allclose(log_det, expected_log_det)
@pytest.mark.parametrize("batch_shape", [(), (5,)])
def test_composed_transform_1(batch_shape):
    """Like test_composed_transform but with a repeated LowerCholeskyTransform,
    raising the codomain event_dim to 3."""
    t1 = transforms.AffineTransform(0, 2)
    t2 = transforms.LowerCholeskyTransform()
    t = transforms.ComposeTransform([t1, t2, t2])
    assert t.domain.event_dim == 1
    assert t.codomain.event_dim == 3
    x = np.random.normal(size=batch_shape + (6,))
    y = t(x)
    log_det = t.log_abs_det_jacobian(x, y)
    assert log_det.shape == batch_shape
    z = t2(x * 2)
    expected_log_det = (
        jnp.log(2) * 6
        + t2.log_abs_det_jacobian(x * 2, z)
        + t2.log_abs_det_jacobian(z, t2(z)).sum(-1)
    )
    assert_allclose(log_det, expected_log_det)
@pytest.mark.parametrize("batch_shape", [(), (5,)])
def test_simplex_to_order_transform(batch_shape):
    """forward_shape/inverse_shape of SimplexToOrderedTransform round-trip the
    actual input/output shapes."""
    simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()
    simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)
    transform = SimplexToOrderedTransform()
    out = transform(simplex)
    assert out.shape == transform.forward_shape(simplex.shape)
    assert simplex.shape == transform.inverse_shape(out.shape)
@pytest.mark.parametrize("batch_shape", [(), (5,)])
@pytest.mark.parametrize("prepend_event_shape", [(), (4,)])
@pytest.mark.parametrize("sample_shape", [(), (7,)])
def test_transformed_distribution(batch_shape, prepend_event_shape, sample_shape):
    """TransformedDistribution.log_prob equals the base log_prob minus the
    composed transforms' log-abs-det-Jacobian."""
    base_dist = (
        dist.Normal(0, 1)
        .expand(batch_shape + prepend_event_shape + (6,))
        .to_event(1 + len(prepend_event_shape))
    )
    t1 = transforms.AffineTransform(0, 2)
    t2 = transforms.LowerCholeskyTransform()
    d = dist.TransformedDistribution(base_dist, [t1, t2, t1])
    assert d.event_dim == 2 + len(prepend_event_shape)
    y = d.sample(random.PRNGKey(0), sample_shape)
    t = transforms.ComposeTransform([t1, t2, t1])
    x = t.inv(y)
    assert x.shape == sample_shape + base_dist.shape()
    log_prob = d.log_prob(y)
    assert log_prob.shape == sample_shape + batch_shape
    t_log_det = t.log_abs_det_jacobian(x, y)
    if prepend_event_shape:
        # extra event dims are summed out of the Jacobian term
        t_log_det = t_log_det.sum(-1)
    expected_log_prob = base_dist.log_prob(x) - t_log_det
    assert_allclose(log_prob, expected_log_prob, atol=1e-5)
@pytest.mark.parametrize(
    "transformed_dist",
    [
        dist.TransformedDistribution(
            dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()
        ),
        dist.TransformedDistribution(
            dist.Exponential(jnp.ones(2)),
            [
                transforms.PowerTransform(0.7),
                transforms.AffineTransform(0.0, jnp.ones(2) * 3),
            ],
        ),
    ],
)
def test_transformed_distribution_intermediates(transformed_dist):
    """log_prob computed from the sampling intermediates matches the one
    computed from scratch."""
    sample, intermediates = transformed_dist.sample_with_intermediates(
        random.PRNGKey(1)
    )
    assert_allclose(
        transformed_dist.log_prob(sample, intermediates),
        transformed_dist.log_prob(sample),
    )
def test_transformed_transformed_distribution():
    """Nesting TransformedDistribution is flattened: the base distribution is
    unwrapped and the transform lists are concatenated."""
    loc, scale = -2, 3
    dist1 = dist.TransformedDistribution(
        dist.Normal(2, 3), transforms.PowerTransform(2.0)
    )
    dist2 = dist.TransformedDistribution(dist1, transforms.AffineTransform(-2, 3))
    assert isinstance(dist2.base_dist, dist.Normal)
    assert len(dist2.transforms) == 2
    assert isinstance(dist2.transforms[0], transforms.PowerTransform)
    assert isinstance(dist2.transforms[1], transforms.AffineTransform)
    rng_key = random.PRNGKey(0)
    # same key => the affine of dist1's draw must equal dist2's draw
    assert_allclose(loc + scale * dist1.sample(rng_key), dist2.sample(rng_key))
    intermediates = dist2.sample_with_intermediates(rng_key)
    assert len(intermediates) == 2
def _make_iaf(input_dim, hidden_dims, rng_key):
    """Build an InverseAutoregressiveTransform backed by a freshly initialized
    AutoregressiveNN with the given sizes."""
    arn_init, arn = AutoregressiveNN(input_dim, hidden_dims, param_dims=[1, 1])
    _, init_params = arn_init(rng_key, (input_dim,))
    return InverseAutoregressiveTransform(partial(arn, init_params))
@pytest.mark.parametrize(
    "ts",
    [
        [transforms.PowerTransform(0.7), transforms.AffineTransform(2.0, 3.0)],
        [transforms.ExpTransform()],
        [
            transforms.ComposeTransform(
                [transforms.AffineTransform(-2, 3), transforms.ExpTransform()]
            ),
            transforms.PowerTransform(3.0),
        ],
        [
            _make_iaf(5, hidden_dims=[10], rng_key=random.PRNGKey(0)),
            transforms.PermuteTransform(jnp.arange(5)[::-1]),
            _make_iaf(5, hidden_dims=[10], rng_key=random.PRNGKey(1)),
        ],
    ],
)
def test_compose_transform_with_intermediates(ts):
    """call_with_intermediates agrees with plain __call__ and yields the same
    log-abs-det-Jacobian."""
    transform = transforms.ComposeTransform(ts)
    x = random.normal(random.PRNGKey(2), (7, 5))
    y, intermediates = transform.call_with_intermediates(x)
    logdet = transform.log_abs_det_jacobian(x, y, intermediates)
    assert_allclose(y, transform(x))
    assert_allclose(logdet, transform.log_abs_det_jacobian(x, y))
@pytest.mark.parametrize("x_dim, y_dim", [(3, 3), (3, 4)])
def test_unpack_transform(x_dim, y_dim):
    """UnpackTransform round-trips; inverting emits a UserWarning when the two
    unpacked blocks have equal size."""
    xy = np.random.randn(x_dim + y_dim)
    unpack_fn = lambda xy: {"x": xy[:x_dim], "y": xy[x_dim:]} # noqa: E731
    transform = transforms.UnpackTransform(unpack_fn)
    z = transform(xy)
    if x_dim == y_dim:
        with pytest.warns(UserWarning, match="UnpackTransform.inv"):
            t = transform.inv(z)
    else:
        t = transform.inv(z)
    assert_allclose(t, xy)
@pytest.mark.parametrize("jax_dist, sp_dist, params", CONTINUOUS)
def test_generated_sample_distribution(
    jax_dist, sp_dist, params, N_sample=100_000, key=random.PRNGKey(11)
):
    """On samplers that we do not get directly from JAX, (e.g. we only get
    Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test
    agreement in the empirical distribution of generated samples between our
    samplers and those from SciPy.
    """
    if jax_dist not in [dist.Gumbel]:
        # NOTE: trailing space added so the two implicitly-concatenated string
        # literals don't fuse into "no need totest".
        pytest.skip(
            "{} sampling method taken from upstream, no need to "
            "test generated samples.".format(jax_dist.__name__)
        )

    jax_dist = jax_dist(*params)
    # Only scalar (no batch/event dims) distributions are checked here.
    if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:
        our_samples = jax_dist.sample(key, (N_sample,))
        # two-sided Kolmogorov-Smirnov test against the SciPy CDF
        ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)
        assert ks_result.pvalue > 0.05
@pytest.mark.parametrize(
    "jax_dist, params, support",
    [
        (dist.BernoulliLogits, (5.0,), jnp.arange(2)),
        (dist.BernoulliProbs, (0.5,), jnp.arange(2)),
        (dist.BinomialLogits, (4.5, 10), jnp.arange(11)),
        (dist.BinomialProbs, (0.5, 11), jnp.arange(12)),
        (dist.BetaBinomial, (2.0, 0.5, 12), jnp.arange(13)),
        (dist.CategoricalLogits, (np.array([3.0, 4.0, 5.0]),), jnp.arange(3)),
        (dist.CategoricalProbs, (np.array([0.1, 0.5, 0.4]),), jnp.arange(3)),
    ],
)
@pytest.mark.parametrize("batch_shape", [(5,), ()])
@pytest.mark.parametrize("expand", [False, True])
def test_enumerate_support_smoke(jax_dist, params, support, batch_shape, expand):
    """enumerate_support lists the full discrete support, broadcast across the
    batch shape when expand=True."""
    p0 = jnp.broadcast_to(params[0], batch_shape + jnp.shape(params[0]))
    actual = jax_dist(p0, *params[1:]).enumerate_support(expand=expand)
    expected = support.reshape((-1,) + (1,) * len(batch_shape))
    if expand:
        expected = jnp.broadcast_to(expected, support.shape + batch_shape)
    assert_allclose(actual, expected)
def test_zero_inflated_enumerate_support():
    """ZeroInflated over a Bernoulli keeps the base distribution's enumerable
    support."""
    base_dist = dist.Bernoulli(0.5)
    d = dist.ZeroInflatedDistribution(base_dist, gate=0.5)
    assert d.has_enumerate_support
    assert_allclose(d.enumerate_support(), base_dist.enumerate_support())
@pytest.mark.parametrize("jax_dist, sp_dist, params", CONTINUOUS + DISCRETE)
@pytest.mark.parametrize("prepend_shape", [(), (2, 3)])
@pytest.mark.parametrize("sample_shape", [(), (4,)])
def test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):
    """expand() prepends batch dims, composes with further expands, and rejects
    non-broadcastable target shapes."""
    jax_dist = jax_dist(*params)
    new_batch_shape = prepend_shape + jax_dist.batch_shape
    expanded_dist = jax_dist.expand(new_batch_shape)
    rng_key = random.PRNGKey(0)
    samples = expanded_dist.sample(rng_key, sample_shape)
    assert expanded_dist.batch_shape == new_batch_shape
    assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape
    assert expanded_dist.log_prob(samples).shape == sample_shape + new_batch_shape
    # test expand of expand
    assert (
        expanded_dist.expand((3,) + new_batch_shape).batch_shape
        == (3,) + new_batch_shape
    )
    # test expand error
    if prepend_shape:
        with pytest.raises(ValueError, match="Cannot broadcast distribution of shape"):
            assert expanded_dist.expand((3,) + jax_dist.batch_shape)
@pytest.mark.parametrize("base_shape", [(2, 1, 5), (3, 1), (2, 1, 1), (1, 1, 5)])
@pytest.mark.parametrize("event_dim", [0, 1, 2, 3])
@pytest.mark.parametrize("sample_shape", [(1000,), (1000, 7, 1), (1000, 1, 7)])
def test_expand_shuffle_regression(base_shape, event_dim, sample_shape):
    """Regression test: sample means of an expanded distribution stay aligned
    with the broadcast base locations (no shuffling across the expansion)."""
    expand_shape = (2, 3, 5)
    event_dim = min(event_dim, len(base_shape))
    loc = random.normal(random.PRNGKey(0), base_shape) * 10
    base_dist = dist.Normal(loc, 0.1).to_event(event_dim)
    expanded_dist = base_dist.expand(expand_shape[: len(expand_shape) - event_dim])
    samples = expanded_dist.sample(random.PRNGKey(1), sample_shape)
    expected_mean = jnp.broadcast_to(loc, sample_shape[1:] + expanded_dist.shape())
    assert_allclose(samples.mean(0), expected_mean, atol=0.1)
@pytest.mark.parametrize("batch_shape", [(), (4,), (10, 3)])
def test_sine_bivariate_von_mises_batch_shape(batch_shape):
    """batch_shape follows the broadcast parameters; each sample appends an
    event dim of 2 (the two angles)."""
    phi_loc = jnp.broadcast_to(jnp.array(0.0), batch_shape)
    psi_loc = jnp.array(0.0)
    phi_conc = jnp.array(1.0)
    psi_conc = jnp.array(1.0)
    corr = jnp.array(0.1)
    sine = SineBivariateVonMises(phi_loc, psi_loc, phi_conc, psi_conc, corr)
    assert sine.batch_shape == batch_shape
    samples = sine.sample(random.PRNGKey(0))
    assert samples.shape == (*batch_shape, 2)
def test_sine_bivariate_von_mises_sample_mean():
    """With very high concentrations, the circular sample mean approaches the
    specified locations."""
    loc = jnp.array([[2.0, -1.0], [-2, 1.0]])
    sine = SineBivariateVonMises(*loc, 5000, 5000, 0.0)
    samples = sine.sample(random.PRNGKey(0), (5000,))
    assert_allclose(_circ_mean(samples).T, loc, rtol=5e-3)
@pytest.mark.parametrize("batch_shape", [(), (4,)])
def test_polya_gamma(batch_shape, num_points=20000):
    """TruncatedPolyaGamma density is approximately normalized on its support
    and the approximate sampler's mean is close to 0.25."""
    d = dist.TruncatedPolyaGamma(batch_shape=batch_shape)
    rng_key = random.PRNGKey(0)
    # test density approximately normalized (Riemann sum over the support)
    x = jnp.linspace(1.0e-6, d.truncation_point, num_points)
    prob = (d.truncation_point / num_points) * jnp.exp(
        logsumexp(d.log_prob(x), axis=-1)
    )
    assert_allclose(prob, jnp.ones(batch_shape), rtol=1.0e-4)
    # test mean of approximate sampler
    z = d.sample(rng_key, sample_shape=(3000,))
    mean = jnp.mean(z, axis=-1)
    assert_allclose(mean, 0.25 * jnp.ones(batch_shape), rtol=0.07)
@pytest.mark.parametrize(
    "extra_event_dims,expand_shape",
    [(0, (4, 3, 2, 1)), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))],
)
def test_expand_reshaped_distribution(extra_event_dims, expand_shape):
    """Interaction of expand() and to_event(): shapes compose correctly and
    invalid expansions raise."""
    loc = jnp.zeros((1, 6))
    scale_tril = jnp.eye(6)
    d = dist.MultivariateNormal(loc, scale_tril=scale_tril)
    full_shape = (4, 1, 1, 1, 6)
    reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)
    cut = 4 - extra_event_dims
    batch_shape, event_shape = full_shape[:cut], full_shape[cut:]
    assert reshaped_dist.batch_shape == batch_shape
    assert reshaped_dist.event_shape == event_shape
    large = reshaped_dist.expand(expand_shape)
    assert large.batch_shape == expand_shape
    assert large.event_shape == event_shape
    # Throws error when batch shape cannot be broadcasted
    with pytest.raises((RuntimeError, ValueError)):
        reshaped_dist.expand(expand_shape + (3,))
    # Throws error when trying to shrink existing batch shape
    with pytest.raises((RuntimeError, ValueError)):
        large.expand(expand_shape[1:])
@pytest.mark.parametrize(
    "batch_shape, mask_shape",
    [((), ()), ((2,), ()), ((), (2,)), ((2,), (2,)), ((4, 2), (1, 2)), ((2,), (4, 2))],
)
@pytest.mark.parametrize("event_shape", [(), (3,)])
def test_mask(batch_shape, event_shape, mask_shape):
    """log_prob of a masked distribution is nonzero exactly where the
    (broadcast) mask is True."""
    jax_dist = (
        dist.Normal().expand(batch_shape + event_shape).to_event(len(event_shape))
    )
    mask = dist.Bernoulli(0.5).sample(random.PRNGKey(0), mask_shape)
    if mask_shape == ():
        # scalar masks must be plain Python bools
        mask = bool(mask)
    samples = jax_dist.sample(random.PRNGKey(1))
    actual = jax_dist.mask(mask).log_prob(samples)
    assert_allclose(
        actual != 0,
        jnp.broadcast_to(mask, lax.broadcast_shapes(batch_shape, mask_shape)),
    )
@pytest.mark.parametrize("event_shape", [(), (4,), (2, 4)])
def test_mask_grad(event_shape):
    """Value and gradient of a masked log_prob stay finite even when the
    masked-out observations contain NaNs."""
    def f(x, data):
        base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()
        # mask out any event containing a non-finite observation
        mask = jnp.all(
            jnp.isfinite(data), tuple(-i - 1 for i in range(len(event_shape)))
        )
        log_prob = base_dist.mask(mask).log_prob(data)
        assert log_prob.shape == data.shape[: len(data.shape) - len(event_shape)]
        return log_prob.sum()

    data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])
    log_prob, grad = jax.value_and_grad(f)(1.0, data)
    assert jnp.isfinite(grad) and jnp.isfinite(log_prob)
@pytest.mark.parametrize(
    "jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_dist_pytree(jax_dist, sp_dist, params):
    """Distributions must behave as valid pytrees: jit/scan round-trips keep
    parameters, samples and log-probabilities unchanged."""

    def f(x):
        return jax_dist(*params)

    if jax_dist is _ImproperWrapper:
        # grammar fix: was "Cannot flattening ImproperUniform"
        pytest.skip("Cannot flatten ImproperUniform")
    if jax_dist is dist.EulerMaruyama:
        pytest.skip("EulerMaruyama doesn't define flatten/unflatten")
    jax.jit(f)(0)  # this test for flatten/unflatten
    lax.map(f, np.ones(3))  # this test for compatibility w.r.t. scan
    # Test that parameters do not change after flattening.
    expected_dist = f(0)
    actual_dist = jax.jit(f)(0)
    expected_sample = expected_dist.sample(random.PRNGKey(0))
    actual_sample = actual_dist.sample(random.PRNGKey(0))
    expected_log_prob = expected_dist.log_prob(expected_sample)
    actual_log_prob = actual_dist.log_prob(actual_sample)
    assert_allclose(actual_sample, expected_sample, rtol=1e-6)
    assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-6)
@pytest.mark.parametrize(
    "method, arg", [("to_event", 1), ("mask", False), ("expand", [5])]
)
def test_special_dist_pytree(method, arg):
    """Wrapper distributions (Independent/Masked/Expanded) are jittable and
    scannable pytrees too."""
    def f(x):
        d = dist.Normal(np.zeros(1), np.ones(1))
        return getattr(d, method)(arg)

    jax.jit(f)(0)
    lax.map(f, np.ones(3))
def test_expand_no_unnecessary_batch_shape_expansion():
    """Tree round-trips and jit must not mutate an ExpandedDistribution's base
    batch shape when no mapping operation is performed."""
    # ExpandedDistribution can mutate the `batch_shape` of
    # its base distribution in order to make ExpandedDistribution
    # mappable, see #684. However, this mutation should not take
    # place if no mapping operation is performed.
    for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):
        # Low level test: ensure that (tree_flatten o tree_unflatten)(expanded_dist)
        # amounts to an identity operation.
        d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])
        roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])
        assert d.batch_shape == roundtripped_d.batch_shape
        assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape
        assert d.base_dist.event_shape == roundtripped_d.base_dist.event_shape
        assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)
        assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)
        # High-level test: `jax.jit`ting a function returning an ExpandedDistribution
        # (which involves an instance of the low-level case as it will transform
        # the original function by adding some flattening and unflattening steps)
        # should return same object as its non-jitted equivalent.
        def bs(arg):
            return dist.Normal(arg, arg).expand([10, 3, *arg.shape])

        d = bs(arg)
        dj = jax.jit(bs)(arg)
        assert isinstance(d, dist.ExpandedDistribution)
        assert isinstance(dj, dist.ExpandedDistribution)
        assert d.batch_shape == dj.batch_shape
        assert d.base_dist.batch_shape == dj.base_dist.batch_shape
        assert d.base_dist.event_shape == dj.base_dist.event_shape
        assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)
        assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)
@pytest.mark.parametrize("batch_shape", [(), (4,), (2, 3)], ids=str)
def test_kl_delta_normal_shape(batch_shape):
    """kl_divergence(Delta, Normal) preserves the batch shape."""
    v = np.random.normal(size=batch_shape)
    loc = np.random.normal(size=batch_shape)
    scale = np.exp(np.random.normal(size=batch_shape))
    p = dist.Delta(v)
    q = dist.Normal(loc, scale)
    assert kl_divergence(p, q).shape == batch_shape
def test_kl_delta_normal():
    """KL(Delta(v, log_density=10) || q) == 10 - q.log_prob(v)."""
    v = np.random.normal()
    loc = np.random.normal()
    scale = np.exp(np.random.normal())
    p = dist.Delta(v, 10.0)
    q = dist.Normal(loc, scale)
    assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))
@pytest.mark.parametrize("batch_shape", [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize("event_shape", [(), (4,), (2, 3)], ids=str)
def test_kl_independent_normal(batch_shape, event_shape):
    """KL of Independent wrappers sums the element-wise KL over event dims."""
    shape = batch_shape + event_shape
    p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(size=shape)))
    q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(size=shape)))
    actual = kl_divergence(
        dist.Independent(p, len(event_shape)), dist.Independent(q, len(event_shape))
    )
    expected = sum_rightmost(kl_divergence(p, q), len(event_shape))
    assert_allclose(actual, expected)
@pytest.mark.parametrize("batch_shape", [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize("event_shape", [(), (4,), (2, 3)], ids=str)
def test_kl_expanded_normal(batch_shape, event_shape):
    """KL through Independent also works for expanded (broadcast) base
    distributions."""
    shape = batch_shape + event_shape
    p = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(shape)
    q = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(shape)
    actual = kl_divergence(
        dist.Independent(p, len(event_shape)), dist.Independent(q, len(event_shape))
    )
    expected = sum_rightmost(kl_divergence(p, q), len(event_shape))
    assert_allclose(actual, expected)
@pytest.mark.parametrize("shape", [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize(
    "p_dist, q_dist",
    [
        (dist.Beta, dist.Beta),
        (dist.Gamma, dist.Gamma),
        (dist.Kumaraswamy, dist.Beta),
        (dist.Normal, dist.Normal),
        (dist.Weibull, dist.Gamma),
    ],
)
def test_kl_univariate(shape, p_dist, q_dist):
    """Analytic KL(p || q) matches a Monte-Carlo estimate for several
    univariate pairs."""
    def make_dist(dist_class):
        # Instantiate dist_class with random parameters of the given shape.
        params = {}
        for k, c in dist_class.arg_constraints.items():
            if c is constraints.real:
                params[k] = np.random.normal(size=shape)
            elif c is constraints.positive:
                params[k] = np.exp(np.random.normal(size=shape))
            else:
                raise ValueError(f"Missing pattern for param {k}.")
        d = dist_class(**params)
        if dist_class is dist.Kumaraswamy:
            # raise the Taylor-expansion order used for the Kumaraswamy-Beta KL
            d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000
        return d

    p = make_dist(p_dist)
    q = make_dist(q_dist)
    actual = kl_divergence(p, q)
    x = p.sample(random.PRNGKey(0), (10000,)).copy()
    expected = jnp.mean((p.log_prob(x) - q.log_prob(x)), 0)
    assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize("shape", [(4,), (2, 3)], ids=str)
def test_kl_dirichlet_dirichlet(shape):
    """Analytic Dirichlet-Dirichlet KL matches a Monte-Carlo estimate."""
    p = dist.Dirichlet(np.exp(np.random.normal(size=shape)))
    q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))
    actual = kl_divergence(p, q)
    x = p.sample(random.PRNGKey(0), (10_000,)).copy()
    expected = jnp.mean((p.log_prob(x) - q.log_prob(x)), 0)
    assert_allclose(actual, expected, rtol=0.05)
def test_vmapped_binomial_p0():
    """vmapped Binomial sampling with probs=0 must terminate (regression for an
    infinite loop)."""
    # test that vmapped binomial with p = 0 does not have an infinite loop
    def sample_binomial_withp0(key):
        n = 2 * (random.uniform(key) > 0.5)
        _, key = random.split(key)
        return dist.Binomial(total_count=n, probs=0).sample(key)

    jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))
def _get_vmappable_dist_init_params(jax_dist):
if jax_dist.__name__ == ("_TruncatedCauchy"):
return [2, 3]
elif jax_dist.__name__ == ("_TruncatedNormal"):
return [2, 3]
elif issubclass(jax_dist, dist.Distribution):
init_parameters = list(inspect.signature(jax_dist.__init__).parameters.keys())[
1:
]
vmap_over_parameters = list(
inspect.signature(vmap_over.dispatch(jax_dist)).parameters.keys()
)[1:]
return list(
[
i
for i, name in enumerate(init_parameters)
if name in vmap_over_parameters
]
)
else:
raise ValueError
def _allclose_or_equal(a1, a2):
if isinstance(a1, np.ndarray):
return np.allclose(a2, a1)
elif isinstance(a1, jnp.ndarray):
return jnp.allclose(a2, a1)
elif isinstance(a1, csr_matrix):
return np.allclose(a2.todense(), a1.todense())
else:
return a2 == a1 or a2 is a1
def _tree_equal(t1, t2):
    """True iff every corresponding leaf of the two pytrees satisfies
    ``_allclose_or_equal``."""
    leaf_matches = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)
    flat_matches, _ = jax.flatten_util.ravel_pytree(leaf_matches)
    return jnp.all(flat_matches)
@pytest.mark.parametrize(
    "jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_vmap_dist(jax_dist, sp_dist, params):
    """vmapping a distribution constructor over its vmappable parameters must
    produce a batched distribution whose attributes and samples line up with
    the unbatched one, for several in_axes/out_axes combinations."""
    param_names = list(inspect.signature(jax_dist).parameters.keys())
    vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)
    vmappable_param_idxs = vmappable_param_idxs[: len(params)]

    if len(vmappable_param_idxs) == 0:
        return

    def make_jax_dist(*params):
        return jax_dist(*params)

    def sample(d: dist.Distribution):
        return d.sample(random.PRNGKey(0))

    d = make_jax_dist(*params)

    if isinstance(d, _SparseCAR) and d.is_sparse:
        # In this case, since csr arrays are not jittable,
        # _SparseCAR has a csr_matrix as part of its pytree
        # definition (not as a pytree leaf). This causes pytree
        # operations like tree_map to fail, since these functions
        # compare the pytree def of each of the arguments using ==
        # which is ambiguous for array-like objects.
        return

    in_out_axes_cases = [
        # vmap over all args
        (
            tuple(0 if i in vmappable_param_idxs else None for i in range(len(params))),
            0,
        ),
        # vmap over a single arg, out over all attributes of a distribution
        *(
            ([0 if i == idx else None for i in range(len(params))], 0)
            for idx in vmappable_param_idxs
            if params[idx] is not None
        ),
        # vmap over a single arg, out over the associated attribute of the distribution
        *(
            (
                [0 if i == idx else None for i in range(len(params))],
                vmap_over(d, **{param_names[idx]: 0}),
            )
            for idx in vmappable_param_idxs
            if params[idx] is not None
        ),
        # vmap over a single arg, axis=1, (out single attribute, axis=1)
        *(
            (
                [1 if i == idx else None for i in range(len(params))],
                vmap_over(d, **{param_names[idx]: 1}),
            )
            for idx in vmappable_param_idxs
            if isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).ndim > 0
            # skip this distribution because _GeneralMixture.__init__ turns
            # 1d inputs into 0d attributes, thus breaks the expectations of
            # the vmapping test case where in_axes=1, only done for rank>=1 tensors.
            and jax_dist is not _GeneralMixture
        ),
    ]

    for in_axes, out_axes in in_out_axes_cases:
        batched_params = [
            jax.tree_map(lambda x: jnp.expand_dims(x, ax), arg)
            if isinstance(ax, int)
            else arg
            for arg, ax in zip(params, in_axes)
        ]
        # Recreate the jax_dist to avoid side effects coming from `d.sample`
        # triggering lazy_property computations, which, in a few cases, break
        # vmap_over's expectations regarding existing attributes to be vmapped.
        d = make_jax_dist(*params)
        batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes)(
            *batched_params
        )
        eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(
            batched_d, d
        )
        assert eq == jnp.array([True])

        samples_dist = sample(d)
        samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)
        assert samples_batched_dist.shape == (1, *samples_dist.shape)
def test_multinomial_abstract_total_count():
    """Multinomial can be jitted with an abstract (traced) total_count as long
    as total_count_max is provided."""
    probs = jnp.array([0.2, 0.5, 0.3])
    key = random.PRNGKey(0)

    def f(x):
        total_count = x.sum(-1)
        return dist.Multinomial(total_count, probs=probs, total_count_max=10).sample(
            key
        )

    x = dist.Multinomial(10, probs).sample(key)
    y = jax.jit(f)(x)
    assert_allclose(x, y, rtol=1e-6)
def test_normal_log_cdf():
    """Normal.log_cdf agrees with jax.scipy.stats.norm.logcdf, and
    exp(log_cdf) agrees with cdf."""
    loc = jnp.array([[0.0, -10.0, 20.0]])
    scale = jnp.array([[1, 5, 7]])
    values = jnp.linspace(-5, 5, 100).reshape(-1, 1)
    numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)
    numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)
    jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)
    assert_allclose(numpyro_log_cdf, jax_log_cdf)
    assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-6)
@pytest.mark.parametrize(
    "value",
    [
        -15.0,
        jnp.array([[-15.0], [-10.0], [-5.0]]),
        jnp.array([[[-15.0], [-10.0], [-5.0]], [[-14.0], [-9.0], [-4.0]]]),
    ],
)
def test_truncated_normal_log_prob_in_tail(value):
    """TruncatedNormal.log_prob stays accurate far in the tail, matching
    jax.scipy's truncnorm.logpdf."""
    # define set of distributions truncated in tail of distribution
    loc = 1.35
    scale = jnp.geomspace(0.01, 1, 10)
    low, high = (-20, -1.0)
    a, b = (low - loc) / scale, (high - loc) / scale # rescale for jax input
    numpyro_log_prob = dist.TruncatedNormal(loc, scale, low=low, high=high).log_prob(
        value
    )
    jax_log_prob = jax_truncnorm.logpdf(value, loc=loc, scale=scale, a=a, b=b)
    assert_allclose(numpyro_log_prob, jax_log_prob, rtol=1e-06)
def test_sample_truncated_normal_in_tail():
    """Samples from a far-tail truncated normal must all be finite (no inf from
    numerical underflow)."""
    tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)
    samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10_000,))
    # Use `not ... .any()` rather than bitwise `~`: if `.any()` ever returned a
    # plain Python bool, `~True == -2` is truthy and the assert would always
    # pass silently.
    assert not jnp.isinf(samples).any()
@jax.enable_custom_prng()
def test_jax_custom_prng():
    """Sampling works under JAX's custom (typed) PRNG implementation and
    produces only finite draws."""
    samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))
    assert ~jnp.isinf(samples).any()
|
7,563 | d77036ed07231719358658a42dc14d20453bd792 | def pattern4(n):
    """
    Print a left-aligned right triangle of '*' characters.

    n: length of the base of the triangle ie. the max number
    of stars it will contain (row r prints r stars).
    """
    for row in range(1, n+1):
        for col in range(1, row+1):
            print("*", end="")
        print("")  # newline terminating each row
if __name__ == '__main__':
    # read the triangle height interactively, then draw it
    n = int(input(("Enter height of the triangle: ")))
    pattern4(n)
|
7,564 | 511016b9cd54f6824360d609ede233b9cc3e4447 | class Mood(object):
    """Map a sensor reading to one of the mood codes below."""
    # Mood codes; decision() only produces the temperature-related ones.
    GENERIC = 1
    HIGH_TEMP = 2
    LOW_TEMP = 3
    HIGH_DUST = 4
    LOW_DUST = 5
    def decision(self, data):
        """Classify *data* (a temperature, any value accepted by float())
        into LOW_TEMP (<= 10), HIGH_TEMP (> 30), or GENERIC otherwise."""
        temp = float(data)
        if temp <= 10:
            return self.LOW_TEMP
        if temp > 30:
            return self.HIGH_TEMP
        if (10 < temp <=30):
            return self.GENERIC
|
7,565 | 9ad92b23b8a02204a86af599e507eb889e5bcec7 | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
from scipy.spatial import KDTree
import numpy as np
from std_msgs.msg import Int32
import math
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 60 # Number of waypoints we will publish. You can change this number
MAX_DECEL = 0.5  # max deceleration used in the stop-velocity profile (presumably m/s^2 -- TODO confirm)
MPH_TO_MPS = 0.447  # miles-per-hour -> metres-per-second conversion factor
MAX_SPEED = 20 # in MPH
class WaypointUpdater(object):
def __init__(self):
#rospy.loginfo('Entered WaypointUpdater init')
rospy.init_node('waypoint_updater')
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
# TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
# TODO: Add other member variables you need below
self.pose = None
self.base_waypoints = None
self.waypoints_2d = None
self.waypoint_tree = None
self.stopline_wp_idx = -1
#rospy.spin()
self.loop()
def loop(self):
rospy.loginfo('Entered WaypointUpdater loop')
rate = rospy.Rate(50)
while not rospy.is_shutdown():
if self.pose and self.base_waypoints:
#Get closest waypoint
#rospy.loginfo('Value of self pose is %d, %d',self.pose.pose.position.x, self.pose.pose.position.y)
closest_waypoint_idx = self.get_closest_waypoint_idx()
self.publish_waypoints(closest_waypoint_idx)
rate.sleep()
def get_closest_waypoint_idx(self):
x = self.pose.pose.position.x
y = self.pose.pose.position.y
closest_idx = self.waypoint_tree.query([x,y], 1)[1]
#Check if closest coord is ahead of or behind the vehicle
#rospy.loginfo('Value of closest_idx is %d',closest_idx)
closest_coord = self.waypoints_2d[closest_idx]
prev_coord = self.waypoints_2d[closest_idx-1]
closest_vect = np.array(closest_coord)
previous_vect = np.array(prev_coord)
pos_vect = np.array([x,y])
val = np.dot(closest_vect-previous_vect, pos_vect-closest_vect)
if val > 0:
closest_idx = (closest_idx+1) % len(self.waypoints_2d)
return closest_idx
def publish_waypoints(self, closest_idx):
final_lane = self.generate_lane()
self.final_waypoints_pub.publish(final_lane)
def generate_lane(self):
lane = Lane()
lane.header = self.base_waypoints.header
closest_idx = self.get_closest_waypoint_idx()
farthest_idx = closest_idx + LOOKAHEAD_WPS
base_waypoints = self.base_waypoints.waypoints[closest_idx:farthest_idx]
#rospy.loginfo('Entered generate_lane, farthest_idx: %d', farthest_idx)
if self.stopline_wp_idx == -1 or (self.stopline_wp_idx >= farthest_idx):
lane.waypoints = self.limitMaxSpeed(base_waypoints)
else:
rospy.loginfo('Stopline index non-default: %d, closest_car index: %d', self.stopline_wp_idx, closest_idx)
lane.waypoints = self.decelerate_waypoints(base_waypoints, closest_idx)
return lane
def limitMaxSpeed(self, waypoints):
velocity = MAX_SPEED * MPH_TO_MPS
for i in range(len(waypoints)):
self.set_waypoint_velocity(waypoints, i, velocity)
return waypoints
def decelerate_waypoints(self, waypoints, closest_idx):
tmp = []
stopline_wp_idx = self.stopline_wp_idx
for i, wp in enumerate(waypoints):
p = Waypoint()
p.pose = wp.pose
stop_idx = max(stopline_wp_idx - 5 - closest_idx, 0) # 2 waypoints back from stopline so that the front of the car is at the line
if i > stop_idx:
vel = 0
else:
#rospy.loginfo('Decelerate waypoints index values: %d, stop_idx: %d', i, stop_idx)
dist = self.distance(waypoints, i, stop_idx)
vel = math.sqrt(2 * MAX_DECEL * dist)
if vel < 1.0:
vel = 0.0
p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)
#rospy.loginfo('Waypoint velocity for index %d is %f', i, p.twist.twist.linear.x)
tmp.append(p)
return tmp
def pose_cb(self, msg):
#rospy.loginfo('Entered pose_cb')
self.pose = msg
def waypoints_cb(self, waypoints):
self.base_waypoints = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
#rospy.loginfo('First waypoint: %d, %d', waypoints.waypoints[0].pose.pose.position.x, waypoints.waypoints[0].pose.pose.position.y)
self.waypoint_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
# TODO: Callback for /traffic_waypoint message. Implement
self.stopline_wp_idx = msg.data
#rospy.loginfo('traffic_cb called with stop_idx: %d', self.stopline_wp_idx)
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self, waypoints, waypoint, velocity):
waypoints[waypoint].twist.twist.linear.x = velocity
def distance(self, waypoints, wp1, wp2):
    """Sum the straight-line segment lengths along `waypoints` from wp1 to wp2 inclusive."""
    def seg(a, b):
        # Euclidean distance between two geometry positions.
        return math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)
    total = 0
    prev = wp1
    for i in range(wp1, wp2 + 1):
        total += seg(waypoints[prev].pose.pose.position, waypoints[i].pose.pose.position)
        prev = i
    return total
if __name__ == '__main__':
    # Node entry point: constructing the updater starts the ROS loop.
    try:
        WaypointUpdater()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start waypoint updater node.')
|
7,566 | 1896f4d5b304915d5cbbb30b0a83854c4a8cc60c | from wtforms import Form, StringField
class SearchForm(Form):
    """WTForms form with a single free-text search criterion."""
    # Field label is Spanish for "Text to search".
    criteria = StringField("Texto a buscar")
|
7,567 | 42a717591fb8fe480581d8996e9811d0292d0eb1 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from cv2 import DualTVL1OpticalFlow_create as DualTVL1
from tensorflow.python.platform import flags
import os
import sys
sys.path.insert(0, '..')
from utils import image_funcs
import numpy as np
def make_dir(directory):
    """Create `directory` (and parents); a pre-existing directory is not an error."""
    import errno
    import os
    try:
        os.makedirs(directory)
    except OSError as err:
        # Re-raise anything other than "already exists".
        if err.errno != errno.EEXIST:
            raise
FLAGS = flags.FLAGS
flags.DEFINE_string('expected_rgb_frame_suffix', ".jpg", 'Expected RGB frame files\' suffix.')
def compute_optical_flow_tvl1(video_frames_folder):
    """Compute dense TV-L1 optical flow between consecutive RGB frames.

    Args:
        video_frames_folder: directory containing the extracted RGB frames.

    Returns:
        List of float32 flow fields, one per consecutive frame pair
        (num_frames - 1 entries).
    """
    TVL1 = DualTVL1()
    # Collect RGB frame paths, sorted so frame order follows file naming.
    rgb_frame_files = [frame_file for frame_file in os.listdir(video_frames_folder)
                       if frame_file.endswith(FLAGS.expected_rgb_frame_suffix)]
    rgb_frame_files.sort()
    # BUG FIX: os.listdir returns bare names; join with the folder so frames
    # can be read regardless of the current working directory.
    rgb_frame_files = [os.path.join(video_frames_folder, f) for f in rgb_frame_files]
    num_frames = len(rgb_frame_files)
    assert num_frames >= 2, "Only find %d (<2) RGB frames under %s." % (num_frames, video_frames_folder)
    # Iteratively compute optical flow between each consecutive frame pair.
    optical_flows = []
    prev_frame = image_funcs.rgb_to_gray(image_funcs.read_image(rgb_frame_files[0], to_float=False))
    for i in range(1, num_frames):
        # BUG FIX: read frame i, not frame 1, so the flow advances through
        # the whole video instead of comparing everything against frame 1.
        cur_frame = image_funcs.rgb_to_gray(image_funcs.read_image(rgb_frame_files[i], to_float=False))
        cur_flow = TVL1.calc(prev_frame, cur_frame, None)
        assert cur_flow.dtype == np.float32
        optical_flows.append(cur_flow)
        prev_frame = cur_frame
    return optical_flows
def save_images(images, file_pattern, start_idx=1):
    """Save each image to file_pattern % index, creating parent dirs as needed.

    Args:
        images: iterable of image arrays.
        file_pattern: printf-style pattern with one integer slot, e.g. "dir/%05d.jpg".
        start_idx: index substituted for the first image.
    """
    for i, image in enumerate(images):
        file_path = file_pattern % (start_idx + i)
        # BUG FIX: create the *parent* directory; the original called
        # make_dir(file_path), creating a directory named like the file
        # itself, which then blocked the image write.
        make_dir(os.path.dirname(file_path))
        image_funcs.save_image(image, file_path)
def main():
    """Compute optical flows for a frame folder and save them as numbered JPEGs."""
    # NOTE(review): both folder paths are empty placeholders -- fill in before running.
    optical_flows = compute_optical_flow_tvl1("")
    frame_file_pattern = "%05d.jpg"
    folder = ""
    file_pattern = os.path.join(folder, frame_file_pattern)
    save_images(optical_flows, file_pattern)
if __name__ == '__main__':
main()
|
7,568 | c7553cadb49c9c7e80a7800b9bff4d5f64796494 | import pytest
from a3 import *
from test_utils import *
from numpy import allclose
def test_problem_7_1_8():
    """Regression test: problem_7_1_8(5000) should land within 1 of 84.8."""
    # check_linalg() presumably verifies the linear-algebra environment -- see test_utils.
    assert(check_linalg())
    assert(abs(problem_7_1_8(5000)-84.8)<1)
|
7,569 | 7ff19ee35422395f78dca1e17a736df20a40ea98 | import os
import sqlite3 as db
os.system('clear')
persons = []
class Person:
    """Simple record of an employee: name, surname, job title, and salary."""

    def __init__(self, name, surname, job, salary):
        self.name, self.surname = name, surname
        self.job, self.salary = job, salary
def create(name):
    """Create '<name>.db' with the two employee tables, then close the connection."""
    conn = db.connect(name + '.db')
    cursor = conn.cursor()
    # Table of people (identity data only).
    cursor.execute("""CREATE TABLE first(
        id integer PRIMARY KEY AUTOINCREMENT,
        name text,
        surname text
    )""")
    # Table of employment details, linked back to `first`.
    cursor.execute("""CREATE TABLE second(
        id integer PRIMARY KEY AUTOINCREMENT,
        surname text,
        job text,
        salary integer,
        FOREIGN KEY(id) REFERENCES first(id),
        FOREIGN KEY(surname) REFERENCES first(surname)
    )""")
    conn.commit()
    conn.close()
def database(s):
    """Persist a Person `s` into the currently selected database.

    NOTE(review): relies on the module-global `sqldb` being set by the
    'create database' command first -- raises NameError otherwise.
    Values are passed as SQL parameters, so no injection risk here.
    """
    conn = db.connect(sqldb+'.db')
    c = conn.cursor()
    c.execute('INSERT INTO first(name, surname) VALUES(?, ?)', (s.name, s.surname))
    c.execute('INSERT INTO second(surname, job, salary) VALUES(?, ?, ?)', (s.surname, s.job, s.salary))
    conn.commit()
    conn.close()
def insert():
    """Interactively read one person from stdin and store them.

    Only people who answer 'y' to having a job are appended to the
    module-level `persons` list and written to the database.
    """
    name = input('Enter your name: ')
    surname = input('Enter your surname: ')
    confirm = input('Have you got a job? ')
    if 'y' in confirm:
        job = input('What kind of job you have? ')
        salary = input('How much they pay for you? ')
        # NOTE(review): `surname` is rebound here from the string to the Person object.
        surname = Person(name, surname, job, salary)
        persons.append(surname)
        database(surname)
    else:
        print('We need a humans with job, bye')
while True:
command = input(">> ")
if command == 'insert':
insert()
elif command == 'list':
for i in persons:
print(i.surname)
continue
elif command == 'create database':
sqldb = input('Enter the name of new database: ')
create(sqldb)
elif command == 'clear' or command == 'cls':
loc = os.getcwd()
if 'C:' in loc or 'D:' in loc:
os.system('cls')
else:
os.system('clear')
else:
print('No command found')
continue |
7,570 | 0dec0f04cfe891eea74ef45484fa7433e3429dcd | import os
import glob
ONE_KB = 1024

def get_files(dirname, size_in_kb):
    """Yield full paths of files under `dirname` (recursive) whose size >= size_in_kb KB.

    BUG FIX: the original compared int(filename) -- the file *name* cast to
    an int -- against the threshold, crashing on any non-numeric name and
    never checking actual sizes. Check os.path.getsize on the joined path,
    and yield the full path for consistency with get_files1.
    """
    return (
        os.path.join(root, filename)
        for root, _, files in os.walk(dirname)
        for filename in files
        if os.path.getsize(os.path.join(root, filename)) >= size_in_kb * ONE_KB
    )
# Pybites solution
def get_files1(dirname, size_in_kb):
    """Return files in dirname that are >= size_in_kb"""
    threshold = size_in_kb * ONE_KB
    # Non-recursive: only direct children of dirname are considered.
    for path in glob.glob(os.path.join(dirname, "*")):
        if os.stat(path).st_size >= threshold:
            yield path
|
def func_sum_even(n):
    """Sum the even digits of a (nominally three-digit) number n.

    Splits n into ones, tens, and the remaining leading part, and adds
    each piece that is even.
    """
    ones = n % 10
    n //= 10
    tens = n % 10
    leading = n // 10
    total = 0
    for part in (ones, tens, leading):
        if part % 2 == 0:
            total += part
    return total
# n=int(input())
# print(func_sum_even(n)) |
7,572 | e6b3def6ed6f2523d88912832a876caf2742b786 | import argparse
import pickle
import pandas as pd
from pyspark.sql.session import SparkSession
# Parse one or more Reddit submission dump paths from the command line.
parser = argparse.ArgumentParser()
parser.add_argument('--rs', type=str, nargs='+')
args = parser.parse_args()
ss = SparkSession.builder.getOrCreate()
# Union all inputs into one DataFrame of (id, subreddit, subreddit_id, title).
post_df = None
for f in args.rs:
    df = ss.read.json(f).select('id', 'subreddit', 'subreddit_id', 'title')
    post_df = df if post_df is None else post_df.union(df)
# Keep only posts from the whitelisted subreddits (ids pickled beforehand).
# NOTE(review): pickle.load on an external file -- only unpickle trusted data.
subreddit_ids = pickle.load(open('subreddit_ids', 'rb'))
# coalesce(1) so each output format is written as a single part file.
ret = post_df.filter(post_df.subreddit_id.isin(*subreddit_ids)).coalesce(1)
ret.write.orc('RS.orc', mode='overwrite')
ret.write.json('RS.json', mode='overwrite')
|
7,573 | 422491852b80c2fc4a2e73c01fd01acaad4cf9c8 | #Testing Operating System Descriptions
#OS : LMDE 4 Debbie
#Version: 4.6.7
#Kernal Version : 4.19.0-8-amd64
#Scripting Langages : Python3
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#Libraries used
#pandas is a fast, powerful, flexible and easy to use open source data analysis and manipulation tool,
import pandas as pd
#NumPy is a Python library used for working with arrays.
#NumPy aims to provide an array object that is up to 50x faster than traditional Python lists.Arrays are very frequently used in data science, where speed and resources are very important.
import numpy as np
#Matplotlib is a comprehensive library for creating static, animated, and interactive visualizations in Python
#matplotlib.pyplot is a state-based interface to matplotlib. It provides a MATLAB-like way of plotting.
#pyplot is mainly intended for interactive plots and simple cases of programmatic plot generation
import matplotlib.pyplot as plt
#Simple and efficient tools for predictive data analysis. Built on NumPy, SciPy, and matplotlib. Scikit-learn is probably the most useful library for machine learning in Python.
#The sklearn library contains a lot of efficient tools for machine learning and statistical modeling including classification, regression, clustering and dimensionality reduction.
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
#-----------------------------------------------------------------------------------------------------------------------------
#Reading the dataset
# Read the dataset (first column = feature, second column = target).
dataset = pd.read_csv('./dataset/datafile.csv')

# Split into attributes (X, independent) and labels (y, dependent).
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 1].values

# Reshape into column vectors, as scikit-learn expects 2-D feature arrays.
X = np.array(X).reshape(-1, 1)
# BUG FIX: the original wrote y = np.array(X).reshape(-1, 1), re-using X, so
# the model was trained to predict its own input and every metric was
# meaningless (perfect fit by construction).
y = np.array(y).reshape(-1, 1)

# 80% of the data for training, 20% held out for testing.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)

# Fit ordinary least squares and report R^2 on the held-out set.
regr = LinearRegression()
regr.fit(X_train, y_train)
print(regr.score(X_test, y_test))

# Predict on the test set and evaluate with standard regression metrics.
y_pred = regr.predict(X_test)
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))

# Plot the test points and the fitted line.
plt.scatter(X_test, y_test, color ='b')
plt.plot(X_test, y_pred, color ='k')
plt.show()
|
7,574 | e8daf03f987c7512ff245bfbe16c447acd6b5986 | import API.enum as enum
import re
class ObjectValidator():
def __init__(self, validationData={}, *args, **kwargs):
self.data = validationData
self.statusCode = 200
self.validationPipeline = []
self.errors = {}
self.invalidFields = []
def flush(self):
self = ObjectValidator()
return self
def setError(self, field, error):
if field not in self.invalidFields:
fieldErrors = self.errors.get(field, [])
if error[0] not in fieldErrors:
self.errors[field] = fieldErrors + [error[0]]
self.statusCode = error[1]
self.invalidFields.append(field)
def getErrors(self):
return self.errors
def validate(self):
for validation in self.validationPipeline:
try:
validation['validator'](validation['data'])
except:
self.setError(validation['data']['field'], enum.Error.INVALID_FIELD_DATA.value)
def addValidation(self, data, validatorFunction):
self.validationPipeline.append({
'data': data,
'validator': validatorFunction
})
def _check_with_authenticationValidator(self, data):
if not data['user'].is_authenticated:
self.setError(data['field'], enum.Error.UNAUTHORIZED.value)
def _check_with_nonDuplicateObjectValidator(self, data):
model = data['model']
if model.objects.filter(**data['filter']):
self.setError(data['field'], enum.Error.DUPLICATE_FIELDS.value)
def _check_with_ObjectExistenceValidator(self, data):
model = data['model']
if not model.objects.filter(**data['filter']):
self.setError(data['field'], enum.Error.GENERIC_OBJECT_NOT_FOUND.value)
def checkNonDuplicateObject(self, field, model, **filter):
self.addValidation({'field': field, 'model': model, 'filter': filter},
self._check_with_nonDuplicateObjectValidator)
return self
def checkObjectExistence(self, field, model, **filter):
self.addValidation({'field': field, 'model': model, 'filter': filter},
self._check_with_ObjectExistenceValidator)
return self
def checkUserAuthentication(self, field, user):
self.addValidation({'field': field, 'user': user},
self._check_with_authenticationValidator)
return self
#\b(?!(\d)\1{3})[13-9]{4}[1346-9][013-9]{5}\b
# postal code validation
class FieldValidator():
def __init__(self, validationData={}, *args, **kwargs):
self.data = validationData
self.validationPipeline = []
self.statusCode = 200
self.errors = {}
self.invalidFields = []
def flush(self):
self = FieldValidator()
def setError(self, field, error):
if field not in self.invalidFields:
fieldErrors = self.errors.get(field, [])
if error[0] not in fieldErrors:
self.errors[field] = fieldErrors + [error[0]]
self.statusCode = error[1]
self.invalidFields.append(field)
def getErrors(self):
return self.errors
def validate(self):
for validation in self.validationPipeline:
try:
validation['validator'](validation['data'])
except:
self.setError(validation['data']['field'], enum.Error.INVALID_FIELD_DATA.value)
return self
def addValidation(self, data, validatorFunction):
if (data['value'] == 'unAssigned') and data['field'] in self.data.keys():
data['value'] = self.data[data['field']]
elif data['value'] == 'unAssigned' and data['field'] not in self.data.keys():
data['value'] = None
self.validationPipeline.append({
'data': data,
'validator': validatorFunction
})
def _check_with_typeValidator(self, data):
if not isinstance(data['value'], data['type']):
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_nationalLegalCodeValidator(self, data):
nationalLegalCode = data['value']
result = 0
validationList = [29, 27, 23, 19, 17, 29, 27, 23, 19, 17]
if len(nationalLegalCode) != 11:
self.setError(data['field'], enum.Error.INVALID_NATIONAL_LEGAL_CODE.value)
return
for i in range(10):
result += (int(nationalLegalCode[-2]) + 2 + int(nationalLegalCode[i])) * validationList[i]
if result % 11 == 10:
reminder = 0
else:
reminder = result % 11
if reminder == int(nationalLegalCode[-1]):
valid = True
else:
valid = False
if valid is False:
self.setError(data['field'], enum.Error.INVALID_NATIONAL_LEGAL_CODE.value)
def _check_with_nationalCodeValidator(self, data):
nCode = data['value']
valid = True
if len(nCode) != 10:
valid = False
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
return
sum = 0
for i in range(9):
sum += int(nCode[i]) * (10 - i)
r = sum % 11
if (r < 2 and r == int(nCode[9])) or r >= 2 and r == 11 - int(nCode[9]):
valid = valid and True
if valid is False:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_officer1NationalCodeValidator(self, data):
nCode = data['value']
valid = True
if len(nCode) != 10:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
return
sum = 0
for i in range(9):
sum += int(nCode[i]) * (10 - i)
r = sum % 11
if (r < 2 and r == int(nCode[9])) or r >= 2 and r == 11 - int(nCode[9]):
valid = valid and True
if valid is False:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_officer2NationalCodeValidator(self, data):
nCode = data['value']
valid = True
if len(nCode) != 10:
valid = False
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
return
sum = 0
for i in range(9):
sum += int(nCode[i]) * (10 - i)
r = sum % 11
if (r < 2 and r == int(nCode[9])) or r >= 2 and r == 11 - int(nCode[9]):
valid = valid and True
if valid is False:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_featuresValidator(self, data):
for i in data['value']:
if i not in ["پلتفرم پرداخت در محل", "باشگاه مشتریان", "درگاه پرداخت اینترنتی"]:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
break
def _check_with_userNameValidator(self, data):
username = re.match(r"^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$", data["value"])
if 'admin' in data['value'] or 'zibal' in data['value'] or username is None:
self.setError(data['field'], enum.Error.INVALID_USERNAME.value)
def _check_with_phoneNumberValidator(self, data):
if data['value'] is None or len(data) < 1:
self.setError(data['field'], enum.Error.PHONE_INCORRECT_TEMPLATE.value)
def _check_with_mobileValidator(self, data):
mobileNumber = data['value']
if mobileNumber is None:
self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
return
match_object = re.match(r"(^09[0-9]{9}$)", mobileNumber)
if match_object is None or mobileNumber is None:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_emailValidator(self, data):
email = data['value']
if email is None:
self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
return
match_object = re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email)
if match_object is None or email is None:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_noneValidator(self, data):
if data['value'] is None or data['value'] == "":
self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
def _check_with_fileValidator(self, data):
file = data['value']
field = data['field']
if file is None:
self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)
return
elif file.size > enum.Limits.FILE_SIZE_LIMIT.value:
self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)
types = data['options'].get('types', None)
valid = False
if types is not None:
for type in types:
valid = valid or type in file.content_type
if valid is False:
self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)
def _check_with_IBANValidator(self, data):
iban = data['value']
if len(iban)!=26 or not iban.startswith("IR"):
self.setError(data['field'], enum.Error.IBAN_ERROR.value)
return
code = iban[4:]+iban[:4]
code = code.replace('I','18').replace('R','27')
if int(code)%97!=1:
self.setError(data['field'], enum.Error.IBAN_ERROR.value)
def _check_with_subMerchantBankAccountValidator(self, data):
if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID=data['value']['subId'], status=1).exists():
self.setError(data['field'], enum.Error.IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)
def _check_with_minDataLengthValidator(self, data):
if data['value'] is None or len(data['value']) < data['length']:
self.setError(data['field'], (enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))
def _check_with_maxDataLengthValidator(self, data):
if data['value'] is None or len(data['value']) > data['length']:
self.setError(data['field'], (enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))
def _check_with_equalDataLengthValidator(self, data):
if data['value'] is None or len(data['value']) != data['length']:
self.setError(data['field'], (enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))
def _check_with_inputValidator(self, data):
if data['value'] is None or len(data['value']) < 1:
self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
def _check_with_IbanTransferable(self, data):
if data['value'][4:7]=='062' and data['value'][-13:-10]=='080':
self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value)
def _check_with_username(self, data):
username = re.match(r"^[a-zA-Z0-9_.-]+$", data["value"])
if username is None:
self.setError(data['field'], enum.Error.INVALID_USERNAME.value)
#############################################################################
def checkType(self, field, type, value="unAssigned"):
self.addValidation({'field': field, 'type': type, 'value': value}, self._check_with_typeValidator)
return self
def checkNationalLegalCode(self, field, code="unAssigned"):
self.addValidation({'field': field, 'value': code}, self._check_with_nationalLegalCodeValidator)
return self
def checkOfficer1NationalCode(self, field, code="unAssigned"):
self.addValidation({'field': field, 'value': code}, self._check_with_officer1NationalCodeValidator)
return self
def checkOfficer2NationalCode(self, field, code="unAssigned"):
self.addValidation({'field': field, 'value': code}, self._check_with_officer2NationalCodeValidator)
return self
def checkNationalCode(self, field, code="unAssigned"):
self.addValidation({'field': field, 'value': code}, self._check_with_nationalCodeValidator)
return self
def checkFeatures(self, field, features="unAssigned"):
self.addValidation({'field': field, 'value': features}, self._check_with_featuresValidator)
return self
def checkUserName(self, field, username="unAssigned"):
self.addValidation({'field': field, 'value': username}, self._check_with_userNameValidator)
return self
def checkPhone(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_phoneNumberValidator)
return self
def checkMobile(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_mobileValidator)
return self
def checkEmail(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_emailValidator)
return self
def checkNotNone(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_noneValidator)
return self
def checkFile(self, field, data, **options):
self.addValidation({'field': field, 'value': data, 'options': options}, self._check_with_fileValidator)
return self
def checkIBAN(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_IBANValidator)
return self
def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):
data = {
'userId': userId,
'subId': subId
}
self.addValidation({'field': field, 'value': data}, self._check_with_subMerchantBankAccountValidator)
return self
def checkDataLength(self, field, length,mode='equal', data="unAssigned"):
if mode == 'equal':
validatorFunction = self._check_with_equalDataLengthValidator
if mode == 'min':
validatorFunction = self._check_with_minDataLengthValidator
if mode == 'max':
validatorFunction = self._check_with_minDataLengthValidator
self.addValidation({'field': field, 'value': data, 'length': length}, validatorFunction)
return self
def checkInputData(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_inputValidator)
return self
def checkTelephone(self, field, data="unAssigned"): ##TODO
self.addValidation({'field': field, 'value': data}, self._check_with_phoneNumberValidator)
return self
def checkIsIbanTransferable(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_IbanTransferable)
return self
def checkUsername(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_username())
class DataValidator:
def __init__(self, data={}):
self.fieldValidator = FieldValidator(data)
self.objectValidator = ObjectValidator()
self.errors = {}
self.statusCode = 200
def getValidatorsErrors(self):
self.objectValidator.validate()
self.fieldValidator.validate()
for key in self.fieldValidator.getErrors().keys():
self.errors[key] = self.errors.get(key, []) + self.fieldValidator.getErrors()[key]
self.statusCode = self.fieldValidator.statusCode
for key in self.objectValidator.getErrors().keys():
self.errors[key] = self.errors.get(key, []) + self.objectValidator.getErrors()[key]
self.statusCode = self.objectValidator.statusCode if self.objectValidator.statusCode != 200 else self.statusCode
return self.errors
def generateMessage(self):
messages = []
errorKeys = self.errors.keys()
if 'email' in errorKeys:
messages.append(' آدرس ایمیل نامعتبر است')
if "name" in errorKeys :
messages.append('نام را وارد کنید')
if 'username' in errorKeys:
messages.append('نام کاربری را وارد کنید')
if 'password' in errorKeys:
messages.append('رمز عبور را وارد کنید')
if 'mobile' in errorKeys:
messages.append('تلفن همراه خود را وارد کنید.')
if 'phone' in errorKeys:
messages.append('تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید')
if 'iban' in errorKeys or 'IBAN' in errorKeys:
messages.append('شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله')
if 'user' in errorKeys:
messages.append('لطفا وارد شوید')
return messages |
7,575 | 74e3f4cd7b09d9b96feb3f927a509b113481eaed | from django.apps import AppConfig
class CheckoutConfig(AppConfig):
    """Django app config for the checkout app; wires up order signals on startup."""
    name = "checkout"

    # Override the ready method and import the signals module
    # so that update_on_save and update_on_delete will be called
    # after an OrderLineItem model instance is saved or deleted
    def ready(self):
        import checkout.signals
|
7,576 | 0a19efea0c8d7e5e248ca3265ffcb55604dc500c | __author__ = 'Administrator'
import socket
import os
import time
import hashlib  # BUG FIX: hashlib was used below but never imported

# Minimal file-download server: receives "get <filename>", replies with the
# file size, waits for the client's ack, then streams the file while
# computing its MD5 for a sanity log line.
server = socket.socket()
server.bind(("localhost", 9999))
server.listen()
while True:
    conn, addr = server.accept()
    while True:
        data = conn.recv(1024)
        if not data:
            # Client closed the connection; go back to accept().
            break
        cmd, filename = data.decode().split()
        if os.path.isfile(filename):  # BUG FIX: the colon was missing (SyntaxError)
            f = open(filename, "rb")
            m = hashlib.md5()
            file_size = os.stat(filename).st_size
            conn.send(str(file_size).encode())
            conn.recv(1024)  # wait for the client ack before streaming
            for line in f:
                m.update(line)
                conn.send(line)
            print("file_md5", m.hexdigest())
            f.close()
server.close()
|
7,577 | 4c8a873c816678532b029af409be13258757eae1 | # Напишите программу, которая вводит с клавиатуры последовательность чисел и выводит её
# отсортированной в порядке возрастания.
def is_numb_val(val):
    """Return True when `val` can be parsed as a float, False otherwise."""
    try:
        float(val)
    except ValueError:
        return False
    return True
def main():
    """Read whitespace-separated numbers from stdin and print them sorted ascending."""
    # Non-numeric tokens are silently skipped. Prompt is Russian for
    # "Enter a sequence of numbers separated by spaces".
    num_seq = input("Введите последовательность чисел через пробел: ").split()
    num_lst = [float(s) for s in num_seq if is_numb_val(s)]
    print(sorted(num_lst))
if __name__ == '__main__':
main()
|
7,578 | 20d363f5d02cc0b1069aa8951999c0cb22b85613 | # This is a module
class MyMath:
    """Namespace of small integer predicates, used as MyMath.isEven(n) etc."""

    @staticmethod
    def isEven(num):
        """Return True when num is divisible by 2."""
        return num % 2 == 0

    @staticmethod
    def isOdd(num):
        """Return True when num is not divisible by 2."""
        return num % 2 != 0

    @staticmethod
    def isPrime(num):
        """Return True when num is prime (trial division).

        BUG FIX: the original returned True for 0, 1, and negatives because
        the trial-division loop body never executed for num < 3.
        """
        if num < 2:
            return False
        for i in range(2, num):
            if num % i == 0:
                return False
        return True
class Calsi:
    """Tiny calculator namespace, used as Calsi.add(a, b) and friends."""

    def add(num1, num2):
        result = num1 + num2
        return result

    def sub(num1, num2):
        result = num1 - num2
        return result

    def mul(num1, num2):
        result = num1 * num2
        return result
|
7,579 | 31b87a3ceca1f48665ecc9754d5f87bb9b7bbf13 | import psycopg2
from .configuration import ConfigurationException
DB_CONNECT_STRING = "host='{host}' dbname='{dbname}' user='{user}' password='{passwd}'"
class DBItemCompany:
    """One vendor row assembled from the database, plus derived display helpers."""

    def __init__(self, _id, tweeter, category, categoryUrl, provenScore, ranking, location, url, categoryId):
        self.id = _id
        self.twitterAccount = tweeter
        self.category = category
        self.categoryUrl = categoryUrl
        self.provenScore = provenScore
        self.ranking = ranking
        self.location = location
        self.url = url
        self.categoryId = categoryId

    @property
    def invalidScore(self):
        """True when the proven score is missing or below 1."""
        score = self.provenScore
        return score is None or score < 1

    @property
    def twitter(self):
        """The Twitter handle prefixed with '@'."""
        return '@' + self.twitterAccount
class DBException(Exception):
    """Generic exception raised by the Database Manager (wraps psycopg2 errors)."""
    pass
class DBManager:
def __init__(self, cfg):
self.cfg = cfg
self.__companies = {}
self.__loggedIn = False
self.connection = None
self.cursor = None
def __del__(self):
try:
self.connection.close()
except psycopg2.Error:
pass
def __logInDb(self):
try:
dbSettings = self.cfg.databaseSettings
self.connection = psycopg2.connect(DB_CONNECT_STRING.format(
host=dbSettings[0], dbname=dbSettings[1],
user=dbSettings[2], passwd=dbSettings[3]
))
self.cursor = self.connection.cursor()
self.__loggedIn = True
return True
except (psycopg2.OperationalError, ConfigurationException):
return False
def __getDomainName(self, schema):
    """Return 'http://<domain>' for the customer whose schema_name matches `schema`.

    NOTE(review): `schema` is interpolated straight into the SQL string; this
    is safe only while schema names come from trusted config -- otherwise use
    a parameterized query.
    """
    try:
        self.cursor.execute("SELECT domain_url FROM customers_customer WHERE schema_name='{schemaname}'".format(schemaname=schema))
        return 'http://' + self.cursor.fetchone()[0]
    except psycopg2.DatabaseError:
        raise DBException('Failed to extract domain name from database')
def __buildCategoryUrl(self, catId, schemaName):
return '{domain}/vendors/?find=category-{categoryId}'.format(domain=self.__getDomainName(schemaName), categoryId=catId)
def __buildProfileUrl(self, catSlug, profSlug, schemaName):
return '{domain}/vendors/{categorySlug}/{profileSlug}'.format(domain=self.__getDomainName(schemaName),
categorySlug=catSlug,
profileSlug=profSlug)
def __buildProfileUrlWOCategory(self, profSlug, schemaName):
return '{domain}/vendors/{profileSlug}'.format(domain=self.__getDomainName(schemaName), profileSlug=profSlug)
def __getCompaniesData(self, schema):
"""
Load Companies list from database
"""
try:
self.cursor.execute("""SELECT id, twitter, proven_score, slug FROM {schema}.vendors_vendor WHERE
twitter <> ''""".format(schema=schema))
data = self.cursor.fetchall()
companies = []
for entry in data:
self.cursor.execute('SELECT location_id FROM {schema}.vendors_vendorlocation WHERE vendor_id = {vendor}'.format(schema=schema, vendor=entry[0]))
cities = self.cursor.fetchall()
if cities is None:
continue
city = ''
for cityId in cities:
self.cursor.execute('SELECT city FROM {schema}.locations_location WHERE id = {city}'.format(schema=schema, city=cityId[0]))
cityName = self.cursor.fetchone()
if cityName is not None:
city += cityName[0]
self.cursor.execute('SELECT category_id, rank FROM {schema}.vendors_vendorcustomkind WHERE vendor_id = {vendor} AND "primary" is true'.format(schema=schema, vendor=entry[0]))
customKind = self.cursor.fetchone()
if customKind is None:
catId = rank = None
else:
catId, rank = customKind
if catId is not None:
self.cursor.execute('SELECT name, slug FROM {schema}.categories_category WHERE id = {cat}'.format(schema=schema, cat=catId))
catData = self.cursor.fetchone()
else:
catData = None
companies.append(DBItemCompany(
_id = entry[0],
tweeter = entry[1],
category = catData[0] if catData is not None else None,
categoryUrl = self.__buildCategoryUrl(catId, schema) if catId is not None else None,
provenScore = entry[2],
ranking = rank,
location = city,
url = self.__buildProfileUrl(catData[1], entry[3], schema) if catData is not None else self.__buildProfileUrlWOCategory(entry[3], schema),
categoryId = catId
))
self.__companies[schema] = companies
except psycopg2.DatabaseError as err:
raise DBException(err.args[0])
def domainUrl(self, schema):
return self.__getDomainName(schema)
def refreshData(self, schemas):
if not self.__loggedIn:
if not self.__logInDb():
return False
for schema in schemas:
self.__getCompaniesData(schema)
return True
@property
def companies(self):
return self.__companies
@property
def isConnected(self):
return self.__loggedIn
|
7,580 | b34ad8d7fc8df0ab86c5930ab2b5aa1f86d13ae3 | from django.db import models
class Author(models.Model):
author = models.CharField(
"Author",
max_length=30,
blank=False,
null=False
)
biography = models.TextField(
"About author",
max_length=500,
blank=True,
null=True
)
def __str__(self):
return self.author
class Series(models.Model):
title = models.CharField(
"Title of series",
max_length=100,
blank=False,
null=False
)
description = models.TextField(
"About this series",
max_length=500,
blank=True,
null=True
)
def __str__(self):
return self.title
class Genre(models.Model):
genre = models.CharField(
"Genre",
max_length=50,
blank=False,
null=False
)
description = models.TextField(
"About this genre",
max_length=500,
blank=True,
null=True
)
def __str__(self):
return self.genre
class PublishingHouse(models.Model):
house = models.CharField(
"Publishing House",
max_length=40,
blank=False,
null=False
)
history = models.TextField(
"Other books of this house",
max_length=500,
blank=True,
null=True
)
def __str__(self):
return self.house
|
7,581 | 2cef5311a9ff9497ad6611fe7b47e4f7c5b1b3c7 | #!/usr/bin/python
from random import *
prob = "change"
cases = [
10,
10,
10,
100,
100,
100000,
100000,
100000,
100000,
100000
]
cur = 0
st = [1,2,5,10,20,50,100,200,500,1000,2000,5000,10000]
for (n) in cases :
cout = ""
cur += 1
print "make %d..." % cur
##-----
#TODO generate the data
tot = 0
stt = []
for a in st :
for b in st :
if b < a and a <= n :
tot = tot + 1
stt.append( (a,b) )
cout += "%d\n" % tot
for (a,b) in stt :
cout += "%d %d\n" % (a, b)
##-----
f = file( prob + str(cur) + ".in", "w" )
f.write( cout )
f.close()
|
7,582 | a28ece0db9bf0d4c3ab26207216b1da45f7aaa0f | """Proper parenthetics extra credit kata."""
from _que_structure import Q
def proper_parenthetics(string):
    """Classify a string of parentheses.

    Returns:
        0  when the parentheses are balanced (including the empty string),
        1  when the string ends with unmatched '(' ("broken" to the left),
        -1 as soon as a ')' appears with no matching open '('.

    Raises:
        TypeError: for non-string input, or for any non-paren character.

    Fix: the original `closing_parens` counter was never incremented, so
    both branches that read it were dead code; the intermediate queue only
    replayed the string in order, adding an O(n) copy without changing the
    result. Both are removed; observable behavior is unchanged.
    """
    if isinstance(string, str):
        open_count = 0  # number of currently-unmatched '('
        for ch in string:
            if ch == '(':
                open_count += 1
            elif ch == ')':
                if open_count == 0:
                    # A closer with nothing open can never be repaired.
                    return -1
                open_count -= 1
            else:
                raise TypeError('proper_parenthetics takes only parentheses.')
        return 1 if open_count else 0
    raise TypeError('proper_parenthetics takes only strings')
|
7,583 | 170716ccaaf45db2ee974de260883a8d70513f52 | from django.db import models
class Event(models.Model):
    """An event listing; dates and times are stored as free text."""

    name = models.TextField()
    host = models.TextField(null=True)
    fields = models.TextField(null=True)
    description = models.TextField(null=True)
    # NOTE(review): date/start/end are TextFields — DateField/TimeField would
    # add validation, but switching requires a migration; confirm with callers.
    date = models.TextField()
    start_time = models.TextField()
    end_time = models.TextField()
    banner_path = models.TextField(null=True)
    status = models.IntegerField()  # status-code semantics not visible here — confirm
    reg = models.IntegerField()     # registration flag/count — semantics not visible here
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
|
7,584 | 0e19d7251db3382c34ad2d38a7984b65325ecfbf | from django.db import models
from django.db.models.base import Model
# Create your models here.
class Categoria(models.Model):
    """Product category."""
    categoria = models.CharField(max_length=40)

    def __str__(self):
        return self.categoria
class Producto(models.Model):
    """A catalogue product belonging to one Categoria."""
    codigo = models.CharField(max_length=40)
    nombre = models.CharField(max_length=40)
    precio = models.IntegerField()
    stock = models.IntegerField()
    descripcion = models.CharField(max_length=40)
    categoria = models.ForeignKey(Categoria, on_delete=models.CASCADE)
    fecha = models.DateField()
    imagen = models.ImageField(null=True, blank=True)

    def __str__(self):
        return self.nombre
class Cliente(models.Model):
    """A customer record."""
    nombre = models.CharField(max_length=41)  # NOTE(review): 41 looks like a typo for 40 — confirm before migrating
    paterno = models.CharField(max_length=40)
    rut = models.CharField(max_length=9)
    direccion = models.CharField(max_length=40)
    telefono = models.IntegerField()
    mail = models.CharField(max_length=100)

    def __str__(self):
        return self.nombre
class Usuario(models.Model):
    """Login credentials linked to a Cliente."""
    mail = models.CharField(max_length=100)
    contraseña = models.CharField(max_length=100)
    # FK to the owning client (the field is named after the client's RUT).
    rut = models.ForeignKey(Cliente, on_delete=models.CASCADE)

    def __str__(self):
        # Fix: __str__ must return a str; self.rut is a Cliente instance,
        # so returning it raised TypeError. Delegate to Cliente.__str__.
        return str(self.rut)
class DetalleVenta(models.Model):
    """Sale receipt detail ("comprovante" spelling kept: field names are schema)."""
    tipo_comprovante = models.CharField(max_length=100)
    serie_comprovante = models.CharField(max_length=7)
    fecha_comprovante = models.DateField(max_length=100)  # NOTE(review): max_length has no effect on DateField — confirm/remove
    iva = models.IntegerField()
    total = models.IntegerField()

    def __str__(self):
        return self.serie_comprovante
class Descuento(models.Model):
    """A discount code and its value."""
    codigo_descuento = models.CharField(max_length=7)
    valor_descuento = models.IntegerField()

    def __str__(self):
        return self.codigo_descuento
class Venta(models.Model):
    """A completed sale."""
    descripcion = models.CharField(max_length=100)
    total_venta = models.IntegerField()

    def __str__(self):
        # Fix: __str__ must return a str; returning the raw IntegerField
        # value raised TypeError whenever the instance was stringified.
        return str(self.total_venta)
class Sucursal(models.Model):
    """A store branch."""
    direccion = models.CharField(max_length=100)
    numero_sucursal = models.IntegerField()

    def __str__(self):
        # Fix: __str__ must return a str; returning the raw IntegerField
        # value raised TypeError whenever the instance was stringified.
        return str(self.numero_sucursal)
class Comuna(models.Model):
    """A commune, linked to one branch (Sucursal)."""
    direccion = models.CharField(max_length=100)
    numero_comuna = models.IntegerField()
    numero_sucursal = models.ForeignKey(Sucursal, on_delete=models.DO_NOTHING)

    def __str__(self):
        # Fix: the method was misspelled `__` (never called by Python) and
        # returned a Sucursal instance. str() delegates to Sucursal.__str__,
        # preserving the original author's choice of displayed field.
        return str(self.numero_sucursal)
class Region(models.Model):
    """A region, linked to one commune."""
    direccion = models.CharField(max_length=100)
    numero_region = models.IntegerField()
    numero_comuna = models.ForeignKey(Comuna, on_delete=models.DO_NOTHING)

    def __str__(self):
        # Fix: __str__ must return a str; returning the raw IntegerField
        # value raised TypeError whenever the instance was stringified.
        return str(self.numero_region)
class Pedido(models.Model):
    """A supplier order."""
    numero_pedido = models.IntegerField()
    fecha_pedido = models.DateField(max_length=100)  # NOTE(review): max_length has no effect on DateField — confirm/remove
    iva = models.IntegerField()

    def __int__(self):
        # Kept for backward compatibility: int(pedido) worked before.
        # (Likely a typo for __str__ — every sibling model defines __str__.)
        return self.numero_pedido

    def __str__(self):
        # Added so the admin/shell display matches the other models.
        return str(self.numero_pedido)
class Proveedores(models.Model):
    """A supplier, linked to one order (Pedido)."""
    nombre_proveedor = models.CharField(max_length=40)
    direccion = models.CharField(max_length=40)
    telefono = models.IntegerField()
    mail = models.CharField(max_length=100)
    numero_pedido = models.ForeignKey(Pedido, on_delete=models.DO_NOTHING)

    def __str__(self):
        return self.nombre_proveedor
class Suscripcion(models.Model):
    """A newsletter/membership subscription."""
    # Fix: DateField was assigned WITHOUT parentheses, which attaches the
    # field *class* to the model instead of a field instance — Django never
    # created a column for it.
    fecha_suscripcion = models.DateField()
    valor_suscripcion = models.IntegerField()
    suscrito = models.IntegerField()  # NOTE(review): looks like a 0/1 flag — BooleanField may be intended; confirm
7,585 | ba289bcdc0aa7c2ad70dba7fac541900d0b55387 | import os
# Set a single thread per process for numpy with MKL/BLAS.
# These must be set BEFORE numpy is imported for the limits to take effect.
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['OPENBLAS_NUM_THREADS'] = '1'
# NOTE(review): MKL_DEBUG_CPU_TYPE=5 reportedly forces MKL's AVX2 code path
# on non-Intel CPUs and is ignored by recent MKL releases — confirm it is
# still wanted.
os.environ['MKL_DEBUG_CPU_TYPE'] = '5'
import numpy as np
from matplotlib import pyplot as plt
from copy import deepcopy
from kadal.optim_tools.MOBO import MOBO
from kadal.surrogate_models.kriging_model import Kriging
from kadal.surrogate_models.supports.initinfo import initkriginfo
from kadal.testcase.analyticalfcn.cases import evaluate
from kadal.misc.sampling.samplingplan import sampling
def generate_kriging(n_cpu=1):
    """Build and train two Kriging surrogates for the bi-objective Schaffer problem.

    Args:
        n_cpu (int): CPUs passed to Kriging.train(). Fix: the original body
            referenced an undefined global `n_cpu`, which raised NameError at
            runtime; it is now a backward-compatible keyword parameter.

    Returns:
        tuple: (krigobj1, krigobj2) — trained Kriging models, one per objective.

    Side effect: stores the evaluated objective samples in the module-level
    global `y`, which the __main__ block reads for plotting.
    """
    # Sampling: 20 Halton points in [-1, 1]^2.
    nsample = 20
    nvar = 2
    lb = -1 * np.ones(shape=[nvar])
    ub = 1 * np.ones(shape=[nvar])
    sampoption = "halton"
    samplenorm, sample = sampling(sampoption, nvar, nsample, result="real",
                                  upbound=ub, lobound=lb)
    X = sample
    # Evaluate sample (kept global so the plotting code at the bottom can use it).
    global y
    y = evaluate(X, "schaffer")
    # Configure Kriging for the first objective.
    KrigInfo1 = initkriginfo()
    KrigInfo1["X"] = X
    KrigInfo1["y"] = y[:, 0].reshape(-1, 1)
    KrigInfo1["problem"] = "schaffer"
    KrigInfo1["nrestart"] = 5
    KrigInfo1["ub"] = ub
    KrigInfo1["lb"] = lb
    KrigInfo1["optimizer"] = "lbfgsb"
    # The second objective shares the setup; only the targets differ.
    KrigInfo2 = deepcopy(KrigInfo1)
    KrigInfo2['y'] = y[:, 1].reshape(-1, 1)
    # Train both surrogates and report leave-one-out cross-validation error.
    krigobj1 = Kriging(KrigInfo1, standardization=True, standtype='default',
                       normy=False, trainvar=False)
    krigobj1.train(n_cpu=n_cpu)
    loocverr1, _ = krigobj1.loocvcalc()
    print("LOOCV error of Kriging model: ", loocverr1, "%")
    krigobj2 = Kriging(KrigInfo2, standardization=True, standtype='default',
                       normy=False, trainvar=False)
    krigobj2.train(n_cpu=n_cpu)
    loocverr2, _ = krigobj2.loocvcalc()
    print("LOOCV error of Kriging model: ", loocverr2, "%")
    return krigobj1, krigobj2
def runopt(krigobj1, krigobj2):
    """Run EHVI-based multi-objective Bayesian optimization on the two surrogates.

    Returns the suggested inputs, their objective values, and the acquisition
    metric history.
    """
    moboInfo = {
        "nup": 3,
        "nrestart": 10,
        "acquifunc": "ehvi",
        "acquifuncopt": "lbfgsb",
    }
    Optim = MOBO(moboInfo, [krigobj1, krigobj2], autoupdate=True, multiupdate=5)
    xupdate, yupdate, supdate, metricall = Optim.run(disp=True)
    return xupdate, yupdate, metricall
if __name__ == '__main__':
    # Build the surrogates, run the optimization, then plot the initial
    # samples against the optimizer-suggested points.
    krigobj1, krigobj2 = generate_kriging()
    xupdate, yupdate, metricall = runopt(krigobj1, krigobj2)
    print(metricall)
    # `y` is the module-level global populated inside generate_kriging().
    plt.scatter(y[:, 0], y[:, 1])
    plt.scatter(yupdate[:, 0], yupdate[:, 1])
    plt.show()
|
7,586 | fa3ab879541c04e278317b11dd79e6e1b4319536 | FILE = "Luke"
# Character-sheet constants for Luke Walker (empty strings are placeholders
# to be filled in later).
NAME = "Luke Walker"
NATIONALITY = "American"
CLASS = "Manipulator"
WEAPON = ""
BIRTH = ""
BIRTH_LOCATION = ""
LETTER = "W"
RECRUITMENT_ORDER = 10
SUMMARY = ""
ABILITIES = ""
BACKSTORY = ""
HIGHLIGHTS = ""
# Summons available to this character.
SUMMONS = ("Tonberry", "Grimnir", "Griever", "Starlet")
|
# Metadata record describing the RhinoScript Application.AliasMacro method:
# name, docs, positional parameters, and the possible return values.
alias_macro = {
    "class": "Application",
    "method": "alias_macro",
    "doc": """
    Returns or modifies the macro of a command alias.
    """,
    "syntax": """
    Rhino.AliasMacro (strAlias [, strMacro])
    """,
    # Parameters keyed by position; "optional" marks the VB optional args.
    "params": {
        0: {
            "name": "alias",
            "optional": False,
            "type_vb": "string",
            "type_string": "str",
            "doc": """
            The name of an existing command alias.
            """
        },
        1: {
            "name": "macro",
            "optional": True,
            "type_vb": "string",
            "type_string": "str",
            "doc": """
            The new macro to run when the alias is executed.
            """
        },
    },
    # Possible return values, keyed by outcome.
    "returns": {
        0: {
            "type_vb": "String",
            "doc": "If a new macro is not specified, the existing macro if successful."
        },
        1: {
            "type_vb": "String",
            "doc": "If a new macro is specified, the previous macro if successful."
        },
        2: {
            "type_vb": "Null",
            "doc": "If not successful, or on error."
        },
    }
}
|
7,588 | 3dc83168264fbb4f9b0ab2980b845dffdc4417bb | import requests
from bs4 import BeautifulSoup
class Book:
    """Scraper for one product page on books.toscrape.com.

    Call scrap() to download the page and populate every attribute;
    all attributes start as empty strings.
    """

    def __init__(self, url):
        self.url = url
        self.title = ""
        self.category = ""
        self.upc = ""
        self.price_including_tax = ""
        self.price_excluding_tax = ""
        self.number_available = ""
        self.description = ""
        self.review_rating = ""
        self.image_url = ""
        self.tax = ""

    def scrap(self):
        """Download self.url and fill every attribute from the parsed page."""
        book = requests.get(self.url)
        soup = BeautifulSoup(book.content, "html.parser")
        self.__fill_title(soup)
        self.__fill_category(soup)
        self.__fill_upc(soup)
        self.__fill_price_including_tax(soup)
        self.__fill_price_excluding_tax(soup)
        self.__fill_number_available(soup)
        self.__fill_description(soup)
        self.__fill_review_rating(soup)
        self.__fill_image_url(soup)
        self.__fill_tax(soup)

    def __fill_title(self, soup):
        title = soup.find("div", {"class": "col-sm-6 product_main"}).find("h1")
        self.title = title.text

    def __fill_category(self, soup):
        # The category is the third breadcrumb <li> on the page.
        category = soup.findAll("li")
        category2 = category[2].text
        self.category = category2.replace("\n", "")

    def __fill_upc(self, soup):
        # The product-information table cells are read positionally:
        # 0=UPC, 2=price excl. tax, 3=price incl. tax, 4=tax, 5=availability.
        tds = soup.findAll("td")
        self.upc = tds[0].text

    def __fill_price_including_tax(self, soup):
        tds = soup.findAll("td")
        self.price_including_tax = tds[3].text

    def __fill_price_excluding_tax(self, soup):
        tds = soup.findAll("td")
        self.price_excluding_tax = tds[2].text

    def __fill_number_available(self, soup):
        tds = soup.findAll("td")
        self.number_available = tds[5].text

    def __fill_description(self, soup):
        # The description is the <p> immediately after the sub-header div.
        div = soup.find("div", class_="sub-header")
        p = div.find_next_sibling()
        self.description = p.text

    def __fill_review_rating(self, soup):
        p = soup.find("div", {"class": "col-sm-6 product_main"}).find(
            "p", class_="star-rating"
        )
        # The rating word is the second CSS class, e.g. ["star-rating", "Three"].
        # Fix: the original *returned* the value (leaving self.review_rating
        # empty forever) and recovered it by slicing + eval() on a
        # stringified class list; read the list element directly instead.
        self.review_rating = p["class"][1]

    def __fill_image_url(self, soup):
        image = soup.find("div", {"class": "item active"}).find("img")
        image_url = image["src"]
        image_clean_url = image_url.replace("../../", "http://books.toscrape.com/")
        self.image_url = image_clean_url

    def __fill_tax(self, soup):
        tds = soup.findAll("td")
        self.tax = tds[4].text

    def __str__(self):
        output = f"url : {self.url} \ntitle : {self.title} \ncategory : {self.category} \nupc : {self.upc} \nprice_including_tax : {self.price_including_tax} \nprice_excluding_tax : {self.price_excluding_tax} \nnumber_available : {self.number_available} \ndescription : {self.description} \nreview_rating : {self.review_rating} \nimage_url : {self.image_url} \ntax : {self.tax} "
        return output
# book = Book("http://books.toscrape.com/catalogue/a-light-in-the-attic_1000/index.html")
# book.scrap("http://books.toscrape.com/catalogue/a-light-in-the-attic_1000/index.html")
# print(book)
|
7,589 | e51c57f4487a3225936d073142f1f770815c0d47 | #!/usr/bin/env python
# coding: utf-8
import sys,pysrt
import urllib2,urllib,json
import re
from urlparse import urlparse
import os
from mtranslate import translate
from argparse import ArgumentParser
reload(sys)
sys.setdefaultencoding('utf8')
#----------------------------------------------------------------------------------------------------------------------------------
def cleanhtml(raw_html):
    '''
    Strip HTML tags and transcription artifacts from one subtitle cue.

    TODO: refactor this to make it as generic as possible
    '''
    cleanr = re.compile('<.*?>')
    cleantext = re.sub(cleanr, '', raw_html)
    # "[vacilación]" marks hesitation in the transcript; render it as "...".
    cleantext = cleantext.replace('[vacilación]','...')
    # NOTE(review): the first argument below may be a non-breaking space
    # (U+00A0) rather than a plain ASCII space in the original file — verify
    # the bytes, otherwise this replace is a no-op.
    cleantext = cleantext.replace(' ',' ')
    # urlparse(...).path returns plain text unchanged unless it parses as a
    # URL — presumably used to drop stray "scheme:" prefixes; confirm.
    cleantext = urlparse(cleantext).path
    return cleantext
#----------------------------------------------------------------------------------------------------------------------------------
def generateSub(args,_subtitle,_filename):
    # Translate every non-empty cue of the given SRT content and save the
    # result as args.OUTPUT + _filename.
    subs = pysrt.from_string(str(_subtitle).decode('utf-8'))
    # NOTE(review): plain concatenation — assumes OUTPUT ends with '/'.
    output = args.OUTPUT + _filename
    #file = pysrt.SubRipFile()
    text = ''  # NOTE(review): unused variable
    for index in range(len(subs)):
        if subs[index].text != '':
            if args.VERBOSE:
                print "Translating line:" + cleanhtml(subs[index].text)
            # Translate the cleaned cue from LANG_FROM to LANG_TO in place.
            subs[index].text = translate(cleanhtml(subs[index].text).encode('utf-8'),args.LANG_TO,args.LANG_FROM)
    subs.save(output)
#----------------------------------------------------------------------------------------------------------------------------------
def generateSubMedia(args):
    # Fetch the SRT for a media id from the UPV media REST API and translate
    # it — but only when the requested source language is listed as available.
    subLangURL= 'https://media.upv.es/rest/plugins/admin-plugin-translectures/langs/'
    subUrl = 'https://media.upv.es/rest/plugins/admin-plugin-translectures/srt/'
    langlist =json.loads(urllib2.urlopen(subLangURL + args.SOURCE).read())
    for lang in langlist:
        if lang['lang']==args.LANG_FROM:
            sub = urllib2.urlopen(subUrl + args.SOURCE +'/' + args.LANG_FROM).read()
            # Output name: "<media-id>_<target-lang>.srt".
            generateSub(args,sub,args.SOURCE+'_' + args.LANG_TO.lower() + '.srt')
    return 0
#----------------------------------------------------------------------------------------------------------------------------------
def generateSubFile(args,_filename=None):
    # Translate a single .srt file; _filename overrides args.SOURCE when given
    # (used by generateSubFolder for each discovered file).
    if _filename is None:
        _source = args.SOURCE
    else:
        _source = _filename
    if _source[-4:]=='.srt':
        substring = open(_source,'r').read()
        # Output name: "<name>_<target-lang>.srt".
        generateSub(args,substring,_source.replace('.srt','_' + args.LANG_TO + '.srt'))
    else:
        print "Incorrect file format"
        return -1
#----------------------------------------------------------------------------------------------------------------------------------
def generateSubFolder(args):
    # Translate every .srt file found under args.SOURCE (recursively).
    # NOTE(review): _source is computed but never used — the walk below reads
    # args.SOURCE directly.
    _source = args.SOURCE if args.SOURCE[-1:]=='/' else args.SOURCE + '/'
    if os.path.isdir(args.SOURCE):
        for root, dirs, files in os.walk(args.SOURCE):
            for f in files:
                if f[-4:]=='.srt':
                    substring = open(root + f if root[-1:]=='/' else root + '/' + f,'r').read()
                    generateSub(args,substring,f.replace('.srt','_' + args.LANG_TO + '.srt'))
    else:
        # NOTE(review): message says "file format" but this branch means
        # args.SOURCE is not a directory.
        print "Incorrect file format"
        return -1
#----------------------------------------------------------------------------------------------------------------------------------
def main():
    # CLI entry point: parse arguments and dispatch on --sourceType to the
    # matching generator (media id, single file, or folder).
    parser = ArgumentParser(description='Translate subtitle from media id, file or folder', parents=[])
    parser.add_argument('-v', '--verbose', action='store_true', dest='VERBOSE', default=False, help='Verbose')
    parser.add_argument('-t', '--sourceType', type=str, dest='SOURCE_TYPE', help='source type, pick between media|file|folder')
    parser.add_argument('-s', '--source', type=str, dest='SOURCE', help='source of the subtitle/s')
    parser.add_argument('-langf', '--langFrom', type=str, dest='LANG_FROM', default='es', help='Language that we want to translate')
    parser.add_argument('-langt', '--langTo', type=str, dest='LANG_TO', default='en', help='Language of the output subtitle')
    parser.add_argument('-o', '--output', type=str, dest='OUTPUT', default='./', help='Output folder to store the result')
    args = parser.parse_args()
    # NOTE(review): the bare `except` blocks below swallow every error
    # (including KeyboardInterrupt) and just return -1 — consider logging.
    if (args.SOURCE_TYPE.lower()=='file'):
        try:
            generateSubFile(args)
        except:
            return -1
    elif (args.SOURCE_TYPE.lower()=='folder'):
        try:
            generateSubFolder(args)
        except:
            return -1
    elif (args.SOURCE_TYPE.lower()=='media'):
        try:
            generateSubMedia(args)
        except:
            return -1
    else:
        print "Choose a valid source type"
    return 0
#----------------------------------------------------------------------------------------------------------------------------------
# Script entry point.
if (__name__ == '__main__'):
    main()
|
7,590 | 1a29b3138f6a33fbe2781f044c1bcccd03ecd48d |
# Decimal value -> Roman symbol, used when composing numerals.
d = {
    1: 'I',
    5: 'V',
    10: 'X',
    50: 'L',
    100: 'C',
    500: 'D',
    1000: 'M'
}
# Roman symbol -> decimal value, used when parsing numerals.
e = {
    'I': 1,
    'V': 5,
    'X': 10,
    'L': 50,
    'C': 100,
    'D': 500,
    'M': 1000
}


def convert2roman(num, rom=""):
    """Convert a non-negative integer to a Roman numeral string.

    Processes one decimal digit per recursive step (0 returns `rom`
    unchanged, so convert2roman(0) == ""). Supports 0..3999: a leading
    thousands digit >= 4 would need a symbol for 5000, which `d` lacks.
    Fix: integer arithmetic uses // instead of int(a / b), avoiding float
    rounding for large inputs.
    """
    if num == 0:
        return rom
    digits = len(str(num))
    multiple = 10 ** (digits - 1)
    cur = (num // multiple) * multiple   # value of the leading decimal digit
    num = num % multiple                 # remainder handled by the recursion
    halfway = 5 * multiple
    fullway = 10 * multiple
    if cur + multiple == halfway:
        # Subtractive "4" pattern, e.g. IV, XL, CD.
        rom += d[multiple] + d[halfway]
    elif cur + multiple == fullway:
        # Subtractive "9" pattern, e.g. IX, XC, CM.
        rom += d[multiple] + d[fullway]
    else:
        if cur >= halfway:
            cur -= halfway
            rom += d[halfway]
        if cur > 0:
            rom += d[multiple] * (cur // multiple)
    return convert2roman(num, rom)


def convert2numeral(rom):
    """Parse a Roman numeral string into an integer.

    Fix: the original loop never advanced `cur` when the current symbol's
    value equalled the next one's (e.g. "II") nor when consuming the final
    symbol after a greater-valued one (e.g. "XVI"), so those inputs looped
    forever. Each iteration now always advances by 1 or 2 symbols.
    """
    cur = 0
    num = 0
    while cur < len(rom):
        if cur + 1 < len(rom) and e[rom[cur]] < e[rom[cur + 1]]:
            # Subtractive pair (IV, IX, XL, ...): consume both symbols.
            num += e[rom[cur + 1]] - e[rom[cur]]
            cur += 2
        else:
            num += e[rom[cur]]
            cur += 1
    return num


# Round-trip demo: 499 -> "CDXCIX" -> 499.
a = convert2roman(499)
print(a)
a = convert2numeral(a)
print(a)
|
7,591 | b6b3d94db62b47aac9bf78e8224a38ccff9335e3 | from . import utility
from . import regular_gram_schmidt as RGSC
from . import custom_gram_schmidt as CGSC
def RegularGramSchmidt():
    """Interactively orthogonalise user-entered vectors with the standard
    Gram-Schmidt process, repeating until the user answers 'n'."""
    keep_going = True
    while keep_going:
        user_vectors = utility.get_matrix_from_user(5)
        if len(user_vectors) > 0:
            solver = RGSC.RegularGramSchmidt()
            orthogonalised = solver.calc(user_vectors)
            if orthogonalised is not None:
                print(orthogonalised)
                utility.print_if_matrix_is_basis(orthogonalised)
        keep_going = input("Start over? (Y/n)").lower() != 'n'
def CustomGramSchmidt():
    """Interactively orthogonalise user-entered R(3) vectors with respect to a
    user-supplied inner-product matrix, repeating until the user answers 'n'."""
    keep_going = True
    while keep_going:
        print("Enter the inner product matrix 3x3")
        gram_matrix = utility.get_matrix_from_user(3, True)
        solver = CGSC.CustomGramSchmidt(gram_matrix)
        print("Enter vectors from R(3)")
        user_vectors = utility.get_matrix_from_user(3)
        if len(user_vectors) > 0:
            orthogonalised = solver.calc(user_vectors)
            if orthogonalised is not None:
                print(orthogonalised)
                utility.print_if_matrix_is_basis(orthogonalised)
        keep_going = input("Start over? (Y/n)").lower() != 'n'
|
7,592 | ad024a2001dc6a6fa3a2a9c1b51f79132e914897 | # -*- coding: utf-8 -*-
import requests
import json
url = "http://39.108.188.34:9090/spider/zhongdengdengji.go"
# url = "http://localhost:9090/spider/zhongdengdengji.go"
input = {
"timelimit": "1年",
"title": "GD20190305001",
"maincontractno": "YT20181228001",
"maincontractcurrency": "人民币",
"maincontractsum": "100000",
"description": "Y0181228001测试供应商有限公司与测试项目有限公司就SW00002-20181226-1204,转让应收账款金额100000元T2,测试供应商有限公司已出具应收账款转让通知书,对应的发票号及金额为1111/50000,5555/50000,到期日2018-12-29。付款方万科企业股份有限公司已出具编号为ZB00002-20181226-1204的付款确认及授权书",
"addDebtorList": [
{
# 金融机构
"debtorType": "企业",
"debtorName": "测试供应商有限公司",
"orgCode": "9144030068375453XL",
"businessCode": "9144030068375453XL",
"lei": "#*¥#*(&¥#(*&¥()",
"responsiblePerson": "测试法人1",
"country": "中国",
"province": "黑龙江省",
"city": "哈尔滨市",
"address": "北京天安门",
}
]
}
data = json.dumps(input)
headers = {
'User-Agent': 'User-Agent:Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
"Content-Type": "application/json"}
response = requests.post(url, data=data, headers=headers, timeout=(500, 500))
print(response.text)
# testAccount = [{'account': 'ytbl0011', 'keyword': 'ytbl0011aDmin'}]
|
7,593 | f51d85ff352d9c84a8ded29ad94b24ca6dda46ad |
'''
IplNorm.py
Description:
Normalizing 0 - 255 initial fingerprint to a normalized image.
Using energy normalization.
Input:
-image
Output:
-norm_im
@author: Edoardo Foco
'''
import cv2
import numpy as np
def normalise(image):
    """Energy-normalise a greyscale fingerprint image.

    Divides every pixel by the image mean (image / mean(image)) using the
    legacy OpenCV 1.x `cv2.cv` API and returns a 32-bit float numpy array.
    NOTE(review): assumes an 8-bit single-channel input (IPL_DEPTH_8U,
    1 channel below); `cv2.cv` was removed in OpenCV 3+ — confirm the
    runtime still ships it.
    """
    dbl_image = image.astype(float)
    # calculate the mean of the image.
    mean = np.mean(dbl_image)
    # converting numpy 8-bit image to 8-bit cv2.iplimage
    iplImage = cv2.cv.CreateImageHeader((image.shape[1], image.shape[0]), cv2.cv.IPL_DEPTH_8U, 1)
    cv2.cv.SetData(iplImage, image.tostring(), image.dtype.itemsize * 1 * image.shape[1])
    # initializing 32-bit floating point iplimage
    image_32F = cv2.cv.CreateImage(cv2.cv.GetSize(iplImage), cv2.cv.IPL_DEPTH_32F,1)
    # converting 8-bit unsigned integer image to 32-bit floating point image
    cv2.cv.CvtScale(iplImage,image_32F)
    # energy Normalization. Formula: image = image/mean(image)
    cv2.cv.ConvertScale(image_32F, image_32F, (1/mean), 0);
    # re-converting to numpy image
    norm_im = np.asarray(image_32F[:,:])
    return norm_im
7,594 | 9fd73e0a1dacc46c177f11ce4cf2351b3d622c0d | # Improting Image class from PIL module
from PIL import Image

# Open the first video frame in RGB mode.
im = Image.open("data/frame1.jpg")

# Crop box edges in pixels: (left, top, right, bottom) — a 205x205 region.
left = 155
top = 65
right = 360
bottom = 270

# Cropped image of the above dimensions.
# (crop() returns a new image; the original image is not changed.)
im1 = im.crop((left, top, right, bottom))

# Show the crop and then the original in the default image viewer.
im1.show()
im.show()
|
7,595 | d7570bbea1e8c7674d507f8e86ce04d22058b21b | class Solution:
def numIdenticalPairs(self, nums: List[int]) -> int:
count = 0
inputLength = len(nums)
for i in range (0, inputLength):
for j in range (i, inputLength - 1):
if (nums[i] == nums[j + 1]): count += 1
return count |
7,596 | ef7fad5019e79950e8fad56404e9ba5d302cfe1c |
def convertEnEntier(nombre):
    """Return the Roman numeral for 1-4; any other value yields ""."""
    romains = {1: "I", 2: "II", 3: "III", 4: "IV"}
    return romains.get(nombre, "")
# Demo: prints I, II, III.
print (convertEnEntier(1))
print (convertEnEntier(2))
print (convertEnEntier(3))
|
7,597 | 8b0eed6d1f24b5dd30726ce08c97354a5d5ab69b | # Generated by Django 2.1.2 on 2018-10-25 09:36
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
dependencies = [
('grafit', '0002_article'),
]
operations = [
migrations.RunSQL("""
INSERT INTO grafit_article (id, title, text) VALUES (2, 'MongoDB', 'MongoDB is a free and open-source cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with schemata. MongoDB is developed by MongoDB Inc., and is published under a combination of the Server Side Public License and the Apache License.
10gen software company began developing MongoDB in 2007 as a component of a planned platform as a service product. In 2009, the company shifted to an open source development model, with the company offering commercial support and other services. In 2013, 10gen changed its name to MongoDB Inc.[6]
On October 20, 2017, MongoDB became a publicly-traded company, listed on NASDAQ as MDB with an IPO price of $24 per share.[7] Ad hoc queries
MongoDB supports field, range query, and regular expression searches.[8] Queries can return specific fields of documents and also include user-defined JavaScript functions. Queries can also be configured to return a random sample of results of a given size.
Indexing
Fields in a MongoDB document can be indexed with primary and secondary indices.
Replication
MongoDB provides high availability with replica sets.[9] A replica set consists of two or more copies of the data. Each replica set member may act in the role of primary or secondary replica at any time. All writes and reads are done on the primary replica by default. Secondary replicas maintain a copy of the data of the primary using built-in replication. When a primary replica fails, the replica set automatically conducts an election process to determine which secondary should become the primary. Secondaries can optionally serve read operations, but that data is only eventually consistent by default.
Load balancing[10]
MongoDB scales horizontally using sharding. The user chooses a shard key, which determines how the data in a collection will be distributed. The data is split into ranges (based on the shard key) and distributed across multiple shards. (A shard is a master with one or more slaves.). Alternatively, the shard key can be hashed to map to a shard – enabling an even data distribution.
MongoDB can run over multiple servers, balancing the load or duplicating data to keep the system up and running in case of hardware failure. ');
INSERT INTO grafit_article (id, title, text) VALUES (3, 'NoSQL', 'A NoSQL (originally referring to "non SQL" or "non relational")[1] database provides a mechanism for storage and retrieval of data that is modeled in means other than the tabular relations used in relational databases. Such databases have existed since the late 1960s, but did not obtain the "NoSQL" moniker until a surge of popularity in the early twenty-first century,[2] triggered by the needs of Web 2.0 companies.[3][4][5] NoSQL databases are increasingly used in big data and real-time web applications.[6] NoSQL systems are also sometimes called "Not only SQL" to emphasize that they may support SQL-like query languages, or sit alongside SQL database in a polyglot persistence architecture.[7][8]
Motivations for this approach include: simplicity of design, simpler "horizontal" scaling to clusters of machines (which is a problem for relational databases),[2] and finer control over availability. The data structures used by NoSQL databases (e.g. key-value, wide column, graph, or document) are different from those used by default in relational databases, making some operations faster in NoSQL. The particular suitability of a given NoSQL database depends on the problem it must solve. Sometimes the data structures used by NoSQL databases are also viewed as "more flexible" than relational database tables.[9]
Many NoSQL stores compromise consistency (in the sense of the CAP theorem) in favor of availability, partition tolerance, and speed. Barriers to the greater adoption of NoSQL stores include the use of low-level query languages (instead of SQL, for instance the lack of ability to perform ad-hoc joins across tables), lack of standardized interfaces, and huge previous investments in existing relational databases.[10] Most NoSQL stores lack true ACID transactions, although a few databases, such as MarkLogic, Aerospike, FairCom c-treeACE, Google Spanner (though technically a NewSQL database), Symas LMDB, and OrientDB have made them central to their designs. (See ACID and join support.)
Instead, most NoSQL databases offer a concept of "eventual consistency" in which database changes are propagated to all nodes "eventually" (typically within milliseconds) so queries for data might not return updated data immediately or might result in reading data that is not accurate, a problem known as stale reads.[11] Additionally, some NoSQL systems may exhibit lost writes and other forms of data loss.[12] Some NoSQL systems provide concepts such as write-ahead logging to avoid data loss.[13] For distributed transaction processing across multiple databases, data consistency is an even bigger challenge that is difficult for both NoSQL and relational databases. Even current relational databases "do not allow referential integrity constraints to span databases."[14] There are few systems that maintain both ACID transactions and X/Open XA standards for distributed transaction processing. ');
INSERT INTO grafit_article (id, title, text) VALUES (4, 'SQL', 'SQL was initially developed at IBM by Donald D. Chamberlin and Raymond F. Boyce after learning about the relational model from Ted Codd[15] in the early 1970s.[16] This version, initially called SEQUEL (Structured English Query Language), was designed to manipulate and retrieve data stored in IBM''s original quasi-relational database management system, System R, which a group at IBM San Jose Research Laboratory had developed during the 1970s.[16]
Chamberlin and Boyce''s first attempt of a relational database language was Square, but it was difficult to use due to subscript notation. After moving to the San Jose Research Laboratory in 1973, they began work on SEQUEL.[15] The acronym SEQUEL was later changed to SQL because "SEQUEL" was a trademark of the UK-based Hawker Siddeley aircraft company.[17]
In the late 1970s, Relational Software, Inc. (now Oracle Corporation) saw the potential of the concepts described by Codd, Chamberlin, and Boyce, and developed their own SQL-based RDBMS with aspirations of selling it to the U.S. Navy, Central Intelligence Agency, and other U.S. government agencies. In June 1979, Relational Software, Inc. introduced the first commercially available implementation of SQL, Oracle V2 (Version2) for VAX computers. By 1986, ANSI and ISO standard groups officially adopted the standard "Database Language SQL" language definition. New versions of the standard were published in 1989, 1992, 1996, 1999, 2003, 2006, 2008, 2011,[15] and most recently, 2016. After testing SQL at customer test sites to determine the usefulness and practicality of the system, IBM began developing commercial products based on their System R prototype including System/38, SQL/DS, and DB2, which were commercially available in 1979, 1981, and 1983, respectively.[18] ');
INSERT INTO grafit_article (id, title, text) VALUES (5, 'MySQL', 'Built on MySQL Enterprise Edition and powered by the Oracle Cloud, Oracle MySQL Cloud Service provides a simple, automated, integrated and enterprise ready MySQL cloud service, enabling organizations to increase business agility and reduce costs. "Relying on the MySQL engine as the low-level storage layer has allowed us to very quickly build a robust system."
"We have successfully implemented MySQL Cluster Carrier Grade Edition for our highly mission critical XDMS application which will enable the next generation of converged services."
"We found that MySQL was the best database in terms of the price-point and functionality it offers up. The benefits that MySQL brings to our Brightmail product is its relaiability, robustness and very low-cost administration costs."');
INSERT INTO grafit_article (id, title, text) VALUES (6, 'Critical Flaw Reported In phpMyAdmin Lets Attackers Damage Databases', 'A critical security vulnerability has been reported in phpMyAdmin—one of the most popular applications for managing the MySQL database—which could allow remote attackers to perform dangerous database operations just by tricking administrators into clicking a link.
Discovered by an Indian security researcher, Ashutosh Barot, the vulnerability is a cross-site request forgery (CSRF) attack and affects phpMyAdmin versions 4.7.x (prior to 4.7.7).
Cross-site request forgery vulnerability, also known as XSRF, is an attack wherein an attacker tricks an authenticated user into executing an unwanted action.
According to an advisory released by phpMyAdmin, "by deceiving a user to click on a crafted URL, it is possible to perform harmful database operations such as deleting records, dropping/truncating tables, etc."
phpMyAdmin is a free and open source administration tool for MySQL and MariaDB and is widely used to manage the database for websites created with WordPress, Joomla, and many other content management platforms.
Moreover, a lot of hosting providers use phpMyAdmin to offer their customers a convenient way to organize their databases.
Barot has also released a video, as shown above, demonstrating how a remote attacker can make database admins unknowingly delete (DROP) an entire table from the database just by tricking them into clicking a specially crafted link.
"A feature of phpMyAdmin was using a GET request and after that POST request for Database operations such as DROP TABLE table_name; GET requests must be protected against CSRF attacks. In this case, POST requests were used which were sent through URL (for bookmarking purpose may be); it was possible for an attacker to trick a database admin into clicking a button and perform a drop table database query of the attacker’s choice." Barot explains in a blog post.
However, performing this attack is not simple as it may sound. To prepare a CSRF attack URL, the attacker should be aware of the name of targeted database and table.
"If a user executes a query on the database by clicking insert, DROP, etc. buttons, the URL will contain database name and table name," Barot says. "This vulnerability can result in the disclosure of sensitive information as the URL is stored at various places such as browser history, SIEM logs, Firewall Logs, ISP Logs, etc."
Barot reported the vulnerability to phpMyAdmin developers, who confirmed his finding and released phpMyAdmin 4.7.7 to address this issue. So administrators are highly recommended to update their installations as soon as possible.
');
INSERT INTO grafit_article (id, title, text) VALUES (25, 'Death By Database', 'The following is a true story, but with names changed.
When I work with clients to build software, I take the usual steps of understanding their needs, gathering requirements, learning about their customers, and so on. At this point I have a model on paper of roughly what the software is intended to do, so they get surprised when I immediately turn to database design.
"Who care about database design? What about mockups? What about workflows?"
Let me tell you about "Bob''s Luxury Goods." I worked for this company many years ago and they had a retail store selling ... you guessed it ... luxury goods. They''d ask all customers for a billing address and if they had a different delivery address. At the database level, they had a "one-to-many" relationship between customers and addresses.
That was their first problem. A customer''s partner might come into Bob''s and order something and if the address was entered correctly it would be flagged as "in use" and we had to use a different address or deliberately enter a typo. Fortunately, addresses were case-sensitive, so many people had UPPER-CASE ADDRESSES.
We should have had a many-to-many relationship between customers and addresses so we could handle the case where more than one person would share the same address, but we didn''t. Further, I was never allocated the time to fix the database because it was "cheaper" to remove the restriction on "flagged" addresses and allow a duplicate address to be used.
Naturally, being a luxury goods company, we had many repeat customers and sometimes they would move and if we didn''t find the duplicate address, or the address with the "typo", we might update the address for one partner, but not the other. That was a headache, but it didn''t happen frequently enough for management to worry about it.
That''s when the marketing department had a brilliant, inexpensive idea. You see, we periodically did mass mailings of special events to our customers. Since we had the software to do mass mailings, why not import a mailing list of all addresses in high net worth areas and mail everyone about upcoming special events? So the company went ahead and bought a database with all of these addresses, but forgot to mention to me that I was supposed to implement this.
Except that every address record had the customer id embedded in it, so we couldn''t enter an address without a customer.
"Curtis," they said, "just enter a dummy customer called ''Occupant'' and attach all addresses to that."
Except you couldn''t enter a customer without an order.
Except you couldn''t enter an order without at least one item on it.
Except you couldn''t enter an item unless it was listed in inventory.
Except that reserved the "inventory" item and made it unavailable.
Except, except, except ...
It came down to trying to create a fake customer, with a fake order, with a fake item, with a fake item category, with a "paid" invoice, with exceptions sprinkled throughout the codebase to handle all of these special cases and probably more that I no longer remember.
Then, and only then, could I write the code to provide "generic" mass mailings. Management decided it was easier to hire an outside company to handle the mailing list for them.
If they had simply had a proper database design up front, they could have reused their existing system with little trouble.
That''s what bad database design costs you and why I usually start with that before writing my software.
Note: if you''re not familiar with database design, here''s a talk I give where I make it fairly simple to understand. I mostly avoid big words.');
INSERT INTO grafit_article (id, title, text) VALUES (33, 'GitHub Actions: built by you, run by us', 'Yesterday at GitHub Universe, we announced GitHub Actions, a new way to automate and customize your workflows. Configuring the apps and services that make up your development cycle takes significant time and effort. GitHub Actions applies open source principles to workflow automation, weaving together the tools you use from idea to production into one complete workflow. You can also create, share, and discover any actions your projects require, just as you would create, share, and discover code on GitHub.
Learn more about actions
As we prepared for Universe, we shared GitHub Actions with a group of customers, integrators, and open source maintainers to see what they could do. In just a few short weeks, talented teams and individuals alike have created hundreds of GitHub Actions. During today’s Universe keynote, we heard directly from developers, and we’re excited to share their work with you');
INSERT INTO grafit_article (id, title, text) VALUES (34, 'Git Submodule Vulnerability Announced ', '
The Git project has disclosed CVE-2018-17456, a vulnerability in Git that can cause arbitrary code to be executed when a user clones a malicious repository. Git v2.19.1 has been released with a fix, along with backports in v2.14.5, v2.15.3, v2.16.5, v2.17.2, and v2.18.1. We encourage all users to update their clients to protect themselves.
Until you’ve updated, you can protect yourself by avoiding submodules from untrusted repositories. This includes commands such as git clone --recurse-submodules and git submodule update.
Affected products
GitHub Desktop
GitHub Desktop versions 1.4.1 and older included an embedded version of Git that was affected by this vulnerability. We encourage all GitHub Desktop users to update to the newest version (1.4.2 and 1.4.3-beta0) available today in the Desktop app.
Atom
Atom included the same embedded Git and was also affected. Releases 1.31.2 and 1.32.0-beta3 include the patch.
Ensure you’re on the latest Atom release by completing any of the following:
Windows: From the toolbar, click Help -> Check for Updates
MacOS: From the menu bar, click Atom -> Check for Update
Linux: Update manually by downloading the latest release from atom.io
Git on the command line and other clients
In order to be protected from the vulnerability, you must update your command-line version of Git, and any other application that may include an embedded version of Git, as they are independent of each other.
Additional notes
Neither GitHub.com nor GitHub Enterprise are directly affected by the vulnerability. However, as with previously discovered vulnerabilities, GitHub.com will detect malicious repositories, and will reject pushes or API requests attempting to create them. Versions of GitHub Enterprise with this detection will ship on October 9.
Details of the vulnerability
This vulnerability is very similar to CVE-2017-1000117, as both are option-injection attacks related to submodules. In the earlier attack, a malicious repository would ship a .gitmodules file pointing one of its submodules to a remote repository with an SSH host starting with a dash (-). The ssh program—spawned by Git—would then interpret that as an option. This attack works in a similar way, except that the option-injection is against the child git clone itself.
The problem was reported on September 23 by @joernchen, both to Git’s private security list, as well as to GitHub’s Bug Bounty program. Developers at GitHub worked with the Git community to develop a fix.
The basic fix was clear from the report. However, due to to the similarity to CVE-2017-1000117, we also audited all of the .gitmodules values and implemented stricter checks as appropriate. These checks should prevent a similar vulnerability in another code path. We also implemented detection of potentially malicious submodules as part of Git’s object quality checks (which was made much easier by the infrastructure added during the last submodule-related vulnerability).
The coordinated disclosure date of October 5 was selected by Git developers to allow packagers to prepare for the release. This also provided hosting sites (with custom implementations) ample time to detect and block the attack before it became public. Members of the Git community checked the JGit and libgit2 implementations. Those are not affected by the vulnerability because they clone submodules via function calls rather than separate commands.
We were also able to use the time to scan all repositories on GitHub for evidence of the attack being used in the wild. We’re happy to report that no instances were found (and now, with our detection, none can be added).
Please update your copy of Git soon, and happy cloning!
');
INSERT INTO grafit_article (id, title, text) VALUES (21, 'Hackers Targeting Servers Running Database Services for Mining Cryptocurrency', 'Security researchers have discovered multiple attack campaigns conducted by an established Chinese criminal group that operates worldwide, targeting database servers for mining cryptocurrencies, exfiltrating sensitive data and building a DDoS botnet.
The researchers from security firm GuardiCore Labs have analyzed thousands of attacks launched in recent months and identified at least three attack variants—Hex, Hanako, and Taylor—targeting different MS SQL and MySQL servers for both Windows and Linux.
The goals of all the three variants are different—Hex installs cryptocurrency miners and remote access trojans (RATs) on infected machines, Taylor installs a keylogger and a backdoor, and Hanako uses infected devices to build a DDoS botnet.
So far, researchers have recorded hundreds of Hex and Hanako attacks and tens of thousands of Taylor attacks each month and found that most compromised machines are based in China, and some in Thailand, the United States, Japan and others.
To gain unauthorized access to the targeted database servers, the attackers use brute force attacks and then run a series of predefined SQL commands to gain persistent access and evade audit logs.
What''s interesting? To launch the attacks against database servers and serve malicious files, attackers use a network of already compromised systems, making their attack infrastructure modular and preventing takedown of their malicious activities.');
INSERT INTO grafit_article (id, title, text) VALUES (22, 'RIP Open Source MySQL', ' This is an excellent opportunity for the Postgres community to step up an promote Postgres.
rbanffy on Aug 18, 2012 [-]
I think this would be a mistake.
This is an excellent opportunity to demonstrate that anyone can fork the MySQL codebase and create other plug-in replacement databases with it, such as MariaDB and Drizzle.
All that is lost is the MySQL name and brand.
PostgreSQL users and developers must seize the opportunity to show businesses that free software cannot be killed, not even by mighty Oracle. They and, most notably, Microsoft, have been trying to kill it for more than a decade now.
Because the anti-free-software FUD machine (fed in part by Oracle itself) is already having a wonderful time with this.
Udo on Aug 18, 2012 [-]
I wish I could mod this up a hundred times. PostgreSQL people themselves have been playing into the hands of corporate FUDders with their incessant and inappropriate peddling. MySQL is not your enemy, MS SQL Server is. Oracle''s software empire as a whole certainly is your enemy. Show some solidarity with a fellow open source project!
MySQL and PostgreSQL represent two very different implementation philosophies, and being able to choose between them according to taste and merits is a good thing.
Most of us have suspected that the MySQL project itself was going to die as it was acquired by Oracle, in the same way Open Office died when it was acquired by Oracle. This is a company where good software goes to expire, either due to a deliberate intention or gross incompetence I can''t say but I suspect it''s a mixture of both. However sad that may be for the MySQL (or OpenOffice) brand name, the code itself lives on and continues to evolve within a rich open source ecosystem.
Hence, sensational and petulant "RIP $PRODUCTNAME" articles are unnecessary. There is no threat to existing projects based on MySQL or any other successful open source project for that matter. Not only will this stuff be free forever, it will also continue to grow and be developed on its own.
The corporate assassination of open source projects will only work if we let it, it''s a purely psychological game. ');
INSERT INTO grafit_article (id, title, text) VALUES (23, 'Free Text Sources', 'There are a few interesting things to talk about surrounding free and open textbooks. Quality is one. Usability is another. Why to write one (and/or, why not) is certainly critical. But where can you find these disruptive, open texts?
Not all faculty know there are free and open texts they can use; finding free and/or open textbooks (or even knowing to look) can sometimes be a trick. I knew about one or two sources, and did a little bit more digging. Admittedly, many of the sources of free texts linked below have a technical bent. On one hand, this might be because math, computing, and the sciences are familiar with working openly and giving things away. On the other, it might be because I am a member of the computing faculty, and therefore am most familiar with resources in that space.');
INSERT INTO grafit_article (id, title, text) VALUES (24, 'Apache Software Foundation Public Mail Archives', 'A collection of all publicly available mail archives from the Apache55 Software Foundation (ASF), taken on July 11, 2011. This collection contains all publicly available email archives from the ASF''s 80+ projects (http://mail-archives.apache.org/mod_mbox/), including mailing lists such as Apache HTTPD Server, Apache Tomcat, Apache Lucene and Solr, Apache Hadoop and many more. Generally speaking, most projects have at least three lists: user, dev and commits, but some have more, some have less. The user lists are where users of the software ask questions on usage, while the dev list usually contains discussions on the development of the project (code, releases, etc.) The commit lists usually consists of automated notifications sent by the various ASF version control tools, like Subversion or CVS, and contain information about changes made to the project''s source code.
Both tarballs and per project sets are available in the snapshot. The tarballs are organized according to project name. Thus, a-d.tar.gz contains all ASF projects that begin with the letters a, b, c or d, such as abdera.apache.org. Files within the project are usually gzipped mbox files.
');
INSERT INTO grafit_article (id, title, text) VALUES (26, 'PostgreSQL - Overview', 'PostgreSQL is a powerful, open source object-relational database system. It has more than 15 years of active development phase and a proven architecture that has earned it a strong reputation for reliability, data integrity, and correctness.
This tutorial will give you a quick start with PostgreSQL and make you comfortable with PostgreSQL programming.
What is PostgreSQL?
PostgreSQL (pronounced as post-gress-Q-L) is an open source relational database management system (DBMS) developed by a worldwide team of volunteers. PostgreSQL is not controlled by any corporation or other private entity and the source code is available free of charge.
A Brief History of PostgreSQL
PostgreSQL, originally called Postgres, was created at UCB by a computer science professor named Michael Stonebraker. Stonebraker started Postgres in 1986 as a follow-up project to its predecessor, Ingres, now owned by Computer Associates.
1977-1985 − A project called INGRES was developed.
Proof-of-concept for relational databases
Established the company Ingres in 1980
Bought by Computer Associates in 1994
1986-1994 − POSTGRES
Development of the concepts in INGRES with a focus on object orientation and the query language - Quel
The code base of INGRES was not used as a basis for POSTGRES
Commercialized as Illustra (bought by Informix, bought by IBM)
1994-1995 − Postgres95
Support for SQL was added in 1994
Released as Postgres95 in 1995
Re-released as PostgreSQL 6.0 in 1996
Establishment of the PostgreSQL Global Development Team
Key Features of PostgreSQL
PostgreSQL runs on all major operating systems, including Linux, UNIX (AIX, BSD, HP-UX, SGI IRIX, Mac OS X, Solaris, Tru64), and Windows. It supports text, images, sounds, and video, and includes programming interfaces for C / C++, Java, Perl, Python, Ruby, Tcl and Open Database Connectivity (ODBC).
PostgreSQL supports a large part of the SQL standard and offers many modern features including the following −
Complex SQL queries
SQL Sub-selects
Foreign keys
Trigger
Views
Transactions
Multiversion concurrency control (MVCC)
Streaming Replication (as of 9.0)
Hot Standby (as of 9.0)
You can check official documentation of PostgreSQL to understand the above-mentioned features. PostgreSQL can be extended by the user in many ways. For example by adding new −
Data types
Functions
Operators
Aggregate functions
Index methods
Procedural Languages Support
PostgreSQL supports four standard procedural languages, which allows the users to write their own code in any of the languages and it can be executed by PostgreSQL database server. These procedural languages are - PL/pgSQL, PL/Tcl, PL/Perl and PL/Python. Besides, other non-standard procedural languages like PL/PHP, PL/V8, PL/Ruby, PL/Java, etc., are also supported.');
INSERT INTO grafit_article (id, title, text) VALUES (27, 'Setup PostgreSQL on Windows with Docker', 'Over the weekend I finally got the chance to start reading A Curious Moon by Rob Conery which is a book on learning PostgreSQL by following the fictional Dee Yan as she is thrown into database administrator role at an aerospace startup.
I have a lot of experience using Microsoft’s SQL Server, but up until now, I haven’t touched PostgreSQL. For personal projects SQL Server’s cost and be prohibitive and the release of Rob’s book added up to a good time to give PostgreSQL a try.
Install Directly or not?
On the download section of the official Postgres site, there is an option to download an installer. This is the route I was going to at first, but in Rob’s book, he suggests using a VM for Postgres installation on Windows. This kicked off a lot of searching on my part and didn’t find a good definitive answer on why that is or isn’t the way to do.
In the end, I decided to try and run the Postgres process using Docker instead installing directly on Windows or dealing with a full VM.
Installing Docker
Head to this link and click the Get Docker link to download the installer. After the install is complete you will have to log out and back in. When I logged back in I got a message about Hyper-V not being enabled.
After logging back in I then got the following message about hardware-assisted virtualization not being enabled.
After tweaking my BIOS settings and logging back in I was greeted by the Docker welcome screen.
Open a command prompt and run the following command.
docker run hello-world
You should output that starts with the following if your installation is working.
Hello from Docker!
This message shows that your installation appears to be working correctly.
What about Postgres?
Getting up and going with a container running Postgres was pretty simple and could be done with the following command which will create a container and expose the port used by Postgres so it can be accessed from the host.
docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d postgres
The problem with this approach is if you ever need to rebuild the container for some reason, like a new version of Postgres is released, your data will be lost. Thankfully I found this blog post which shows how to use a secondary container for the data leaving the Postgres container able to be destroyed and recreated as needed. The following is the command I used to create my data container.
docker create -v /var/lib/postgresql/data --name PostgresData alpine
The above creates a container named PostgresData based on the Alpine image. It is important that the -v parameter matches the path that Postgres expects.
Now that we have a container that will keep our data safe let’s create the actual Postgres container with the following command.
docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d --volumes-from PostgresData postgres
The only difference from the first example run command is the addition of –volumes-from PostgresData which tells the container to use the PostgresData container.
If you run the docker ps -a command it will show you all your containers.
As you can see in my example I have two containers only one of which is actually running. Make sure you don’t remove the data container just because it will never show as running.
');
INSERT INTO grafit_article (id, title, text) VALUES (28, 'DIY: A PostgreSQL database server setup anyone can handle', 'When it comes to databases, I''m a fan of MySQL. The open source database can handle just about any load you want to throw at it, and it has lots of powerful tools that can be used to manage it.
The other popular open source database is PostgreSQL, which is cross-platform and is used by numerous applications. Although PostgreSQL is often seen as being as powerful as MySQL, it doesn''t have nearly the number of available tools to make setup and management as easy as its competition. So I''ve written this handy PostgreSQL primer on how to get your database server up and running and ready to use. (Although PostgreSQL is cross-platform, I demonstrate the installation and setup on a Ubuntu 11.04 machine because it''s my platform of choice. The translation to other platforms should be simple.)
Step 1: Install PostgreSQL
Here are the installation steps on Ubuntu (this installation will also work on any Debian-based distribution):
Open a terminal window.
Issue the command sudo apt-get install postgresql.
Type the sudo password necessary to give you admin rights and hit Enter.
Allow apt to pick up any necessary dependencies.
Once the installation is complete, it''s time to set this baby up.
Step 2: Change the default user password
Caution: If you don''t follow this step, you will not be able to add databases and administer PostgreSQL, and the database will not be secure.
Here''s how to change the password for the default user. The user in question is postgres, and the password is changed like so:
Open a terminal window.
Issue the command sudo passwd postgres.
Type (and confirm) that password to be used for this user.
The postgres user will be the only user on your system that can open the PostgreSQL prompt without defining a database, which means postgres is the only user who can administer PostgreSQL. To test this, change to the postgres user with the command su - postgres and then enter the command psql. You should now be at the Postgres prompt, which looks like:
postgres=#
All other users have to gain access to the prompt like so:
psql DB_NAME
where DB_NAME is the name of an existing database.
');
INSERT INTO grafit_article (id, title, text) VALUES (31, 'The Marketing Behind MongoDB', ' 100% of my friends who have used Mongo/similar NoSQL have given up and had a nasty rewrite back to pgSQL.
This seems to be the journey:
1. Lack of migrations is awesome! We can iterate so quickly for MVP
2. Get users
3. Add features, still enjoying the speed of iteration
4. Get more users
5. Start building reporting features for enterprise/customer support/product metrics (ie: when the real potential success starts)
6. Realise you desperately need joins, transactions and other SQL features
7. Pause product dev for 1-3+ months to migrate back to SQL, or do some weird parallel development process to move it piecemeal back.
I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?
My thought is definitely yes.
brandur on Aug 29, 2017 [-]
> I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?
I''ve used Postgres and Mongo pretty extensively, and for any reasonably seasoned developer, the startup overhead of an SQL system is a myth. There may upfront cost to learning how an RDMS and SQL work in the first place, but once you''re familiar with them, they''ll be faster than Mongo on any new project.
The schemaless concept of a document database seems to be the major selling factor in velocity of movement, but once you''ve got a good handle on a migration framework in the vein of ActiveRecord or other popular software, that''s negated completely. It also really doesn''t take long before schemaless starts to cause big problems for you in terms of data consistency -- it''s not just the big players that get bitten by this.
The simplified query language is another one. SQL is a little bit obtuse, but it''s not that bad once you have a handle on it, and a lot of people are familiar with it. Once you add in an ORM layer, the lazy-style access of a framework like Sequel or SQLAlchemy makes the developer experience quite a bit better than any Mongo APIs that I''ve seen. Also, after you get beyond trivial usage, SQL''s flexibility so wildly outstrips Mongo''s query documents that it''s not even worth talking about.
Postgres on the other hand ships with a great management CLI, a very powerful REPL (psql), and features like data types/constraints/transactions that guarantee you correctness with zero effort on your part. I can only speak for myself, but I''d take Postgres to the hackathon any day of the week.
martinald on Aug 29, 2017 [-]
I totally agree with you, and started writing something about how understanding a good ORM takes nearly all the headache away.
I think the thing people do find slow is a lot of ''documents within documents'' in SQL. It turns out this is usually a bad development pattern long term but it is super fast being able to just add docs inside docs with no configuration. It feels very slow writing foreign keys, navigation props and schemas for this in SQL vs JSON, where you can just dump your object in and you''re done.
Basically; I think with noSQL you get some very short term gain for a lot of long term pain, and you''re right, ORMs and other tooling solves this mostly.
I myself fell for this trap, and while it was a nightmare it actually matured me more as a professional more than anything I''ve ever done recently. Regardless of crazy hype, I don''t think I''ll ever fall for a solution so easily without evaluating it properly.
I think I assumed the "crowd" had done the tech due diligence on this stuff and it definitely wasn''t the case. ');
INSERT INTO grafit_article (id, title, text) VALUES (32, 'Countless NoSQL databases competed to be the database of choice', 'In 2013, 10gen — the company behind MongoDB — moved into a large 30,000 square foot office in Midtown Manhattan.
The transfer into the former New York Times building capped off a tremendous period of growth: the database boasted 4 million downloads, the MongoDB User Groups already attracted 15,000 members, and ~10,000 people had attended a global event in 2012. Their offices were truly global from London to Sydney to Dublin and Barcelona — and a requisite west coast headquarters in Palo Alto.
Despite the traction, many startups using MongoDB faced their own challenges. One part of MongoDB’s success among startups was because some didn''t critically assess 10gen’s marketing message.
As engineers, we often discuss technical attacks (e.g., DDoS, Sybil attacks, security vulnerabilities), but need to spend time debating how to protect ourselves from marketing “attacks”.1 Today, developer marketing is subtle — third party blog posts, content marketing disguised as engineering lessons, biased talks, and sponsored hackathons — not clearly marked content from vendors. As such, startup engineering decisions can hinge on sources that are not impartial.
A large amount of "engineering" content — even when written by engineers — is actually marketing, rather than thoughtful content whose aim is to help you make the best decision.
Previously, we looked at the hype around NoSQL and common engineering mistakes to see how MongoDB became so successful. Now, let''s take a look into 10gen''s marketing strategy — as told by their employees.2
10gen’s marketing strategy is an increasingly common playbook and understanding it is useful for future developer tool decisions.');
INSERT INTO grafit_article (id, title, text) VALUES (30, 'Comment Arrango', ' ArangoDB always makes for exciting benchmark posts.
I could see myself there in a bowler hat with a fistful of racing chits screaming “go, Postgres, go.”
I’d love to see a competition were the developers of each database got to use the same hardware and data then tune the hell out of their configs, queries, and indices.
Red Bull could sponsor it. I’d buy a T-shirt.
kbenson 8 months ago [-]
That doesn''t sound that hard to start. Something like RealWorld[1] and the Web Framework Benchmarks[2] combined but for DB workloads. Have one dataset that includes data amenable to OLAP and OLTP, but have separate tests each consisting of OLAP queries, OLTP queries, and combined queries. Choose a low-end, mid-range and high-end set of AWS or GCE instances/configs to normalize against. Let people submit pull requests with new technologies or configs.
You''d want to get some funding to run the tests (or maybe solicit Google or Amazon to see if you could get the instance time donated once a month or something.
If you started small, with maybe a portion of these features, and then scaled up over time, you might actually get to the point where you had tests that emulated a power failure, or master/slave and dual master scenarios and how they handle certain common network errors (split-brain). That would be an amazing resource.
Edit: It occurs to me I probably should have read more of the article, since this is sort of what they are doing already...
1: https://github.com/gothinkster/realworld
2: https://www.techempower.com/benchmarks/
etxm 8 months ago [-]
Yeah after I posted it I started thinking about what it would take and what that would actually look like... and how you’d cheat :)
It would probably require a few different categories with some sort of output assertion to validate the query performed right and a means of tracking CPU, usage ram usage, and execution time.
It would be cool to see things like disaster recovery and chaos proofing as well. ');
INSERT INTO grafit_article (id, title, text) VALUES (35, 'Applying machine intelligence to GitHub security alerts ', 'Last year, we released security alerts that track security vulnerabilities in Ruby and JavaScript packages. Since then, we’ve identified more than four million of these vulnerabilities and added support for Python. In our launch post, we mentioned that all vulnerabilities with CVE IDs are included in security alerts, but sometimes there are vulnerabilities that are not disclosed in the National Vulnerability Database. Fortunately, our collection of security alerts can be supplemented with vulnerabilities detected from activity within our developer community.
Leveraging the community
There are many places a project can publicize security fixes within a new version: the CVE feed, various mailing lists and open source groups, or even within its release notes or changelog. Regardless of how projects share this information, some developers within the GitHub community will see the advisory and immediately bump their required versions of the dependency to a known safe version. If detected, we can use the information in these commits to generate security alerts for vulnerabilities which may not have been published in the CVE feed.
On an average day, the dependency graph can track around 10,000 commits to dependency files for any of our supported languages. We can’t manually process this many commits. Instead, we depend on machine intelligence to sift through them and extract those that might be related to a security release.
For this purpose, we created a machine learning model that scans text associated with public commits (the commit message and linked issues or pull requests) to filter out those related to possible security upgrades. With this smaller batch of commits, the model uses the diff to understand how required version ranges have changed. Then it aggregates across a specific timeframe to get a holistic view of all dependencies that a security release might affect. Finally, the model outputs a list of packages and version ranges it thinks require an alert and currently aren’t covered by any known CVE in our system.
Always quality focused
No machine learning model is perfect. While machine intelligence can sift through thousands of commits in an instant, this anomaly-detection algorithm will still generate false positives for packages where no security patch was released. Security alert quality is a focus for us, so we review all model output before the community receives an alert.
Learn more');
INSERT INTO grafit_article (id, title, text) VALUES (29, 'Performance Benchmark 2018', 'I''ve stopped reading database benchmarks, because they are extremely vague. Instead I spend my time optimizing my current solution/stack. For example Postgresql has hundreds of knobs that you can adjust for almost every scenario you can imagine. Sometimes you have a special query and increase the work_mem just for that session. Other cases you adjust the cost settings for another query/session. You can analyze your indexes and index types. And sometimes you need to rewrite parts of a big query.
Learning all this takes time, you are much better off learning more about your chosen technology stack than switching to another technology stack.
Though in a few rare races, you need a different technology to solve your business problem. In most cases they complement your existing solution, like Elasticsearch/Solr for full-text search or Clickhouse for OLAP workloads.
maxxxxx 8 months ago [-]
Agreed. Switching to another system is expensive and the benefit is pretty questionable.
emsy 8 months ago [-]
Unless you hit a very specific use-case/bottleneck, which I only ever witnessed once.
TremendousJudge 8 months ago [-]
expand, please?
maxxxxx 8 months ago [-]
I imagine something very specific like having a lot of inserts into a table and that being your main use case. Depending on your data some databases may be better than others and that should be easy to measure.
In most real-world cases the requirements however are not very clear and often conflicting so it''s much harder to get data that shows the performance of one system over the other.
gopalv 8 months ago [-]
> Depending on your data some databases may be better than others and that should be easy to measure.
And the performance difference could be an accidental feature of the design and completely unintentional.
Postgres for instance has a native data engine, so it can store the exact row-ids for a row into an index, but this means that every update to the row needs all indexes to be updated.
Mysql has many data engines (InnoDB and MyISAM to start with), to the row-id is somewhat opaque, so the index stores the primary key which can be pushed to the data engine scans and then have it lookup a row-id internally. This needs an index to be touched for the columns you modify explicitly or if the primary key is updated (which is a usual no-no due to UNIQUE lookup costs).
When you have a single wide table with a huge number of indexes, where you update a lot of dimensions frequently, the performance difference between these two solutions is architectural.
And if you lookup along an index with few updates, but long running open txns, that is also materially different - one lookup versus two.
Though how it came about isn''t really intentional. ');
"""),
]
|
#!/usr/bin/env python
# coding=utf8
"""Train a tiny add-one-smoothed Naive Bayes sentiment model.

Reads two tweet files (negativeTweets.txt / positiveTweets.txt), counts
words per class, converts the counts to per-class log10 probabilities,
and dumps the model both as sorted JSON and as PHP mysqli INSERT
statements.

Fixes over the previous revision:
- stop words are now compared case-insensitively (previously e.g. "The"
  slipped through because the filter ran on the original-cased token
  while the counters stored lower-cased tokens);
- the merge loop no longer reuses ``d``, which shadowed the positive
  Counter;
- Python 3 ``print()`` calls and context-managed output files;
- single compiled word regex instead of two duplicated literals;
- removed unused imports (operator, pickle) and unused variables
  (path, NT, PT).
"""
import collections
import json
import math
import re

# Stop words excluded from the model (checked against the lower-cased token).
BAD_WORDS = frozenset(['and', 'the', 'was', 'for'])
# Runs of letters only — no digits or underscores.
WORD_RE = re.compile(r'\b[^\W\d_]+\b')


def count_words(filename):
    """Return a Counter of lower-cased words from every line of *filename*.

    Words must be longer than 2 characters and not stop words.
    """
    counter = collections.Counter()
    with open(filename) as handle:
        for line in handle:
            counter.update(
                word.lower()
                for word in WORD_RE.findall(line)
                if len(word) > 2 and word.lower() not in BAD_WORDS
            )
    return counter


def merge_counts(negative, positive):
    """Merge per-class counters into ``{word: [neg_count+1, pos_count+1]}``.

    The +1 baseline is add-one (Laplace) smoothing so a word unseen in one
    class never yields log(0) later.
    """
    merged = {}
    for column, counter in enumerate((negative, positive)):
        for word, count in counter.items():
            if word not in merged:
                merged[word] = [1, 1]
            merged[word][column] += count
    return merged


def log_probabilities(merged):
    """Return ``{word: [log10 P(word|neg), log10 P(word|pos)]}``."""
    totals = [0, 0]
    for counts in merged.values():
        totals = [t + c for t, c in zip(totals, counts)]
    return {
        word: [math.log10(counts[0] / totals[0]),
               math.log10(counts[1] / totals[1])]
        for word, counts in merged.items()
    }


def write_outputs(prob):
    """Dump *prob* as sorted JSON and as PHP mysqli INSERT statements."""
    with open('hackTechTweetClassificationModel.txt', 'w') as out:
        out.write(json.dumps(prob, sort_keys=True))
    # NOTE(review): the generated PHP interpolates values straight into SQL.
    # Acceptable for trusted training output, unsafe for untrusted words.
    with open('hackTechTweetPHP.php', 'w') as out:
        for word, (neg, pos) in prob.items():
            out.write(
                "$result=mysqli_query($con, \"INSERT INTO ttrain VALUES ('"
                + str(word) + "','" + str(neg) + "','" + str(pos)
                + "')\" );\n"
            )


def main():
    """Run the full train-and-dump pipeline, echoing counts to stdout."""
    negative = count_words("negativeTweets.txt")
    positive = count_words("positiveTweets.txt")

    merged = merge_counts(negative, positive)
    for word, counts in merged.items():
        print(word, counts)
    print(len(merged))

    prob = log_probabilities(merged)
    for word, values in prob.items():
        print(word, values)

    write_outputs(prob)


if __name__ == '__main__':
    main()
|
7,599 | 5adb16c654a4e747f803590c42328fa6ba642e95 | import os
from subprocess import Popen, PIPE
from Bio import SeqIO
from Bio.Align.Applications import ClustalOmegaCommandline
from Bio import Phylo
from io import StringIO
# from ete3 import Tree, TreeStyle
import pylab
class TreeDrawer:
    """Build a multiple sequence alignment with Clustal Omega, run a
    Phylip maximum-likelihood analysis on it, and render the resulting
    tree to a PNG.

    External tool dependencies: the ``clustalo`` binary (driven through
    Biopython's command-line wrapper) and the ``phylip`` suite on PATH.
    """

    def __init__(self, sequences=None):
        # NOTE(review): `sequences` is stored but never read by the methods
        # below, which operate on fixed file names (example.fasta etc.).
        self.sequences = sequences

    def make_alignment(self, method):
        """Align example.fasta, convert to phylip format, and run the
        given Phylip program (e.g. "proml" or "dnaml").

        The Phylip outputs are renamed to ``<method>.out`` and
        ``<method>.tree`` so successive runs do not clobber each other.
        """
        ### Multiple Sequence Alignment ###
        in_file = "example.fasta"
        out_file = "alignment.aln"
        # Clustal Omega refuses to overwrite, so clear any stale alignment.
        if os.path.isfile(out_file):
            os.remove(out_file)
        clustalomega_cline = ClustalOmegaCommandline(
            infile=in_file, outfile=out_file, verbose=True, iterations=1,
            max_guidetree_iterations=1, max_hmm_iterations=1, dealign=True,
            outfmt="clu")
        print(clustalomega_cline)
        stdout, stderr = clustalomega_cline()

        ### Convert to phylip format (what the Phylip programs expect) ###
        SeqIO.convert("alignment.aln", "clustal", "alignment.phy", "phylip")

        ### Phylogenetic analysis ###
        # Drive Phylip's interactive prompt: alignment file name, then 'y'
        # to accept the settings. NOTE(review): shell=True with a string
        # command — fine for the fixed strings used here, unsafe if
        # `method` ever comes from untrusted input.
        instructions = bytes("alignment.phy\ny\n", 'utf-8')
        proml = Popen("phylip " + method, stdin=PIPE, shell=True)
        proml.communicate(instructions)

        # Phylip always writes "outfile"/"outtree"; rename per method.
        for src, dst in (("outfile", method + ".out"),
                         ("outtree", method + ".tree")):
            mover = Popen("mv " + src + " " + dst, stdin=PIPE, shell=True)
            mover.communicate()

    def draw_tree(self, filename):
        """Read dnaml.tree (newick), draw it via Biopython/matplotlib, and
        save it as biohackProject/static/images/<filename>.png."""
        # BUG FIX: the tree file was previously opened and never closed.
        with open('dnaml.tree') as tree_file:
            newick = tree_file.read()
        # Drop the last two characters of the file before parsing.
        # NOTE(review): presumably trims a trailing terminator Phylip
        # appends — confirm the file always ends with exactly two such
        # characters.
        tree = Phylo.read(StringIO(newick[:-2]), "newick")
        Phylo.draw(tree, do_show=False)
        pylab.savefig('biohackProject/static/images/' + filename + '.png',
                      dpi=300)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.